Compare commits


6 Commits

Author SHA1 Message Date
Tõnis Tiigi
1fc5647dc2 Merge pull request #2846 from tonistiigi/v0.19.2-picks
[v0.19] v0.19.2 cherry-picks
2024-12-06 14:45:47 -08:00
Tonis Tiigi
b2c0c26c26 bake: allow entitlements from overrides automatically
If an override specifies a path, mark it as automatically
allowed so there is no need to use duplicate flags to define
the same feature.

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
(cherry picked from commit dd596d6542)
2024-12-06 14:15:46 -08:00
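
A minimal sketch of the idea behind this pick, using simplified, hypothetical types (the real logic lives in bake's AddOverrides and EntitlementConf, visible in the bake.go diff further down): when an override value carries a filesystem path, that path is recorded as an allowed entitlement so the user does not have to repeat it via a separate --allow flag.

```go
package main

import (
	"fmt"
	"strings"
)

// EntitlementConf mirrors, in simplified form, the struct bake uses to
// collect paths that a build is allowed to read from or write to.
type EntitlementConf struct {
	FSRead  []string
	FSWrite []string
}

// allowOverridePath is a hypothetical helper: it marks paths coming from
// --set overrides as allowed, so e.g. `--set app.output=type=local,dest=./bin`
// needs no extra `--allow fs.write=./bin` flag.
func allowOverridePath(ent *EntitlementConf, key, value string) {
	for _, field := range strings.Split(value, ",") {
		k, v, ok := strings.Cut(field, "=")
		if !ok {
			continue
		}
		switch {
		case key == "output" && k == "dest":
			ent.FSWrite = append(ent.FSWrite, v) // output paths are written to
		case key == "cache-from" && k == "src":
			ent.FSRead = append(ent.FSRead, v) // cache import paths are read
		}
	}
}

func main() {
	ent := &EntitlementConf{}
	allowOverridePath(ent, "output", "type=local,dest=./bin")
	fmt.Println(ent.FSWrite) // [./bin]
}
```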
Tonis Tiigi
c30db6a955 bake: fix entitlements path checks for local outputs
The previous check based on dest attributes was not correct,
as the attributes are already converted before validation happens.

Because the local path is not preserved for single-file
outputs and gets replaced by an io.Writer, a temporary array variable
was needed. This value should instead be added to the ExportEntry
struct in BuildKit in a future revision.

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
(cherry picked from commit c6e403ad7f)
2024-12-06 14:02:04 -08:00
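
The quirk this fix works around also shows up in the bake.go diff below as ExportsLocalPathsTemporary: for single-file outputs such as type=oci,dest=out.tar, the destination string is consumed and replaced by an io.Writer before validation runs, so the path has to be captured separately. A hedged sketch of the shape of the problem, with hypothetical names:

```go
package main

import (
	"fmt"
	"io"
	"os"
)

// ExportEntry approximates BuildKit's client.ExportEntry: for single-file
// exporters, the Output writer replaces the original dest attribute.
type ExportEntry struct {
	Type   string
	Attrs  map[string]string
	Output io.WriteCloser
}

// prepareExports is a hypothetical helper mirroring the temporary variable
// mentioned above: it converts dest attributes into writers and returns the
// original local paths separately, since the entries no longer carry them.
func prepareExports(entries []*ExportEntry) ([]*ExportEntry, []string, error) {
	var localPaths []string
	for _, e := range entries {
		dest, ok := e.Attrs["dest"]
		if !ok {
			continue
		}
		f, err := os.Create(dest)
		if err != nil {
			return nil, nil, err
		}
		e.Output = f          // the path is now gone from the entry...
		delete(e.Attrs, "dest")
		localPaths = append(localPaths, dest) // ...so remember it for checks
	}
	return entries, localPaths, nil
}

func main() {
	entries := []*ExportEntry{{Type: "oci", Attrs: map[string]string{"dest": "out.tar"}}}
	_, paths, err := prepareExports(entries)
	if err != nil {
		panic(err)
	}
	fmt.Println(paths) // [out.tar] — validated against fs.write entitlements
}
```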
Tõnis Tiigi
5113f9ea89 Merge pull request #2824 from jsternberg/bake-revert-composable-attributes
[v0.19] revert: "bake: initial set of composable bake attributes"
2024-11-27 09:37:55 -08:00
CrazyMax
8b029626f3 bake: additional test for empty variable
Signed-off-by: CrazyMax <1951866+crazy-max@users.noreply.github.com>
2024-11-27 18:28:43 +01:00
Jonathan A. Sternberg
cd017e98ed revert: "bake: initial set of composable bake attributes"
This reverts commit 3ccbb88e6a.

Signed-off-by: Jonathan A. Sternberg <jonathan.sternberg@docker.com>
2024-11-27 09:39:57 -06:00
3135 changed files with 159267 additions and 162334 deletions

.github/labeler.yml vendored
View File

@@ -96,11 +96,6 @@ area/hack:
- changed-files:
- any-glob-to-any-file: 'hack/**'
# Add 'area/history' label to changes in history command
area/history:
- changed-files:
- any-glob-to-any-file: 'commands/history/**'
# Add 'area/tests' label to changes in test files
area/tests:
- changed-files:

View File

@@ -28,8 +28,8 @@ on:
- 'docs/**'
env:
SETUP_BUILDX_VERSION: "edge"
SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
BUILDX_VERSION: "latest"
BUILDKIT_IMAGE: "moby/buildkit:latest"
SCOUT_VERSION: "1.11.0"
REPO_SLUG: "docker/buildx-bin"
DESTDIR: "./bin"
@@ -54,9 +54,9 @@ jobs:
- master
- latest
- buildx-stable-1
- v0.20.2
- v0.19.0
- v0.18.2
- v0.17.2
- v0.16.0
- v0.15.2
worker:
- docker-container
- remote
@@ -76,16 +76,6 @@ jobs:
- worker: docker+containerd # same as docker, but with containerd snapshotter
pkg: ./tests
mode: experimental
- worker: "docker@27.5"
pkg: ./tests
- worker: "docker+containerd@27.5" # same as docker, but with containerd snapshotter
pkg: ./tests
- worker: "docker@27.5"
pkg: ./tests
mode: experimental
- worker: "docker+containerd@27.5" # same as docker, but with containerd snapshotter
pkg: ./tests
mode: experimental
- worker: "docker@26.1"
pkg: ./tests
- worker: "docker+containerd@26.1" # same as docker, but with containerd snapshotter
@@ -131,14 +121,13 @@ jobs:
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
version: ${{ env.BUILDX_VERSION }}
driver-opts: image=${{ env.BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build test image
uses: docker/bake-action@v6
uses: docker/bake-action@v5
with:
source: .
targets: integration-test
set: |
*.output=type=docker,name=${{ env.TEST_IMAGE_ID }}
@@ -184,11 +173,6 @@ jobs:
env:
SKIP_INTEGRATION_TESTS: 1
steps:
-
name: Setup Git config
run: |
git config --global core.autocrlf false
git config --global core.eol lf
-
name: Checkout
uses: actions/checkout@v4
@@ -250,73 +234,6 @@ jobs:
name: test-reports-${{ env.TESTREPORTS_NAME }}
path: ${{ env.TESTREPORTS_BASEDIR }}
test-bsd-unit:
runs-on: ubuntu-22.04
continue-on-error: true
strategy:
fail-fast: false
matrix:
os:
- freebsd
- netbsd
- openbsd
env:
# https://github.com/hashicorp/vagrant/issues/13652
VAGRANT_DISABLE_STRICT_DEPENDENCY_ENFORCEMENT: 1
steps:
-
name: Prepare
run: |
echo "VAGRANT_FILE=hack/Vagrantfile.${{ matrix.os }}" >> $GITHUB_ENV
# Sets semver Go version to be able to download tarball during vagrant setup
goVersion=$(curl --silent "https://go.dev/dl/?mode=json&include=all" | jq -r '.[].files[].version' | uniq | sed -e 's/go//' | sort -V | grep $GO_VERSION | tail -1)
echo "GO_VERSION=$goVersion" >> $GITHUB_ENV
-
name: Checkout
uses: actions/checkout@v4
-
name: Cache Vagrant boxes
uses: actions/cache@v4
with:
path: ~/.vagrant.d/boxes
key: ${{ runner.os }}-vagrant-${{ matrix.os }}-${{ hashFiles(env.VAGRANT_FILE) }}
restore-keys: |
${{ runner.os }}-vagrant-${{ matrix.os }}-
-
name: Install vagrant
run: |
set -x
wget -O - https://apt.releases.hashicorp.com/gpg | sudo gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list
sudo apt-get update
sudo apt-get install -y libvirt-dev libvirt-daemon libvirt-daemon-system vagrant vagrant-libvirt ruby-libvirt
sudo systemctl enable --now libvirtd
sudo chmod a+rw /var/run/libvirt/libvirt-sock
vagrant plugin install vagrant-libvirt
vagrant --version
-
name: Set up vagrant
run: |
ln -sf ${{ env.VAGRANT_FILE }} Vagrantfile
vagrant up --no-tty
-
name: Test
run: |
vagrant ssh -- "cd /vagrant; SKIP_INTEGRATION_TESTS=1 go test -mod=vendor -coverprofile=coverage.txt -covermode=atomic ${{ env.TESTFLAGS }} ./..."
vagrant ssh -c "sudo cat /vagrant/coverage.txt" > coverage.txt
-
name: Upload coverage
if: always()
uses: codecov/codecov-action@v5
with:
files: ./coverage.txt
env_vars: RUNNER_OS
flags: unit,${{ matrix.os }}
token: ${{ secrets.CODECOV_TOKEN }}
env:
RUNNER_OS: ${{ matrix.os }}
govulncheck:
runs-on: ubuntu-24.04
permissions:
@@ -325,16 +242,19 @@ jobs:
# required to write sarif report
security-events: write
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
version: ${{ env.BUILDX_VERSION }}
driver-opts: image=${{ env.BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Run
uses: docker/bake-action@v6
uses: docker/bake-action@v5
with:
targets: govulncheck
env:
@@ -388,8 +308,8 @@ jobs:
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
version: ${{ env.BUILDX_VERSION }}
driver-opts: image=${{ env.BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build
@@ -414,15 +334,6 @@ jobs:
- test-unit
if: ${{ github.event_name != 'pull_request' && github.repository == 'docker/buildx' }}
steps:
-
name: Free disk space
uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be # v1.3.1
with:
android: true
dotnet: true
haskell: true
large-packages: true
swap-storage: true
-
name: Checkout
uses: actions/checkout@v4
@@ -433,8 +344,8 @@ jobs:
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
version: ${{ env.BUILDX_VERSION }}
driver-opts: image=${{ env.BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Docker meta
@@ -457,9 +368,8 @@ jobs:
password: ${{ secrets.DOCKERPUBLICBOT_WRITE_PAT }}
-
name: Build and push image
uses: docker/bake-action@v6
uses: docker/bake-action@v5
with:
source: .
files: |
./docker-bake.hcl
${{ steps.meta.outputs.bake-file }}
@@ -481,6 +391,9 @@ jobs:
needs:
- bin-image
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Login to DockerHub
uses: docker/login-action@v3
@@ -535,7 +448,7 @@ jobs:
-
name: GitHub Release
if: startsWith(github.ref, 'refs/tags/v')
uses: softprops/action-gh-release@da05d552573ad5aba039eaac05058a918a7bf631 # v2.2.2
uses: softprops/action-gh-release@01570a1f39cb168c169c802c3bceb9e93fb10974 # v2.1.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:

View File

@@ -19,10 +19,6 @@ on:
types:
- released
env:
SETUP_BUILDX_VERSION: "edge"
SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
jobs:
open-pr:
runs-on: ubuntu-24.04
@@ -50,13 +46,9 @@ jobs:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Generate yaml
uses: docker/bake-action@v6
uses: docker/bake-action@v5
with:
source: ${{ github.server_url }}/${{ github.repository }}.git#${{ env.RELEASE_NAME }}
targets: update-docs
@@ -77,7 +69,7 @@ jobs:
VENDOR_MODULE: github.com/docker/buildx@${{ env.RELEASE_NAME }}
-
name: Create PR on docs repo
uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
uses: peter-evans/create-pull-request@5e914681df9dc83aa4e4905692ca88beb2f9e91f # v7.0.5
with:
token: ${{ secrets.GHPAT_DOCS_DISPATCH }}
push-to-fork: docker-tools-robot/docker.github.io

View File

@@ -29,24 +29,21 @@ on:
- '.github/workflows/docs-upstream.yml'
- 'docs/**'
env:
SETUP_BUILDX_VERSION: "edge"
SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
jobs:
docs-yaml:
runs-on: ubuntu-24.04
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
version: latest
-
name: Build reference YAML docs
uses: docker/bake-action@v6
uses: docker/bake-action@v5
with:
targets: update-docs
provenance: false
@@ -65,7 +62,7 @@ jobs:
retention-days: 1
validate:
uses: docker/docs/.github/workflows/validate-upstream.yml@main
uses: docker/docs/.github/workflows/validate-upstream.yml@6b73b05acb21edf7995cc5b3c6672d8e314cee7a # pin for artifact v4 support: https://github.com/docker/docs/pull/19220
needs:
- docs-yaml
with:

View File

@@ -26,25 +26,23 @@ on:
- 'docs/**'
env:
SETUP_BUILDX_VERSION: "edge"
SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
DESTDIR: "./bin"
K3S_VERSION: "v1.32.2+k3s1"
K3S_VERSION: "v1.21.2-k3s1"
jobs:
build:
runs-on: ubuntu-24.04
steps:
- name: Checkout
uses: actions/checkout@v4
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
version: latest
-
name: Build
uses: docker/bake-action@v6
uses: docker/bake-action@v5
with:
targets: binaries
set: |
@@ -65,7 +63,7 @@ jobs:
retention-days: 7
driver:
runs-on: ubuntu-24.04
runs-on: ubuntu-20.04
needs:
- build
strategy:
@@ -153,7 +151,7 @@ jobs:
-
name: Install k3s
if: matrix.driver == 'kubernetes'
uses: crazy-max/.github/.github/actions/install-k3s@7730d1434364d4b9aded32735b078a7ace5ea79a
uses: crazy-max/.github/.github/actions/install-k3s@fa6141aedf23596fb8bdcceab9cce8dadaa31bd9
with:
version: ${{ env.K3S_VERSION }}
-
@@ -177,78 +175,3 @@ jobs:
DRIVER_OPT: ${{ matrix.driver-opt }}
ENDPOINT: ${{ matrix.endpoint }}
PLATFORMS: ${{ matrix.platforms }}
bake:
runs-on: ubuntu-24.04
needs:
- build
env:
DOCKER_BUILD_CHECKS_ANNOTATIONS: false
DOCKER_BUILD_SUMMARY: false
strategy:
fail-fast: false
matrix:
include:
-
# https://github.com/docker/bake-action/blob/v5.11.0/.github/workflows/ci.yml#L227-L237
source: "https://github.com/docker/bake-action.git#v5.11.0:test/go"
overrides: |
*.output=/tmp/bake-build
-
# https://github.com/tonistiigi/xx/blob/2fc85604e7280bfb3f626569bd4c5413c43eb4af/.github/workflows/ld.yml#L90-L98
source: "https://github.com/tonistiigi/xx.git#2fc85604e7280bfb3f626569bd4c5413c43eb4af"
targets: |
ld64-static-tgz
overrides: |
ld64-static-tgz.output=type=local,dest=./dist
ld64-static-tgz.platform=linux/amd64
ld64-static-tgz.cache-from=type=gha,scope=xx-ld64-static-tgz
ld64-static-tgz.cache-to=type=gha,scope=xx-ld64-static-tgz
-
# https://github.com/moby/buildkit-bench/blob/54c194011c4fc99a94aa75d4b3d4f3ffd4c4ce27/docker-bake.hcl#L154-L160
source: "https://github.com/moby/buildkit-bench.git#54c194011c4fc99a94aa75d4b3d4f3ffd4c4ce27"
targets: |
tests-buildkit
envs: |
BUILDKIT_REFS=v0.18.2
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Expose GitHub Runtime
uses: crazy-max/ghaction-github-runtime@v3
-
name: Environment variables
if: matrix.envs != ''
run: |
for l in "${{ matrix.envs }}"; do
echo "${l?}" >> $GITHUB_ENV
done
-
name: Set up QEMU
uses: docker/setup-qemu-action@v3
-
name: Install buildx
uses: actions/download-artifact@v4
with:
name: binary
path: /home/runner/.docker/cli-plugins
-
name: Fix perms and check
run: |
chmod +x /home/runner/.docker/cli-plugins/docker-buildx
docker buildx version
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build
uses: docker/bake-action@v6
with:
source: ${{ matrix.source }}
targets: ${{ matrix.targets }}
set: ${{ matrix.overrides }}

View File

@@ -1,17 +0,0 @@
name: pr-assign-author
permissions:
contents: read
on:
pull_request_target:
types:
- opened
- reopened
jobs:
run:
uses: crazy-max/.github/.github/workflows/pr-assign-author.yml@c27924b5b93ccfe6dcc0d7b22e779ef3c05f9a92
permissions:
contents: read
pull-requests: write

View File

@@ -25,10 +25,6 @@ on:
paths-ignore:
- '.github/releases.json'
env:
SETUP_BUILDX_VERSION: "edge"
SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
jobs:
prepare:
runs-on: ubuntu-24.04
@@ -94,16 +90,17 @@ jobs:
if [ "$GITHUB_REPOSITORY" = "docker/buildx" ]; then
echo "GOLANGCI_LINT_MULTIPLATFORM=1" >> $GITHUB_ENV
fi
-
name: Checkout
uses: actions/checkout@v4
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
version: latest
-
name: Validate
uses: docker/bake-action@v6
uses: docker/bake-action@v5
with:
targets: ${{ matrix.target }}
set: |

View File

@@ -1,6 +1,9 @@
run:
timeout: 30m
modules-download-mode: vendor
# default uses Go version from the go.mod file, fallback on the env var
# `GOVERSION`, fallback on 1.17: https://golangci-lint.run/usage/configuration/#run-configuration
go: "1.23"
linters:
enable:
@@ -40,9 +43,6 @@ linters-settings:
# buildkit errdefs package (or vice-versa).
- pkg: "github.com/containerd/errdefs"
alias: "cerrdefs"
# Use a consistent alias to prevent confusion with "github.com/moby/buildkit/client"
- pkg: "github.com/docker/docker/client"
alias: "dockerclient"
- pkg: "github.com/opencontainers/image-spec/specs-go/v1"
alias: "ocispecs"
- pkg: "github.com/opencontainers/go-digest"

View File

@@ -1,27 +1,23 @@
# syntax=docker/dockerfile:1
ARG GO_VERSION=1.23
ARG ALPINE_VERSION=3.21
ARG XX_VERSION=1.6.1
ARG XX_VERSION=1.5.0
# for testing
ARG DOCKER_VERSION=28.1.0
ARG DOCKER_VERSION_ALT_27=27.5.1
ARG DOCKER_VERSION=27.4.0-rc.2
ARG DOCKER_VERSION_ALT_26=26.1.3
ARG DOCKER_CLI_VERSION=${DOCKER_VERSION}
ARG GOTESTSUM_VERSION=v1.12.0
ARG REGISTRY_VERSION=3.0.0
ARG BUILDKIT_VERSION=v0.21.0
ARG UNDOCK_VERSION=0.9.0
ARG REGISTRY_VERSION=2.8.3
ARG BUILDKIT_VERSION=v0.17.2
ARG UNDOCK_VERSION=0.8.0
FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS golatest
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS golatest
FROM moby/moby-bin:$DOCKER_VERSION AS docker-engine
FROM dockereng/cli-bin:$DOCKER_CLI_VERSION AS docker-cli
FROM moby/moby-bin:$DOCKER_VERSION_ALT_27 AS docker-engine-alt27
FROM moby/moby-bin:$DOCKER_VERSION_ALT_26 AS docker-engine-alt26
FROM dockereng/cli-bin:$DOCKER_VERSION_ALT_27 AS docker-cli-alt27
FROM dockereng/cli-bin:$DOCKER_VERSION_ALT_26 AS docker-cli-alt26
FROM moby/moby-bin:$DOCKER_VERSION_ALT_26 AS docker-engine-alt
FROM dockereng/cli-bin:$DOCKER_VERSION_ALT_26 AS docker-cli-alt
FROM registry:$REGISTRY_VERSION AS registry
FROM moby/buildkit:$BUILDKIT_VERSION AS buildkit
FROM crazymax/undock:$UNDOCK_VERSION AS undock
@@ -105,7 +101,6 @@ COPY --link --from=buildx-build /usr/bin/docker-buildx /buildx
FROM binaries-unix AS binaries-darwin
FROM binaries-unix AS binaries-freebsd
FROM binaries-unix AS binaries-linux
FROM binaries-unix AS binaries-netbsd
FROM binaries-unix AS binaries-openbsd
FROM scratch AS binaries-windows
@@ -131,21 +126,19 @@ COPY --link --from=gotestsum /out /usr/bin/
COPY --link --from=registry /bin/registry /usr/bin/
COPY --link --from=docker-engine / /usr/bin/
COPY --link --from=docker-cli / /usr/bin/
COPY --link --from=docker-engine-alt27 / /opt/docker-alt-27/
COPY --link --from=docker-engine-alt26 / /opt/docker-alt-26/
COPY --link --from=docker-cli-alt27 / /opt/docker-alt-27/
COPY --link --from=docker-cli-alt26 / /opt/docker-alt-26/
COPY --link --from=docker-engine-alt / /opt/docker-alt-26/
COPY --link --from=docker-cli-alt / /opt/docker-alt-26/
COPY --link --from=buildkit /usr/bin/buildkitd /usr/bin/
COPY --link --from=buildkit /usr/bin/buildctl /usr/bin/
COPY --link --from=undock /usr/local/bin/undock /usr/bin/
COPY --link --from=binaries /buildx /usr/bin/
ENV TEST_DOCKER_EXTRA="docker@27.5=/opt/docker-alt-27,docker@26.1=/opt/docker-alt-26"
ENV TEST_DOCKER_EXTRA="docker@26.1=/opt/docker-alt-26"
FROM integration-test-base AS integration-test
COPY . .
# Release
FROM --platform=$BUILDPLATFORM alpine:${ALPINE_VERSION} AS releaser
FROM --platform=$BUILDPLATFORM alpine AS releaser
WORKDIR /work
ARG TARGETPLATFORM
RUN --mount=from=binaries \
@@ -160,7 +153,7 @@ COPY --from=releaser /out/ /
# Shell
FROM docker:$DOCKER_VERSION AS dockerd-release
FROM alpine:${ALPINE_VERSION} AS shell
FROM alpine AS shell
RUN apk add --no-cache iptables tmux git vim less openssh
RUN mkdir -p /usr/local/lib/docker/cli-plugins && ln -s /usr/local/bin/buildx /usr/local/lib/docker/cli-plugins/docker-buildx
COPY ./hack/demo-env/entrypoint.sh /usr/local/bin

View File

@@ -21,7 +21,7 @@
- [Verify essential information](#verify-essential-information)
- [Classify the issue](#classify-the-issue)
- [Prioritization guidelines for `kind/bug`](#prioritization-guidelines-for-kindbug)
- [Issue lifecycle](#issue-lifecycle)
- [Issue lifecyle](#issue-lifecyle)
- [Examples](#examples)
- [Submitting a bug](#submitting-a-bug)
- [Pull request review process](#pull-request-review-process)
@@ -308,7 +308,7 @@ Examples:
- Bugs in non-default configurations
- Most enhancements
## Issue lifecycle
## Issue lifecyle
```mermaid
flowchart LR

View File

@@ -2,10 +2,7 @@ package bake
import (
"context"
"encoding"
"encoding/json"
"io"
"maps"
"os"
"path"
"path/filepath"
@@ -29,7 +26,9 @@ import (
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/session/auth/authprovider"
"github.com/moby/buildkit/util/entitlements"
"github.com/pkg/errors"
"github.com/tonistiigi/go-csvvalue"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/convert"
)
@@ -47,7 +46,6 @@ type File struct {
type Override struct {
Value string
ArrValue []string
Append bool
}
func defaultFilenames() []string {
@@ -55,8 +53,8 @@ func defaultFilenames() []string {
names = append(names, composecli.DefaultFileNames...)
names = append(names, []string{
"docker-bake.json",
"docker-bake.hcl",
"docker-bake.override.json",
"docker-bake.hcl",
"docker-bake.override.hcl",
}...)
return names
@@ -209,9 +207,8 @@ func ReadTargets(ctx context.Context, files []File, targets, overrides []string,
if err != nil {
return nil, nil, err
}
targetsMap := map[string]*Target{}
groupsMap := map[string]*Group{}
m := map[string]*Target{}
n := map[string]*Group{}
for _, target := range targets {
ts, gs := c.ResolveGroup(target)
for _, tname := range ts {
@@ -220,13 +217,13 @@ func ReadTargets(ctx context.Context, files []File, targets, overrides []string,
return nil, nil, err
}
if t != nil {
targetsMap[tname] = t
m[tname] = t
}
}
for _, gname := range gs {
for _, group := range c.Groups {
if group.Name == gname {
groupsMap[gname] = group
n[gname] = group
break
}
}
@@ -234,26 +231,25 @@ func ReadTargets(ctx context.Context, files []File, targets, overrides []string,
}
for _, target := range targets {
if _, ok := groupsMap["default"]; ok && target == "default" {
if target == "default" {
continue
}
if _, ok := groupsMap["default"]; !ok {
groupsMap["default"] = &Group{Name: "default"}
if _, ok := n["default"]; !ok {
n["default"] = &Group{Name: "default"}
}
groupsMap["default"].Targets = append(groupsMap["default"].Targets, target)
n["default"].Targets = append(n["default"].Targets, target)
}
if g, ok := groupsMap["default"]; ok {
if g, ok := n["default"]; ok {
g.Targets = dedupSlice(g.Targets)
sort.Strings(g.Targets)
}
for name, t := range targetsMap {
if err := c.loadLinks(name, t, targetsMap, o, nil, ent); err != nil {
for name, t := range m {
if err := c.loadLinks(name, t, m, o, nil, ent); err != nil {
return nil, nil, err
}
}
return targetsMap, groupsMap, nil
return m, n, nil
}
func dedupSlice(s []string) []string {
@@ -488,9 +484,11 @@ func (c Config) loadLinks(name string, t *Target, m map[string]*Target, o map[st
if target == name {
return errors.Errorf("target %s cannot link to itself", target)
}
if slices.Contains(visited, target) {
for _, v := range visited {
if v == target {
return errors.Errorf("infinite loop from %s to %s", name, target)
}
}
t2, ok := m[target]
if !ok {
var err error
@@ -498,9 +496,7 @@ func (c Config) loadLinks(name string, t *Target, m map[string]*Target, o map[st
if err != nil {
return err
}
t2.Outputs = []*buildflags.ExportEntry{
{Type: "cacheonly"},
}
t2.Outputs = []string{"type=cacheonly"}
t2.linked = true
m[target] = t2
}
@@ -516,8 +512,8 @@ func (c Config) loadLinks(name string, t *Target, m map[string]*Target, o map[st
}
if len(t.Platforms) > 1 && len(t2.Platforms) > 1 {
if !isSubset(t.Platforms, t2.Platforms) {
return errors.Errorf("target %s can't be used by %s because its platforms %v are not a subset of %v", target, name, t.Platforms, t2.Platforms)
if !sliceEqual(t.Platforms, t2.Platforms) {
return errors.Errorf("target %s can't be used by %s because it is defined for different platforms %v and %v", target, name, t2.Platforms, t.Platforms)
}
}
}
@@ -529,12 +525,9 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
m := map[string]map[string]Override{}
for _, v := range v {
parts := strings.SplitN(v, "=", 2)
skey := strings.TrimSuffix(parts[0], "+")
appendTo := strings.HasSuffix(parts[0], "+")
keys := strings.SplitN(skey, ".", 3)
keys := strings.SplitN(parts[0], ".", 3)
if len(keys) < 2 {
return nil, errors.Errorf("invalid override key %s, expected target.name", skey)
return nil, errors.Errorf("invalid override key %s, expected target.name", parts[0])
}
pattern := keys[0]
@@ -547,7 +540,8 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
return nil, err
}
okey := strings.Join(keys[1:], ".")
kk := strings.SplitN(parts[0], ".", 2)
for _, name := range names {
t, ok := m[name]
if !ok {
@@ -555,15 +549,12 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
m[name] = t
}
override := t[okey]
o := t[kk[1]]
// IMPORTANT: if you add more fields here, do not forget to update
// docs/reference/buildx_bake.md (--set) and https://docs.docker.com/build/bake/overrides/
switch keys[1] {
case "output", "cache-to", "cache-from", "tags", "platform", "secrets", "ssh", "attest", "entitlements", "network", "annotations":
case "output", "cache-to", "cache-from", "tags", "platform", "secrets", "ssh", "attest", "entitlements", "network":
if len(parts) == 2 {
override.Append = appendTo
override.ArrValue = append(override.ArrValue, parts[1])
o.ArrValue = append(o.ArrValue, parts[1])
}
case "args":
if len(keys) != 3 {
@@ -574,7 +565,7 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
if !ok {
continue
}
override.Value = v
o.Value = v
}
fallthrough
case "contexts":
@@ -584,11 +575,11 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
fallthrough
default:
if len(parts) == 2 {
override.Value = parts[1]
o.Value = parts[1]
}
}
t[okey] = override
t[kk[1]] = o
}
}
return m, nil
@@ -705,7 +696,7 @@ type Target struct {
Inherits []string `json:"inherits,omitempty" hcl:"inherits,optional" cty:"inherits"`
Annotations []string `json:"annotations,omitempty" hcl:"annotations,optional" cty:"annotations"`
Attest buildflags.Attests `json:"attest,omitempty" hcl:"attest,optional" cty:"attest"`
Attest []string `json:"attest,omitempty" hcl:"attest,optional" cty:"attest"`
Context *string `json:"context,omitempty" hcl:"context,optional" cty:"context"`
Contexts map[string]string `json:"contexts,omitempty" hcl:"contexts,optional" cty:"contexts"`
Dockerfile *string `json:"dockerfile,omitempty" hcl:"dockerfile,optional" cty:"dockerfile"`
@@ -713,19 +704,19 @@ type Target struct {
Args map[string]*string `json:"args,omitempty" hcl:"args,optional" cty:"args"`
Labels map[string]*string `json:"labels,omitempty" hcl:"labels,optional" cty:"labels"`
Tags []string `json:"tags,omitempty" hcl:"tags,optional" cty:"tags"`
CacheFrom buildflags.CacheOptions `json:"cache-from,omitempty" hcl:"cache-from,optional" cty:"cache-from"`
CacheTo buildflags.CacheOptions `json:"cache-to,omitempty" hcl:"cache-to,optional" cty:"cache-to"`
CacheFrom []string `json:"cache-from,omitempty" hcl:"cache-from,optional" cty:"cache-from"`
CacheTo []string `json:"cache-to,omitempty" hcl:"cache-to,optional" cty:"cache-to"`
Target *string `json:"target,omitempty" hcl:"target,optional" cty:"target"`
Secrets buildflags.Secrets `json:"secret,omitempty" hcl:"secret,optional" cty:"secret"`
SSH buildflags.SSHKeys `json:"ssh,omitempty" hcl:"ssh,optional" cty:"ssh"`
Secrets []string `json:"secret,omitempty" hcl:"secret,optional" cty:"secret"`
SSH []string `json:"ssh,omitempty" hcl:"ssh,optional" cty:"ssh"`
Platforms []string `json:"platforms,omitempty" hcl:"platforms,optional" cty:"platforms"`
Outputs buildflags.Exports `json:"output,omitempty" hcl:"output,optional" cty:"output"`
Outputs []string `json:"output,omitempty" hcl:"output,optional" cty:"output"`
Pull *bool `json:"pull,omitempty" hcl:"pull,optional" cty:"pull"`
NoCache *bool `json:"no-cache,omitempty" hcl:"no-cache,optional" cty:"no-cache"`
NetworkMode *string `json:"network,omitempty" hcl:"network,optional" cty:"network"`
NoCacheFilter []string `json:"no-cache-filter,omitempty" hcl:"no-cache-filter,optional" cty:"no-cache-filter"`
ShmSize *string `json:"shm-size,omitempty" hcl:"shm-size,optional" cty:"shm-size"`
Ulimits []string `json:"ulimits,omitempty" hcl:"ulimits,optional" cty:"ulimits"`
ShmSize *string `json:"shm-size,omitempty" hcl:"shm-size,optional"`
Ulimits []string `json:"ulimits,omitempty" hcl:"ulimits,optional"`
Call *string `json:"call,omitempty" hcl:"call,optional" cty:"call"`
Entitlements []string `json:"entitlements,omitempty" hcl:"entitlements,optional" cty:"entitlements"`
// IMPORTANT: if you add more fields here, do not forget to update newOverrides/AddOverrides and docs/bake-reference.md.
@@ -734,41 +725,6 @@ type Target struct {
linked bool
}
func (t *Target) MarshalJSON() ([]byte, error) {
tgt := *t
esc := func(s string) string {
return strings.ReplaceAll(strings.ReplaceAll(s, "${", "$${"), "%{", "%%{")
}
tgt.Annotations = slices.Clone(t.Annotations)
for i, v := range tgt.Annotations {
tgt.Annotations[i] = esc(v)
}
if tgt.DockerfileInline != nil {
escaped := esc(*tgt.DockerfileInline)
tgt.DockerfileInline = &escaped
}
tgt.Labels = maps.Clone(t.Labels)
for k, v := range t.Labels {
if v != nil {
escaped := esc(*v)
tgt.Labels[k] = &escaped
}
}
tgt.Args = maps.Clone(t.Args)
for k, v := range t.Args {
if v != nil {
escaped := esc(*v)
tgt.Args[k] = &escaped
}
}
return json.Marshal(tgt)
}
var (
_ hclparser.WithEvalContexts = &Target{}
_ hclparser.WithGetName = &Target{}
@@ -777,23 +733,23 @@ var (
)
func (t *Target) normalize() {
t.Annotations = removeDupesStr(t.Annotations)
t.Attest = t.Attest.Normalize()
t.Tags = removeDupesStr(t.Tags)
t.Secrets = t.Secrets.Normalize()
t.SSH = t.SSH.Normalize()
t.Platforms = removeDupesStr(t.Platforms)
t.CacheFrom = t.CacheFrom.Normalize()
t.CacheTo = t.CacheTo.Normalize()
t.Outputs = t.Outputs.Normalize()
t.NoCacheFilter = removeDupesStr(t.NoCacheFilter)
t.Ulimits = removeDupesStr(t.Ulimits)
t.Annotations = removeDupes(t.Annotations)
t.Attest = removeAttestDupes(t.Attest)
t.Tags = removeDupes(t.Tags)
t.Secrets = removeDupes(t.Secrets)
t.SSH = removeDupes(t.SSH)
t.Platforms = removeDupes(t.Platforms)
t.CacheFrom = removeDupes(t.CacheFrom)
t.CacheTo = removeDupes(t.CacheTo)
t.Outputs = removeDupes(t.Outputs)
t.NoCacheFilter = removeDupes(t.NoCacheFilter)
t.Ulimits = removeDupes(t.Ulimits)
if t.NetworkMode != nil && *t.NetworkMode == "host" {
t.Entitlements = append(t.Entitlements, "network.host")
}
t.Entitlements = removeDupesStr(t.Entitlements)
t.Entitlements = removeDupes(t.Entitlements)
for k, v := range t.Contexts {
if v == "" {
@@ -852,19 +808,20 @@ func (t *Target) Merge(t2 *Target) {
t.Annotations = append(t.Annotations, t2.Annotations...)
}
if t2.Attest != nil { // merge
t.Attest = t.Attest.Merge(t2.Attest)
t.Attest = append(t.Attest, t2.Attest...)
t.Attest = removeAttestDupes(t.Attest)
}
if t2.Secrets != nil { // merge
t.Secrets = t.Secrets.Merge(t2.Secrets)
t.Secrets = append(t.Secrets, t2.Secrets...)
}
if t2.SSH != nil { // merge
t.SSH = t.SSH.Merge(t2.SSH)
t.SSH = append(t.SSH, t2.SSH...)
}
if t2.Platforms != nil { // no merge
t.Platforms = t2.Platforms
}
if t2.CacheFrom != nil { // merge
t.CacheFrom = t.CacheFrom.Merge(t2.CacheFrom)
t.CacheFrom = append(t.CacheFrom, t2.CacheFrom...)
}
if t2.CacheTo != nil { // no merge
t.CacheTo = t2.CacheTo
@@ -900,8 +857,6 @@ func (t *Target) Merge(t2 *Target) {
}
func (t *Target) AddOverrides(overrides map[string]Override, ent *EntitlementConf) error {
// IMPORTANT: if you add more fields here, do not forget to update
// docs/bake-reference.md and https://docs.docker.com/build/bake/overrides/
for key, o := range overrides {
value := o.Value
keys := strings.SplitN(key, ".", 2)
@@ -935,22 +890,14 @@ func (t *Target) AddOverrides(overrides map[string]Override, ent *EntitlementCon
}
t.Labels[keys[1]] = &value
case "tags":
if o.Append {
t.Tags = append(t.Tags, o.ArrValue...)
} else {
t.Tags = o.ArrValue
}
case "cache-from":
t.CacheFrom = o.ArrValue
cacheFrom, err := buildflags.ParseCacheEntry(o.ArrValue)
if err != nil {
return err
}
if o.Append {
t.CacheFrom = t.CacheFrom.Merge(cacheFrom)
} else {
t.CacheFrom = cacheFrom
}
for _, c := range t.CacheFrom {
for _, c := range cacheFrom {
if c.Type == "local" {
if v, ok := c.Attrs["src"]; ok {
ent.FSRead = append(ent.FSRead, v)
@@ -958,16 +905,12 @@ func (t *Target) AddOverrides(overrides map[string]Override, ent *EntitlementCon
}
}
case "cache-to":
t.CacheTo = o.ArrValue
cacheTo, err := buildflags.ParseCacheEntry(o.ArrValue)
if err != nil {
return err
}
if o.Append {
t.CacheTo = t.CacheTo.Merge(cacheTo)
} else {
t.CacheTo = cacheTo
}
for _, c := range t.CacheTo {
for _, c := range cacheTo {
if c.Type == "local" {
if v, ok := c.Attrs["dest"]; ok {
ent.FSWrite = append(ent.FSWrite, v)
@@ -979,50 +922,34 @@ func (t *Target) AddOverrides(overrides map[string]Override, ent *EntitlementCon
case "call":
t.Call = &value
case "secrets":
secrets, err := parseArrValue[buildflags.Secret](o.ArrValue)
t.Secrets = o.ArrValue
secrets, err := buildflags.ParseSecretSpecs(o.ArrValue)
if err != nil {
return errors.Wrap(err, "invalid value for outputs")
}
if o.Append {
t.Secrets = t.Secrets.Merge(secrets)
} else {
t.Secrets = secrets
}
for _, s := range t.Secrets {
for _, s := range secrets {
if s.FilePath != "" {
ent.FSRead = append(ent.FSRead, s.FilePath)
}
}
case "ssh":
ssh, err := parseArrValue[buildflags.SSH](o.ArrValue)
t.SSH = o.ArrValue
ssh, err := buildflags.ParseSSHSpecs(o.ArrValue)
if err != nil {
return errors.Wrap(err, "invalid value for outputs")
}
if o.Append {
t.SSH = t.SSH.Merge(ssh)
} else {
t.SSH = ssh
}
for _, s := range t.SSH {
for _, s := range ssh {
ent.FSRead = append(ent.FSRead, s.Paths...)
}
case "platform":
if o.Append {
t.Platforms = append(t.Platforms, o.ArrValue...)
} else {
t.Platforms = o.ArrValue
}
case "output":
outputs, err := parseArrValue[buildflags.ExportEntry](o.ArrValue)
t.Outputs = o.ArrValue
outputs, err := buildflags.ParseExports(o.ArrValue)
if err != nil {
return errors.Wrap(err, "invalid value for outputs")
}
if o.Append {
t.Outputs = t.Outputs.Merge(outputs)
} else {
t.Outputs = outputs
}
for _, o := range t.Outputs {
for _, o := range outputs {
if o.Destination != "" {
ent.FSWrite = append(ent.FSWrite, o.Destination)
}
@@ -1039,11 +966,7 @@ func (t *Target) AddOverrides(overrides map[string]Override, ent *EntitlementCon
case "annotations":
t.Annotations = append(t.Annotations, o.ArrValue...)
case "attest":
attest, err := parseArrValue[buildflags.Attest](o.ArrValue)
if err != nil {
return errors.Wrap(err, "invalid value for attest")
}
t.Attest = t.Attest.Merge(attest)
t.Attest = append(t.Attest, o.ArrValue...)
case "no-cache":
noCache, err := strconv.ParseBool(value)
if err != nil {
@@ -1051,19 +974,11 @@ func (t *Target) AddOverrides(overrides map[string]Override, ent *EntitlementCon
}
t.NoCache = &noCache
case "no-cache-filter":
if o.Append {
t.NoCacheFilter = append(t.NoCacheFilter, o.ArrValue...)
} else {
t.NoCacheFilter = o.ArrValue
}
case "shm-size":
t.ShmSize = &value
case "ulimits":
if o.Append {
t.Ulimits = append(t.Ulimits, o.ArrValue...)
} else {
t.Ulimits = o.ArrValue
}
case "network":
t.NetworkMode = &value
case "pull":
@@ -1141,7 +1056,9 @@ func (t *Target) GetEvalContexts(ectx *hcl.EvalContext, block *hcl.Block, loadDe
e2 := ectx.NewChild()
e2.Variables = make(map[string]cty.Value)
if e != ectx {
maps.Copy(e2.Variables, e.Variables)
for k, v := range e.Variables {
e2.Variables[k] = v
}
}
e2.Variables[k] = v
ectxs2 = append(ectxs2, e2)
@@ -1202,9 +1119,7 @@ func (t *Target) GetName(ectx *hcl.EvalContext, block *hcl.Block, loadDeps func(
func TargetsToBuildOpt(m map[string]*Target, inp *Input) (map[string]build.Options, error) {
// make sure local credentials are loaded multiple times for different targets
dockerConfig := config.LoadDefaultConfigFile(os.Stderr)
authProvider := authprovider.NewDockerAuthProvider(authprovider.DockerAuthProviderConfig{
ConfigFile: dockerConfig,
})
authProvider := authprovider.NewDockerAuthProvider(dockerConfig, nil)
m2 := make(map[string]build.Options, len(m))
for k, v := range m {
@@ -1256,16 +1171,6 @@ func updateContext(t *build.Inputs, inp *Input) {
t.ContextState = &st
}
func isRemoteContext(t build.Inputs, inp *Input) bool {
if build.IsRemoteURL(t.ContextPath) {
return true
}
if inp != nil && build.IsRemoteURL(inp.URL) && !strings.HasPrefix(t.ContextPath, "cwd://") {
return true
}
return false
}
func collectLocalPaths(t build.Inputs) []string {
var out []string
if t.ContextState == nil {
@@ -1360,8 +1265,8 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
if strings.HasPrefix(bi.ContextPath, "cwd://") {
bi.ContextPath = path.Clean(strings.TrimPrefix(bi.ContextPath, "cwd://"))
}
if !build.IsRemoteURL(bi.ContextPath) && bi.ContextState == nil && !filepath.IsAbs(bi.DockerfilePath) {
bi.DockerfilePath = filepath.Join(bi.ContextPath, bi.DockerfilePath)
if !build.IsRemoteURL(bi.ContextPath) && bi.ContextState == nil && !path.IsAbs(bi.DockerfilePath) {
bi.DockerfilePath = path.Join(bi.ContextPath, bi.DockerfilePath)
}
for k, v := range bi.NamedContexts {
if strings.HasPrefix(v.Path, "cwd://") {
@@ -1425,35 +1330,28 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
}
bo.Platforms = platforms
secrets := t.Secrets
if isRemoteContext(bi, inp) {
if _, ok := os.LookupEnv("BUILDX_BAKE_GIT_AUTH_TOKEN"); ok {
secrets = append(secrets, &buildflags.Secret{
ID: llb.GitAuthTokenKey,
Env: "BUILDX_BAKE_GIT_AUTH_TOKEN",
})
secrets, err := buildflags.ParseSecretSpecs(t.Secrets)
if err != nil {
return nil, err
}
if _, ok := os.LookupEnv("BUILDX_BAKE_GIT_AUTH_HEADER"); ok {
secrets = append(secrets, &buildflags.Secret{
ID: llb.GitAuthHeaderKey,
Env: "BUILDX_BAKE_GIT_AUTH_HEADER",
})
}
}
secrets = secrets.Normalize()
bo.SecretSpecs = secrets.ToPB()
secretAttachment, err := controllerapi.CreateSecrets(bo.SecretSpecs)
bo.SecretSpecs = secrets
secretAttachment, err := controllerapi.CreateSecrets(secrets)
if err != nil {
return nil, err
}
bo.Session = append(bo.Session, secretAttachment)
bo.SSHSpecs = t.SSH.ToPB()
if len(bo.SSHSpecs) == 0 && buildflags.IsGitSSH(bi.ContextPath) || (inp != nil && buildflags.IsGitSSH(inp.URL)) {
bo.SSHSpecs = []*controllerapi.SSH{{ID: "default"}}
sshSpecs, err := buildflags.ParseSSHSpecs(t.SSH)
if err != nil {
return nil, err
}
if len(sshSpecs) == 0 && (buildflags.IsGitSSH(bi.ContextPath) || (inp != nil && buildflags.IsGitSSH(inp.URL))) {
sshSpecs = append(sshSpecs, &controllerapi.SSH{ID: "default"})
}
bo.SSHSpecs = sshSpecs
sshAttachment, err := controllerapi.CreateSSH(bo.SSHSpecs)
sshAttachment, err := controllerapi.CreateSSH(sshSpecs)
if err != nil {
return nil, err
}
@@ -1469,14 +1367,24 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
}
}
if t.CacheFrom != nil {
bo.CacheFrom = controllerapi.CreateCaches(t.CacheFrom.ToPB())
cacheImports, err := buildflags.ParseCacheEntry(t.CacheFrom)
if err != nil {
return nil, err
}
if t.CacheTo != nil {
bo.CacheTo = controllerapi.CreateCaches(t.CacheTo.ToPB())
bo.CacheFrom = controllerapi.CreateCaches(cacheImports)
cacheExports, err := buildflags.ParseCacheEntry(t.CacheTo)
if err != nil {
return nil, err
}
bo.CacheTo = controllerapi.CreateCaches(cacheExports)
outputs, err := buildflags.ParseExports(t.Outputs)
if err != nil {
return nil, err
}
bo.Exports, bo.ExportsLocalPathsTemporary, err = controllerapi.CreateExports(t.Outputs.ToPB())
bo.Exports, bo.ExportsLocalPathsTemporary, err = controllerapi.CreateExports(outputs)
if err != nil {
return nil, err
}
@@ -1491,7 +1399,11 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
}
}
bo.Attests = controllerapi.CreateAttestations(t.Attest.ToPB())
attests, err := buildflags.ParseAttests(t.Attest)
if err != nil {
return nil, err
}
bo.Attests = controllerapi.CreateAttestations(attests)
bo.SourcePolicy, err = build.ReadSourcePolicy()
if err != nil {
@@ -1506,7 +1418,9 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
}
bo.Ulimits = ulimits
bo.Allow = append(bo.Allow, t.Entitlements...)
for _, ent := range t.Entitlements {
bo.Allow = append(bo.Allow, entitlements.Entitlement(ent))
}
return bo, nil
}
@@ -1515,7 +1429,7 @@ func defaultTarget() *Target {
return &Target{}
}
func removeDupesStr(s []string) []string {
func removeDupes(s []string) []string {
i := 0
seen := make(map[string]struct{}, len(s))
for _, v := range s {
@@ -1532,76 +1446,106 @@ func removeDupesStr(s []string) []string {
return s[:i]
}
func setPushOverride(outputs []*buildflags.ExportEntry, push bool) []*buildflags.ExportEntry {
if !push {
// Disable push for any relevant export types
for i := 0; i < len(outputs); {
output := outputs[i]
switch output.Type {
case "registry":
// Filter out registry output type
outputs[i], outputs[len(outputs)-1] = outputs[len(outputs)-1], outputs[i]
outputs = outputs[:len(outputs)-1]
func removeAttestDupes(s []string) []string {
res := []string{}
m := map[string]int{}
for _, v := range s {
att, err := buildflags.ParseAttest(v)
if err != nil {
res = append(res, v)
continue
case "image":
// Override push attribute
output.Attrs["push"] = "false"
}
i++
}
return outputs
}
// Force push to be enabled
setPush := true
for _, output := range outputs {
if output.Type != "docker" {
// If there is an output type that is not docker, don't set "push"
setPush = false
}
// Set push attribute for image
if output.Type == "image" {
output.Attrs["push"] = "true"
if i, ok := m[att.Type]; ok {
res[i] = v
} else {
m[att.Type] = len(res)
res = append(res, v)
}
}
if setPush {
// No existing output that pushes so add one
outputs = append(outputs, &buildflags.ExportEntry{
Type: "image",
Attrs: map[string]string{
"push": "true",
},
})
}
return outputs
return res
}
func setLoadOverride(outputs []*buildflags.ExportEntry, load bool) []*buildflags.ExportEntry {
func parseOutput(str string) map[string]string {
fields, err := csvvalue.Fields(str, nil)
if err != nil {
return nil
}
res := map[string]string{}
for _, field := range fields {
parts := strings.SplitN(field, "=", 2)
if len(parts) == 2 {
res[parts[0]] = parts[1]
}
}
return res
}
func parseOutputType(str string) string {
if out := parseOutput(str); out != nil {
if v, ok := out["type"]; ok {
return v
}
}
return ""
}
func setPushOverride(outputs []string, push bool) []string {
var out []string
setPush := true
for _, output := range outputs {
typ := parseOutputType(output)
if typ == "image" || typ == "registry" {
// no need to set push if image or registry types already defined
setPush = false
if typ == "registry" {
if !push {
// don't set registry output if "push" is false
continue
}
// no need to set "push" attribute to true for registry
out = append(out, output)
continue
}
out = append(out, output+",push="+strconv.FormatBool(push))
} else {
if typ != "docker" {
// if there is any output that is not docker, don't set "push"
setPush = false
}
out = append(out, output)
}
}
if push && setPush {
out = append(out, "type=image,push=true")
}
return out
}
func setLoadOverride(outputs []string, load bool) []string {
if !load {
return outputs
}
setLoad := true
for _, output := range outputs {
switch output.Type {
case "docker":
// if dest is not set, we can reuse this entry and do not need to set load
if output.Destination == "" {
return outputs
if typ := parseOutputType(output); typ == "docker" {
if v := parseOutput(output); v != nil {
// dest set means we want to output as tar so don't set load
if _, ok := v["dest"]; !ok {
setLoad = false
break
}
case "image", "registry", "oci":
// Ignore
default:
}
} else if typ != "image" && typ != "registry" && typ != "oci" {
// if there is any output that is not an image, registry
// or oci, don't set "load" similar to push override
return outputs
setLoad = false
break
}
}
outputs = append(outputs, &buildflags.ExportEntry{
Type: "docker",
})
if setLoad {
outputs = append(outputs, "type=docker")
}
return outputs
}
@@ -1619,9 +1563,14 @@ func sanitizeTargetName(target string) string {
return strings.ReplaceAll(target, ".", "_")
}
func isSubset(s1, s2 []string) bool {
for _, item := range s1 {
if !slices.Contains(s2, item) {
func sliceEqual(s1, s2 []string) bool {
if len(s1) != len(s2) {
return false
}
sort.Strings(s1)
sort.Strings(s2)
for i := range s1 {
if s1[i] != s2[i] {
return false
}
}
@@ -1635,24 +1584,3 @@ func toNamedContexts(m map[string]string) map[string]build.NamedContext {
}
return m2
}
type arrValue[B any] interface {
encoding.TextUnmarshaler
*B
}
func parseArrValue[T any, PT arrValue[T]](s []string) ([]*T, error) {
outputs := make([]*T, 0, len(s))
for _, text := range s {
if text == "" {
continue
}
output := new(T)
if err := PT(output).UnmarshalText([]byte(text)); err != nil {
return nil, err
}
outputs = append(outputs, output)
}
return outputs, nil
}

View File

@@ -2,14 +2,12 @@ package bake
import (
"context"
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"testing"
"github.com/docker/buildx/util/buildflags"
"github.com/moby/buildkit/util/entitlements"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -34,18 +32,6 @@ target "webapp" {
args = {
VAR_BOTH = "webapp"
}
annotations = [
"index,manifest:org.opencontainers.image.authors=dvdksn"
]
attest = [
"type=provenance,mode=max"
]
platforms = [
"linux/amd64"
]
secret = [
"id=FOO,env=FOO"
]
inherits = ["webDEP"]
}`),
}
@@ -127,31 +113,6 @@ target "webapp" {
})
})
t.Run("AnnotationsOverrides", func(t *testing.T) {
t.Parallel()
m, g, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.annotations=index,manifest:org.opencontainers.image.vendor=docker"}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Equal(t, []string{"index,manifest:org.opencontainers.image.authors=dvdksn", "index,manifest:org.opencontainers.image.vendor=docker"}, m["webapp"].Annotations)
require.Equal(t, 1, len(g))
require.Equal(t, []string{"webapp"}, g["default"].Targets)
})
t.Run("AttestOverride", func(t *testing.T) {
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.attest=type=sbom"}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Len(t, m["webapp"].Attest, 2)
require.Equal(t, "provenance", m["webapp"].Attest[0].Type)
require.Equal(t, "sbom", m["webapp"].Attest[1].Type)
})
t.Run("AttestAppend", func(t *testing.T) {
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.attest+=type=sbom"}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Len(t, m["webapp"].Attest, 2)
require.Equal(t, "provenance", m["webapp"].Attest[0].Type)
require.Equal(t, "sbom", m["webapp"].Attest[1].Type)
})
t.Run("ContextOverride", func(t *testing.T) {
t.Parallel()
_, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.context"}, nil, &EntitlementConf{})
@@ -173,49 +134,6 @@ target "webapp" {
require.Equal(t, []string{"webapp"}, g["default"].Targets)
})
t.Run("PlatformOverride", func(t *testing.T) {
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.platform=linux/arm64"}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Equal(t, []string{"linux/arm64"}, m["webapp"].Platforms)
})
t.Run("PlatformAppend", func(t *testing.T) {
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.platform+=linux/arm64"}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Equal(t, []string{"linux/amd64", "linux/arm64"}, m["webapp"].Platforms)
})
t.Run("PlatformAppendMulti", func(t *testing.T) {
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.platform+=linux/arm64", "webapp.platform+=linux/riscv64"}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Equal(t, []string{"linux/amd64", "linux/arm64", "linux/riscv64"}, m["webapp"].Platforms)
})
t.Run("PlatformAppendMultiLastOverride", func(t *testing.T) {
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.platform+=linux/arm64", "webapp.platform=linux/riscv64"}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Equal(t, []string{"linux/arm64", "linux/riscv64"}, m["webapp"].Platforms)
})
t.Run("SecretsOverride", func(t *testing.T) {
t.Setenv("FOO", "foo")
t.Setenv("BAR", "bar")
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.secrets=id=BAR,env=BAR"}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Len(t, m["webapp"].Secrets, 1)
require.Equal(t, "BAR", m["webapp"].Secrets[0].ID)
})
t.Run("SecretsAppend", func(t *testing.T) {
t.Setenv("FOO", "foo")
t.Setenv("BAR", "bar")
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.secrets+=id=BAR,env=BAR"}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Len(t, m["webapp"].Secrets, 2)
require.Equal(t, "FOO", m["webapp"].Secrets[0].ID)
require.Equal(t, "BAR", m["webapp"].Secrets[1].ID)
})
t.Run("ShmSizeOverride", func(t *testing.T) {
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.shm-size=256m"}, nil, &EntitlementConf{})
require.NoError(t, err)
@@ -310,7 +228,7 @@ func TestPushOverride(t *testing.T) {
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.push=true"}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Equal(t, 1, len(m["app"].Outputs))
require.Equal(t, "type=image,push=true", m["app"].Outputs[0].String())
require.Equal(t, "type=image,push=true", m["app"].Outputs[0])
})
t.Run("type image", func(t *testing.T) {
@@ -324,7 +242,7 @@ func TestPushOverride(t *testing.T) {
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.push=true"}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Equal(t, 1, len(m["app"].Outputs))
require.Equal(t, "type=image,compression=zstd,push=true", m["app"].Outputs[0].String())
require.Equal(t, "type=image,compression=zstd,push=true", m["app"].Outputs[0])
})
t.Run("type image push false", func(t *testing.T) {
@@ -338,7 +256,7 @@ func TestPushOverride(t *testing.T) {
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.push=false"}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Equal(t, 1, len(m["app"].Outputs))
require.Equal(t, "type=image,compression=zstd,push=false", m["app"].Outputs[0].String())
require.Equal(t, "type=image,compression=zstd,push=false", m["app"].Outputs[0])
})
t.Run("type registry", func(t *testing.T) {
@@ -352,7 +270,7 @@ func TestPushOverride(t *testing.T) {
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.push=true"}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Equal(t, 1, len(m["app"].Outputs))
require.Equal(t, "type=registry", m["app"].Outputs[0].String())
require.Equal(t, "type=registry", m["app"].Outputs[0])
})
t.Run("type registry push false", func(t *testing.T) {
@@ -382,9 +300,9 @@ func TestPushOverride(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 2, len(m))
require.Equal(t, 1, len(m["foo"].Outputs))
require.Equal(t, []string{"type=local,dest=out"}, stringify(m["foo"].Outputs))
require.Equal(t, []string{"type=local,dest=out"}, m["foo"].Outputs)
require.Equal(t, 1, len(m["bar"].Outputs))
require.Equal(t, []string{"type=image,push=true"}, stringify(m["bar"].Outputs))
require.Equal(t, []string{"type=image,push=true"}, m["bar"].Outputs)
})
}
@@ -399,7 +317,7 @@ func TestLoadOverride(t *testing.T) {
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Equal(t, 1, len(m["app"].Outputs))
require.Equal(t, "type=docker", m["app"].Outputs[0].String())
require.Equal(t, "type=docker", m["app"].Outputs[0])
})
t.Run("type docker", func(t *testing.T) {
@@ -413,7 +331,7 @@ func TestLoadOverride(t *testing.T) {
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Equal(t, 1, len(m["app"].Outputs))
require.Equal(t, []string{"type=docker"}, stringify(m["app"].Outputs))
require.Equal(t, []string{"type=docker"}, m["app"].Outputs)
})
t.Run("type image", func(t *testing.T) {
@@ -427,7 +345,7 @@ func TestLoadOverride(t *testing.T) {
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Equal(t, 2, len(m["app"].Outputs))
require.Equal(t, []string{"type=docker", "type=image"}, stringify(m["app"].Outputs))
require.Equal(t, []string{"type=image", "type=docker"}, m["app"].Outputs)
})
t.Run("type image load false", func(t *testing.T) {
@@ -441,7 +359,7 @@ func TestLoadOverride(t *testing.T) {
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=false"}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Equal(t, 1, len(m["app"].Outputs))
require.Equal(t, []string{"type=image"}, stringify(m["app"].Outputs))
require.Equal(t, []string{"type=image"}, m["app"].Outputs)
})
t.Run("type registry", func(t *testing.T) {
@@ -455,7 +373,7 @@ func TestLoadOverride(t *testing.T) {
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Equal(t, 2, len(m["app"].Outputs))
require.Equal(t, []string{"type=docker", "type=registry"}, stringify(m["app"].Outputs))
require.Equal(t, []string{"type=registry", "type=docker"}, m["app"].Outputs)
})
t.Run("type oci", func(t *testing.T) {
@@ -469,7 +387,7 @@ func TestLoadOverride(t *testing.T) {
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Equal(t, 2, len(m["app"].Outputs))
require.Equal(t, []string{"type=docker", "type=oci,dest=out"}, stringify(m["app"].Outputs))
require.Equal(t, []string{"type=oci,dest=out", "type=docker"}, m["app"].Outputs)
})
t.Run("type docker with dest", func(t *testing.T) {
@@ -483,7 +401,7 @@ func TestLoadOverride(t *testing.T) {
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Equal(t, 2, len(m["app"].Outputs))
require.Equal(t, []string{"type=docker", "type=docker,dest=out"}, stringify(m["app"].Outputs))
require.Equal(t, []string{"type=docker,dest=out", "type=docker"}, m["app"].Outputs)
})
t.Run("type local and empty target", func(t *testing.T) {
@@ -500,9 +418,9 @@ func TestLoadOverride(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 2, len(m))
require.Equal(t, 1, len(m["foo"].Outputs))
require.Equal(t, []string{"type=local,dest=out"}, stringify(m["foo"].Outputs))
require.Equal(t, []string{"type=local,dest=out"}, m["foo"].Outputs)
require.Equal(t, 1, len(m["bar"].Outputs))
require.Equal(t, []string{"type=docker"}, stringify(m["bar"].Outputs))
require.Equal(t, []string{"type=docker"}, m["bar"].Outputs)
})
}
@@ -522,10 +440,12 @@ func TestLoadAndPushOverride(t *testing.T) {
require.Equal(t, 2, len(m))
require.Equal(t, 1, len(m["foo"].Outputs))
require.Equal(t, []string{"type=local,dest=out"}, stringify(m["foo"].Outputs))
sort.Strings(m["foo"].Outputs)
require.Equal(t, []string{"type=local,dest=out"}, m["foo"].Outputs)
require.Equal(t, 2, len(m["bar"].Outputs))
require.Equal(t, []string{"type=docker", "type=image,push=true"}, stringify(m["bar"].Outputs))
sort.Strings(m["bar"].Outputs)
require.Equal(t, []string{"type=docker", "type=image,push=true"}, m["bar"].Outputs)
})
t.Run("type registry", func(t *testing.T) {
@@ -541,7 +461,8 @@ func TestLoadAndPushOverride(t *testing.T) {
require.Equal(t, 1, len(m))
require.Equal(t, 2, len(m["foo"].Outputs))
require.Equal(t, []string{"type=docker", "type=registry"}, stringify(m["foo"].Outputs))
sort.Strings(m["foo"].Outputs)
require.Equal(t, []string{"type=docker", "type=registry"}, m["foo"].Outputs)
})
}
@@ -692,7 +613,7 @@ func TestHCLContextCwdPrefix(t *testing.T) {
require.Contains(t, m, "app")
assert.Equal(t, "test", *m["app"].Dockerfile)
assert.Equal(t, "foo", *m["app"].Context)
assert.Equal(t, filepath.Clean("foo/test"), bo["app"].Inputs.DockerfilePath)
assert.Equal(t, "foo/test", bo["app"].Inputs.DockerfilePath)
assert.Equal(t, "foo", bo["app"].Inputs.ContextPath)
}
@@ -753,7 +674,7 @@ func TestOverrideMerge(t *testing.T) {
require.Equal(t, []string{"linux/arm", "linux/ppc64le"}, m["app"].Platforms)
require.Equal(t, 1, len(m["app"].Outputs))
require.Equal(t, "type=registry", m["app"].Outputs[0].String())
require.Equal(t, "type=registry", m["app"].Outputs[0])
}
func TestReadContexts(t *testing.T) {
@@ -919,7 +840,7 @@ func TestReadContextFromTargetChain(t *testing.T) {
mid, ok := m["mid"]
require.True(t, ok)
require.Equal(t, 1, len(mid.Outputs))
require.Equal(t, "type=cacheonly", mid.Outputs[0].String())
require.Equal(t, "type=cacheonly", mid.Outputs[0])
require.Equal(t, 1, len(mid.Contexts))
base, ok := m["base"]
@@ -991,28 +912,7 @@ func TestReadContextFromTargetInvalidPlatforms(t *testing.T) {
}
_, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, []string{}, nil, &EntitlementConf{})
require.Error(t, err)
require.Contains(t, err.Error(), "are not a subset of")
}
func TestReadContextFromTargetSubsetPlatforms(t *testing.T) {
ctx := context.TODO()
fp := File{
Name: "docker-bake.hcl",
Data: []byte(`
target "mid" {
output = ["foo"]
platforms = ["linux/amd64", "linux/riscv64", "linux/arm64"]
}
target "app" {
contexts = {
bar: "target:mid"
}
platforms = ["linux/amd64", "linux/arm64"]
}
`),
}
_, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, []string{}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Contains(t, err.Error(), "defined for different platforms")
}
func TestReadTargetsDefault(t *testing.T) {
@@ -1024,12 +924,11 @@ func TestReadTargetsDefault(t *testing.T) {
Data: []byte(`
target "default" {
dockerfile = "test"
}`),
}
}`)}
m, g, err := ReadTargets(ctx, []File{f}, []string{"default"}, nil, nil, &EntitlementConf{})
require.NoError(t, err)
require.Equal(t, 1, len(g))
require.Equal(t, 0, len(g))
require.Equal(t, 1, len(m))
require.Equal(t, "test", *m["default"].Dockerfile)
}
@@ -1043,8 +942,7 @@ func TestReadTargetsSpecified(t *testing.T) {
Data: []byte(`
target "image" {
dockerfile = "test"
}`),
}
}`)}
_, _, err := ReadTargets(ctx, []File{f}, []string{"default"}, nil, nil, &EntitlementConf{})
require.Error(t, err)
@@ -1069,8 +967,7 @@ group "foo" {
}
target "image" {
dockerfile = "test"
}`),
}
}`)}
m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil, &EntitlementConf{})
require.NoError(t, err)
@@ -1096,8 +993,7 @@ target "foo" {
}
target "image" {
dockerfile = "test"
}`),
}
}`)}
m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil, &EntitlementConf{})
require.NoError(t, err)
@@ -1140,8 +1036,7 @@ target "image-release" {
inherits = ["image"]
output = ["type=image,push=true"]
tags = ["user/app:latest"]
}`),
}
}`)}
fyml := File{
Name: "docker-compose.yml",
@@ -1165,8 +1060,7 @@ services:
args:
CT_ECR: foo
CT_TAG: bar
image: ct-fake-aws:bar`),
}
image: ct-fake-aws:bar`)}
fjson := File{
Name: "docker-bake.json",
@@ -1187,8 +1081,7 @@ services:
]
}
}
}`),
}
}`)}
m, g, err := ReadTargets(ctx, []File{fhcl}, []string{"default"}, nil, nil, &EntitlementConf{})
require.NoError(t, err)
@@ -1196,7 +1089,7 @@ services:
require.Equal(t, []string{"image"}, g["default"].Targets)
require.Equal(t, 1, len(m))
require.Equal(t, 1, len(m["image"].Outputs))
require.Equal(t, "type=docker", m["image"].Outputs[0].String())
require.Equal(t, "type=docker", m["image"].Outputs[0])
m, g, err = ReadTargets(ctx, []File{fhcl}, []string{"image-release"}, nil, nil, &EntitlementConf{})
require.NoError(t, err)
@@ -1204,7 +1097,7 @@ services:
require.Equal(t, []string{"image-release"}, g["default"].Targets)
require.Equal(t, 1, len(m))
require.Equal(t, 1, len(m["image-release"].Outputs))
require.Equal(t, "type=image,push=true", m["image-release"].Outputs[0].String())
require.Equal(t, "type=image,push=true", m["image-release"].Outputs[0])
m, g, err = ReadTargets(ctx, []File{fhcl}, []string{"image", "image-release"}, nil, nil, &EntitlementConf{})
require.NoError(t, err)
@@ -1213,7 +1106,7 @@ services:
require.Equal(t, 2, len(m))
require.Equal(t, ".", *m["image"].Context)
require.Equal(t, 1, len(m["image-release"].Outputs))
require.Equal(t, "type=image,push=true", m["image-release"].Outputs[0].String())
require.Equal(t, "type=image,push=true", m["image-release"].Outputs[0])
m, g, err = ReadTargets(ctx, []File{fyml, fhcl}, []string{"default"}, nil, nil, &EntitlementConf{})
require.NoError(t, err)
@@ -1273,8 +1166,7 @@ target "foo" {
}
target "image" {
output = ["type=docker"]
}`),
}
}`)}
m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil, &EntitlementConf{})
require.NoError(t, err)
@@ -1308,8 +1200,7 @@ target "foo" {
}
target "image" {
output = ["type=docker"]
}`),
}
}`)}
m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil, &EntitlementConf{})
require.NoError(t, err)
@@ -1318,7 +1209,7 @@ target "image" {
require.Equal(t, []string{"foo", "image"}, g["foo"].Targets)
require.Equal(t, 2, len(m))
require.Equal(t, "bar", *m["foo"].Dockerfile)
require.Equal(t, "type=docker", m["image"].Outputs[0].String())
require.Equal(t, "type=docker", m["image"].Outputs[0])
m, g, err = ReadTargets(ctx, []File{f}, []string{"foo", "image"}, nil, nil, &EntitlementConf{})
require.NoError(t, err)
@@ -1327,7 +1218,7 @@ target "image" {
require.Equal(t, []string{"foo", "image"}, g["foo"].Targets)
require.Equal(t, 2, len(m))
require.Equal(t, "bar", *m["foo"].Dockerfile)
require.Equal(t, "type=docker", m["image"].Outputs[0].String())
require.Equal(t, "type=docker", m["image"].Outputs[0])
}
func TestNestedInherits(t *testing.T) {
@@ -1356,8 +1247,7 @@ target "c" {
}
target "d" {
inherits = ["b", "c"]
}`),
}
}`)}
cases := []struct {
name string
@@ -1425,8 +1315,7 @@ group "default" {
"child1",
"child2"
]
}`),
}
}`)}
cases := []struct {
name string
@@ -1462,9 +1351,9 @@ group "default" {
require.Equal(t, []string{"child1", "child2"}, g["default"].Targets)
require.Equal(t, 2, len(m))
require.Equal(t, tt.wantch1, m["child1"].Args)
require.Equal(t, []string{"type=docker"}, stringify(m["child1"].Outputs))
require.Equal(t, []string{"type=docker"}, m["child1"].Outputs)
require.Equal(t, tt.wantch2, m["child2"].Args)
require.Equal(t, []string{"type=docker"}, stringify(m["child2"].Outputs))
require.Equal(t, []string{"type=docker"}, m["child2"].Outputs)
})
}
}
@@ -1553,8 +1442,7 @@ group "e" {
target "f" {
context = "./foo"
}`),
}
}`)}
cases := []struct {
names []string
@@ -1790,7 +1678,7 @@ func TestAttestDuplicates(t *testing.T) {
ctx := context.TODO()
m, _, err := ReadTargets(ctx, []File{fp}, []string{"default"}, nil, nil, &EntitlementConf{})
require.Equal(t, []string{"type=provenance,mode=max", "type=sbom,foo=bar"}, stringify(m["default"].Attest))
require.Equal(t, []string{"type=sbom,foo=bar", "type=provenance,mode=max"}, m["default"].Attest)
require.NoError(t, err)
opts, err := TargetsToBuildOpt(m, &Input{})
@@ -1801,7 +1689,7 @@ func TestAttestDuplicates(t *testing.T) {
}, opts["default"].Attests)
m, _, err = ReadTargets(ctx, []File{fp}, []string{"default"}, []string{"*.attest=type=sbom,disabled=true"}, nil, &EntitlementConf{})
require.Equal(t, []string{"type=provenance,mode=max", "type=sbom,disabled=true"}, stringify(m["default"].Attest))
require.Equal(t, []string{"type=sbom,disabled=true", "type=provenance,mode=max"}, m["default"].Attest)
require.NoError(t, err)
opts, err = TargetsToBuildOpt(m, &Input{})
@@ -1833,34 +1721,13 @@ func TestAnnotations(t *testing.T) {
require.Equal(t, 1, len(m))
require.Contains(t, m, "app")
require.Equal(t, "type=image,name=foo", m["app"].Outputs[0].String())
require.Equal(t, "type=image,name=foo", m["app"].Outputs[0])
require.Equal(t, "manifest[linux/amd64]:foo=bar", m["app"].Annotations[0])
require.Len(t, bo["app"].Exports, 1)
require.Equal(t, "bar", bo["app"].Exports[0].Attrs["annotation-manifest[linux/amd64].foo"])
}
func TestRefOnlyCacheOptions(t *testing.T) {
fp := File{
Name: "docker-bake.hcl",
Data: []byte(
`target "app" {
output = ["type=image,name=foo"]
cache-from = ["ref1,ref2"]
}`),
}
ctx := context.TODO()
m, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil, &EntitlementConf{})
require.NoError(t, err)
require.Len(t, m, 1)
require.Contains(t, m, "app")
require.Equal(t, buildflags.CacheOptions{
{Type: "registry", Attrs: map[string]string{"ref": "ref1"}},
{Type: "registry", Attrs: map[string]string{"ref": "ref2"}},
}, m["app"].CacheFrom)
}
func TestHCLEntitlements(t *testing.T) {
fp := File{
Name: "docker-bake.hcl",
@@ -1886,8 +1753,8 @@ func TestHCLEntitlements(t *testing.T) {
require.Equal(t, "network.host", m["app"].Entitlements[1])
require.Len(t, bo["app"].Allow, 2)
require.Equal(t, entitlements.EntitlementSecurityInsecure.String(), bo["app"].Allow[0])
require.Equal(t, entitlements.EntitlementNetworkHost.String(), bo["app"].Allow[1])
require.Equal(t, entitlements.EntitlementSecurityInsecure, bo["app"].Allow[0])
require.Equal(t, entitlements.EntitlementNetworkHost, bo["app"].Allow[1])
}
func TestEntitlementsForNetHostCompose(t *testing.T) {
@@ -1926,7 +1793,7 @@ func TestEntitlementsForNetHostCompose(t *testing.T) {
require.Equal(t, "host", *m["app"].NetworkMode)
require.Len(t, bo["app"].Allow, 1)
require.Equal(t, entitlements.EntitlementNetworkHost.String(), bo["app"].Allow[0])
require.Equal(t, entitlements.EntitlementNetworkHost, bo["app"].Allow[0])
require.Equal(t, "host", bo["app"].NetworkMode)
}
@@ -1957,7 +1824,7 @@ func TestEntitlementsForNetHost(t *testing.T) {
require.Equal(t, "host", *m["app"].NetworkMode)
require.Len(t, bo["app"].Allow, 1)
require.Equal(t, entitlements.EntitlementNetworkHost.String(), bo["app"].Allow[0])
require.Equal(t, entitlements.EntitlementNetworkHost, bo["app"].Allow[0])
require.Equal(t, "host", bo["app"].NetworkMode)
}
@@ -2142,73 +2009,6 @@ target "app" {
})
}
func TestVariableValidationConditionNull(t *testing.T) {
fp := File{
Name: "docker-bake.hcl",
Data: []byte(`
variable "PORT" {
default = 3000
validation {}
}
target "app" {
args = {
PORT = PORT
}
}
`),
}
_, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, nil, nil, &EntitlementConf{})
require.Error(t, err)
require.Contains(t, err.Error(), "Condition expression must return either true or false, not null")
}
func TestVariableValidationConditionUnknownValue(t *testing.T) {
fp := File{
Name: "docker-bake.hcl",
Data: []byte(`
variable "PORT" {
default = 3000
validation {
condition = "foo"
}
}
target "app" {
args = {
PORT = PORT
}
}
`),
}
_, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, nil, nil, &EntitlementConf{})
require.Error(t, err)
require.Contains(t, err.Error(), "Invalid condition result value: a bool is required")
}
func TestVariableValidationInvalidErrorMessage(t *testing.T) {
fp := File{
Name: "docker-bake.hcl",
Data: []byte(`
variable "FOO" {
default = 0
validation {
condition = FOO > 5
}
}
target "app" {
args = {
FOO = FOO
}
}
`),
}
_, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, nil, nil, &EntitlementConf{})
require.Error(t, err)
require.Contains(t, err.Error(), "This check failed, but has an invalid error message")
}
// https://github.com/docker/buildx/issues/2822
func TestVariableEmpty(t *testing.T) {
fp := File{
@@ -2224,87 +2024,7 @@ target "app" {
}
ctx := context.TODO()
m, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil, &EntitlementConf{})
_, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil, &EntitlementConf{})
require.NoError(t, err)
require.Contains(t, m, "app")
require.Len(t, m["app"].Outputs, 0)
}
// https://github.com/docker/buildx/issues/2858
func TestOverrideEmpty(t *testing.T) {
fp := File{
Name: "docker-bake.hcl",
Data: []byte(`
target "app" {
output = ["./bin"]
}
`),
}
ctx := context.TODO()
m, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, []string{"app.output="}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Contains(t, m, "app")
require.Len(t, m["app"].Outputs, 0)
}
// https://github.com/docker/buildx/issues/2859
func TestGroupTargetsWithDefault(t *testing.T) {
t.Run("OnTarget", func(t *testing.T) {
fp := File{
Name: "docker-bake.hcl",
Data: []byte(
`target "default" {
dockerfile = "Dockerfile"
platforms = ["linux/amd64"]
}
target "multiarch" {
dockerfile = "Dockerfile"
platforms = ["linux/amd64","linux/arm64","linux/arm/v7","linux/arm/v6"]
}`),
}
ctx := context.TODO()
_, g, err := ReadTargets(ctx, []File{fp}, []string{"default", "multiarch"}, nil, nil, &EntitlementConf{})
require.NoError(t, err)
require.Equal(t, 1, len(g))
require.Equal(t, 2, len(g["default"].Targets))
require.Equal(t, []string{"default", "multiarch"}, g["default"].Targets)
})
t.Run("OnGroup", func(t *testing.T) {
fp := File{
Name: "docker-bake.hcl",
Data: []byte(
`group "default" {
targets = ["app", "multiarch"]
}
target "app" {
dockerfile = "app.Dockerfile"
}
target "foo" {
dockerfile = "foo.Dockerfile"
}
target "multiarch" {
dockerfile = "Dockerfile"
platforms = ["linux/amd64","linux/arm64","linux/arm/v7","linux/arm/v6"]
}`),
}
ctx := context.TODO()
_, g, err := ReadTargets(ctx, []File{fp}, []string{"default", "foo"}, nil, nil, &EntitlementConf{})
require.NoError(t, err)
require.Equal(t, 1, len(g))
require.Equal(t, 3, len(g["default"].Targets))
require.Equal(t, []string{"app", "foo", "multiarch"}, g["default"].Targets)
})
}
func stringify[V fmt.Stringer](values []V) []string {
s := make([]string, len(values))
for i, v := range values {
s[i] = v.String()
}
sort.Strings(s)
return s
}
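
For reference: the stringify helper above, removed by this revert, rendered any slice of fmt.Stringer values as a sorted []string so the assertions did not depend on element order. A minimal runnable sketch of that behavior, with item as a hypothetical stand-in for the util/buildflags capsule types (their real definitions are not shown in this diff):

package main

import (
	"fmt"
	"sort"
)

// item stands in for the buildflags types (ExportEntry, Secret, SSH, ...)
// that implement fmt.Stringer in the pre-revert code.
type item string

func (i item) String() string { return string(i) }

// stringify mirrors the helper removed above: render each value, then sort.
func stringify[V fmt.Stringer](values []V) []string {
	s := make([]string, len(values))
	for i, v := range values {
		s[i] = v.String()
	}
	sort.Strings(s)
	return s
}

func main() {
	outs := []item{"type=image,push=true", "type=docker"}
	fmt.Println(stringify(outs)) // [type=docker type=image,push=true]
}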


@@ -5,14 +5,13 @@ import (
"fmt"
"os"
"path/filepath"
"slices"
"sort"
"strings"
"github.com/compose-spec/compose-go/v2/consts"
"github.com/compose-spec/compose-go/v2/dotenv"
"github.com/compose-spec/compose-go/v2/loader"
composetypes "github.com/compose-spec/compose-go/v2/types"
"github.com/docker/buildx/util/buildflags"
dockeropts "github.com/docker/cli/opts"
"github.com/docker/go-units"
"github.com/pkg/errors"
@@ -92,9 +91,6 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
if s.Build.AdditionalContexts != nil {
additionalContexts = map[string]string{}
for k, v := range s.Build.AdditionalContexts {
if strings.HasPrefix(v, "service:") {
v = strings.Replace(v, "service:", "target:", 1)
}
additionalContexts[k] = v
}
}
@@ -123,16 +119,14 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
}
}
var ssh []*buildflags.SSH
var ssh []string
for _, bkey := range s.Build.SSH {
sshkey := composeToBuildkitSSH(bkey)
ssh = append(ssh, sshkey)
}
slices.SortFunc(ssh, func(a, b *buildflags.SSH) int {
return a.Less(b)
})
sort.Strings(ssh)
var secrets []*buildflags.Secret
var secrets []string
for _, bs := range s.Build.Secrets {
secret, err := composeToBuildkitSecret(bs, cfg.Secrets[bs.Source])
if err != nil {
@@ -148,16 +142,6 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
labels[k] = &v
}
cacheFrom, err := buildflags.ParseCacheEntry(s.Build.CacheFrom)
if err != nil {
return nil, err
}
cacheTo, err := buildflags.ParseCacheEntry(s.Build.CacheTo)
if err != nil {
return nil, err
}
g.Targets = append(g.Targets, targetName)
t := &Target{
Name: targetName,
@@ -174,10 +158,9 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
val, ok := cfg.Environment[val]
return val, ok
})),
CacheFrom: cacheFrom,
CacheTo: cacheTo,
CacheFrom: s.Build.CacheFrom,
CacheTo: s.Build.CacheTo,
NetworkMode: networkModeP,
Platforms: s.Build.Platforms,
SSH: ssh,
Secrets: secrets,
ShmSize: shmSize,
@@ -218,7 +201,7 @@ func validateComposeFile(dt []byte, fn string) (bool, error) {
}
func validateCompose(dt []byte, envs map[string]string) error {
_, err := loader.LoadWithContext(context.Background(), composetypes.ConfigDetails{
_, err := loader.Load(composetypes.ConfigDetails{
ConfigFiles: []composetypes.ConfigFile{
{
Content: dt,
@@ -314,12 +297,10 @@ type xbake struct {
// https://github.com/docker/docs/blob/main/content/build/bake/compose-file.md#extension-field-with-x-bake
}
type (
stringMap map[string]string
stringArray []string
)
type stringMap map[string]string
type stringArray []string
func (sa *stringArray) UnmarshalYAML(unmarshal func(any) error) error {
func (sa *stringArray) UnmarshalYAML(unmarshal func(interface{}) error) error {
var multi []string
err := unmarshal(&multi)
if err != nil {
@@ -336,7 +317,7 @@ func (sa *stringArray) UnmarshalYAML(unmarshal func(any) error) error {
// composeExtTarget converts Compose build extension x-bake to bake Target
// https://github.com/compose-spec/compose-spec/blob/master/spec.md#extension
func (t *Target) composeExtTarget(exts map[string]any) error {
func (t *Target) composeExtTarget(exts map[string]interface{}) error {
var xb xbake
ext, ok := exts["x-bake"]
@@ -353,45 +334,23 @@ func (t *Target) composeExtTarget(exts map[string]any) error {
t.Tags = dedupSlice(append(t.Tags, xb.Tags...))
}
if len(xb.CacheFrom) > 0 {
cacheFrom, err := buildflags.ParseCacheEntry(xb.CacheFrom)
if err != nil {
return err
}
t.CacheFrom = t.CacheFrom.Merge(cacheFrom)
t.CacheFrom = dedupSlice(append(t.CacheFrom, xb.CacheFrom...))
}
if len(xb.CacheTo) > 0 {
cacheTo, err := buildflags.ParseCacheEntry(xb.CacheTo)
if err != nil {
return err
}
t.CacheTo = t.CacheTo.Merge(cacheTo)
t.CacheTo = dedupSlice(append(t.CacheTo, xb.CacheTo...))
}
if len(xb.Secrets) > 0 {
secrets, err := parseArrValue[buildflags.Secret](xb.Secrets)
if err != nil {
return err
}
t.Secrets = t.Secrets.Merge(secrets)
t.Secrets = dedupSlice(append(t.Secrets, xb.Secrets...))
}
if len(xb.SSH) > 0 {
ssh, err := parseArrValue[buildflags.SSH](xb.SSH)
if err != nil {
return err
}
t.SSH = t.SSH.Merge(ssh)
slices.SortFunc(t.SSH, func(a, b *buildflags.SSH) int {
return a.Less(b)
})
t.SSH = dedupSlice(append(t.SSH, xb.SSH...))
sort.Strings(t.SSH)
}
if len(xb.Platforms) > 0 {
t.Platforms = dedupSlice(append(t.Platforms, xb.Platforms...))
}
if len(xb.Outputs) > 0 {
outputs, err := parseArrValue[buildflags.ExportEntry](xb.Outputs)
if err != nil {
return err
}
t.Outputs = t.Outputs.Merge(outputs)
t.Outputs = dedupSlice(append(t.Outputs, xb.Outputs...))
}
if xb.Pull != nil {
t.Pull = xb.Pull
@@ -411,30 +370,35 @@ func (t *Target) composeExtTarget(exts map[string]any) error {
// composeToBuildkitSecret converts a secret from compose format to buildkit's
// csv format.
func composeToBuildkitSecret(inp composetypes.ServiceSecretConfig, psecret composetypes.SecretConfig) (*buildflags.Secret, error) {
func composeToBuildkitSecret(inp composetypes.ServiceSecretConfig, psecret composetypes.SecretConfig) (string, error) {
if psecret.External {
return nil, errors.Errorf("unsupported external secret %s", psecret.Name)
return "", errors.Errorf("unsupported external secret %s", psecret.Name)
}
secret := &buildflags.Secret{}
var bkattrs []string
if inp.Source != "" {
secret.ID = inp.Source
bkattrs = append(bkattrs, "id="+inp.Source)
}
if psecret.File != "" {
secret.FilePath = psecret.File
bkattrs = append(bkattrs, "src="+psecret.File)
}
if psecret.Environment != "" {
secret.Env = psecret.Environment
bkattrs = append(bkattrs, "env="+psecret.Environment)
}
return secret, nil
return strings.Join(bkattrs, ","), nil
}
// composeToBuildkitSSH converts an SSH key from compose format to buildkit's
// csv format.
func composeToBuildkitSSH(sshKey composetypes.SSHKey) *buildflags.SSH {
bkssh := &buildflags.SSH{ID: sshKey.ID}
func composeToBuildkitSSH(sshKey composetypes.SSHKey) string {
var bkattrs []string
bkattrs = append(bkattrs, sshKey.ID)
if sshKey.Path != "" {
bkssh.Paths = []string{sshKey.Path}
bkattrs = append(bkattrs, sshKey.Path)
}
return bkssh
return strings.Join(bkattrs, "=")
}
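
With the revert, both conversions above emit plain CSV-style strings rather than typed buildflags values. A self-contained sketch of the resulting behavior, using trimmed-down stand-ins for the compose-go types (the struct fields here are simplified assumptions, not the real definitions):

package main

import (
	"fmt"
	"strings"
)

// sshKey, serviceSecret, and secretConfig mirror only the fields that the
// restored conversions read; the real compose-go types carry more fields.
type sshKey struct{ ID, Path string }

type serviceSecret struct{ Source string }

type secretConfig struct{ File, Environment string }

// toBuildkitSSH: "ID" alone, or "ID=Path" when a path is set.
func toBuildkitSSH(k sshKey) string {
	attrs := []string{k.ID}
	if k.Path != "" {
		attrs = append(attrs, k.Path)
	}
	return strings.Join(attrs, "=")
}

// toBuildkitSecret: comma-joined id/src/env attributes.
func toBuildkitSecret(inp serviceSecret, ps secretConfig) string {
	var attrs []string
	if inp.Source != "" {
		attrs = append(attrs, "id="+inp.Source)
	}
	if ps.File != "" {
		attrs = append(attrs, "src="+ps.File)
	}
	if ps.Environment != "" {
		attrs = append(attrs, "env="+ps.Environment)
	}
	return strings.Join(attrs, ",")
}

func main() {
	fmt.Println(toBuildkitSSH(sshKey{ID: "key", Path: "path/to/key"})) // key=path/to/key
	fmt.Println(toBuildkitSecret(serviceSecret{Source: "token"},
		secretConfig{Environment: "ENV_TOKEN"})) // id=token,env=ENV_TOKEN
}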


@@ -12,7 +12,7 @@ import (
)
func TestParseCompose(t *testing.T) {
dt := []byte(`
var dt = []byte(`
services:
db:
build: ./db
@@ -33,7 +33,7 @@ services:
cache_to:
- type=local,dest=path/to/cache
ssh:
- key=/path/to/key
- key=path/to/key
- default
secrets:
- token
@@ -74,14 +74,14 @@ secrets:
require.Equal(t, "Dockerfile-alternate", *c.Targets[1].Dockerfile)
require.Equal(t, 1, len(c.Targets[1].Args))
require.Equal(t, ptrstr("123"), c.Targets[1].Args["buildno"])
require.Equal(t, []string{"type=local,src=path/to/cache"}, stringify(c.Targets[1].CacheFrom))
require.Equal(t, []string{"type=local,dest=path/to/cache"}, stringify(c.Targets[1].CacheTo))
require.Equal(t, []string{"type=local,src=path/to/cache"}, c.Targets[1].CacheFrom)
require.Equal(t, []string{"type=local,dest=path/to/cache"}, c.Targets[1].CacheTo)
require.Equal(t, "none", *c.Targets[1].NetworkMode)
require.Equal(t, []string{"default", "key=/path/to/key"}, stringify(c.Targets[1].SSH))
require.Equal(t, []string{"default", "key=path/to/key"}, c.Targets[1].SSH)
require.Equal(t, []string{
"id=aws,src=/root/.aws/credentials",
"id=token,env=ENV_TOKEN",
}, stringify(c.Targets[1].Secrets))
"id=aws,src=/root/.aws/credentials",
}, c.Targets[1].Secrets)
require.Equal(t, "webapp2", c.Targets[2].Name)
require.Equal(t, "dir", *c.Targets[2].Context)
@@ -89,7 +89,7 @@ secrets:
}
func TestNoBuildOutOfTreeService(t *testing.T) {
dt := []byte(`
var dt = []byte(`
services:
external:
image: "verycooldb:1337"
@@ -103,7 +103,7 @@ services:
}
func TestParseComposeTarget(t *testing.T) {
dt := []byte(`
var dt = []byte(`
services:
db:
build:
@@ -129,7 +129,7 @@ services:
}
func TestComposeBuildWithoutContext(t *testing.T) {
dt := []byte(`
var dt = []byte(`
services:
db:
build:
@@ -153,7 +153,7 @@ services:
}
func TestBuildArgEnvCompose(t *testing.T) {
dt := []byte(`
var dt = []byte(`
version: "3.8"
services:
example:
@@ -179,7 +179,7 @@ services:
}
func TestInconsistentComposeFile(t *testing.T) {
dt := []byte(`
var dt = []byte(`
services:
webapp:
entrypoint: echo 1
@@ -190,7 +190,7 @@ services:
}
func TestAdvancedNetwork(t *testing.T) {
dt := []byte(`
var dt = []byte(`
services:
db:
networks:
@@ -215,7 +215,7 @@ networks:
}
func TestTags(t *testing.T) {
dt := []byte(`
var dt = []byte(`
services:
example:
image: example
@@ -233,7 +233,7 @@ services:
}
func TestDependsOnList(t *testing.T) {
dt := []byte(`
var dt = []byte(`
version: "3.8"
services:
@@ -269,7 +269,7 @@ networks:
}
func TestComposeExt(t *testing.T) {
dt := []byte(`
var dt = []byte(`
services:
addon:
image: ct-addon:bar
@@ -283,7 +283,7 @@ services:
tags:
- ct-addon:baz
ssh:
key: /path/to/key
key: path/to/key
args:
CT_ECR: foo
CT_TAG: bar
@@ -336,23 +336,23 @@ services:
require.Equal(t, map[string]*string{"CT_ECR": ptrstr("foo"), "CT_TAG": ptrstr("bar")}, c.Targets[0].Args)
require.Equal(t, []string{"ct-addon:baz", "ct-addon:foo", "ct-addon:alp"}, c.Targets[0].Tags)
require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[0].Platforms)
require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheFrom))
require.Equal(t, []string{"type=local,dest=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheTo))
require.Equal(t, []string{"default", "key=/path/to/key", "other=path/to/otherkey"}, stringify(c.Targets[0].SSH))
require.Equal(t, []string{"user/app:cache", "type=local,src=path/to/cache"}, c.Targets[0].CacheFrom)
require.Equal(t, []string{"user/app:cache", "type=local,dest=path/to/cache"}, c.Targets[0].CacheTo)
require.Equal(t, []string{"default", "key=path/to/key", "other=path/to/otherkey"}, c.Targets[0].SSH)
require.Equal(t, newBool(true), c.Targets[0].Pull)
require.Equal(t, map[string]string{"alpine": "docker-image://alpine:3.13"}, c.Targets[0].Contexts)
require.Equal(t, []string{"ct-fake-aws:bar"}, c.Targets[1].Tags)
require.Equal(t, []string{"id=mysecret,src=/local/secret", "id=mysecret2,src=/local/secret2"}, stringify(c.Targets[1].Secrets))
require.Equal(t, []string{"default"}, stringify(c.Targets[1].SSH))
require.Equal(t, []string{"id=mysecret,src=/local/secret", "id=mysecret2,src=/local/secret2"}, c.Targets[1].Secrets)
require.Equal(t, []string{"default"}, c.Targets[1].SSH)
require.Equal(t, []string{"linux/arm64"}, c.Targets[1].Platforms)
require.Equal(t, []string{"type=docker"}, stringify(c.Targets[1].Outputs))
require.Equal(t, []string{"type=docker"}, c.Targets[1].Outputs)
require.Equal(t, newBool(true), c.Targets[1].NoCache)
require.Equal(t, ptrstr("128MiB"), c.Targets[1].ShmSize)
require.Equal(t, []string{"nofile=1024:1024"}, c.Targets[1].Ulimits)
}
func TestComposeExtDedup(t *testing.T) {
dt := []byte(`
var dt = []byte(`
services:
webapp:
image: app:bar
@@ -383,9 +383,9 @@ services:
require.NoError(t, err)
require.Equal(t, 1, len(c.Targets))
require.Equal(t, []string{"ct-addon:foo", "ct-addon:baz"}, c.Targets[0].Tags)
require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheFrom))
require.Equal(t, []string{"type=local,dest=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheTo))
require.Equal(t, []string{"default", "key=path/to/key"}, stringify(c.Targets[0].SSH))
require.Equal(t, []string{"user/app:cache", "type=local,src=path/to/cache"}, c.Targets[0].CacheFrom)
require.Equal(t, []string{"user/app:cache", "type=local,dest=path/to/cache"}, c.Targets[0].CacheTo)
require.Equal(t, []string{"default", "key=path/to/key"}, c.Targets[0].SSH)
}
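
These merge assertions rely on dedupSlice, whose definition is not part of this diff. Assuming it keeps the first occurrence of each string in input order (consistent with the expected values above), a sketch would be:

// dedupSlice, as assumed by the append-based x-bake merging restored above:
// keep only the first occurrence of each string, preserving input order.
func dedupSlice(s []string) []string {
	seen := make(map[string]struct{}, len(s))
	out := make([]string, 0, len(s))
	for _, v := range s {
		if _, ok := seen[v]; ok {
			continue
		}
		seen[v] = struct{}{}
		out = append(out, v)
	}
	return out
}

// e.g. dedupSlice([]string{"a", "b", "a"}) == []string{"a", "b"}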
func TestEnv(t *testing.T) {
@@ -396,7 +396,7 @@ func TestEnv(t *testing.T) {
_, err = envf.WriteString("FOO=bsdf -csdf\n")
require.NoError(t, err)
dt := []byte(`
var dt = []byte(`
services:
scratch:
build:
@@ -424,7 +424,7 @@ func TestDotEnv(t *testing.T) {
err := os.WriteFile(filepath.Join(tmpdir, ".env"), []byte("FOO=bar"), 0644)
require.NoError(t, err)
dt := []byte(`
var dt = []byte(`
services:
scratch:
build:
@@ -443,7 +443,7 @@ services:
}
func TestPorts(t *testing.T) {
dt := []byte(`
var dt = []byte(`
services:
foo:
build:
@@ -463,21 +463,6 @@ services:
require.NoError(t, err)
}
func TestPlatforms(t *testing.T) {
dt := []byte(`
services:
foo:
build:
context: .
platforms:
- linux/amd64
- linux/arm64
`)
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[0].Platforms)
}
func newBool(val bool) *bool {
b := val
return &b
@@ -679,7 +664,7 @@ target "default" {
}
func TestComposeNullArgs(t *testing.T) {
dt := []byte(`
var dt = []byte(`
services:
scratch:
build:
@@ -695,7 +680,7 @@ services:
}
func TestDependsOn(t *testing.T) {
dt := []byte(`
var dt = []byte(`
services:
foo:
build:
@@ -726,7 +711,7 @@ services:
`), 0644)
require.NoError(t, err)
dt := []byte(`
var dt = []byte(`
include:
- compose-foo.yml
@@ -755,7 +740,7 @@ services:
}
func TestDevelop(t *testing.T) {
dt := []byte(`
var dt = []byte(`
services:
scratch:
build:
@@ -774,7 +759,7 @@ services:
}
func TestCgroup(t *testing.T) {
dt := []byte(`
var dt = []byte(`
services:
scratch:
build:
@@ -787,7 +772,7 @@ services:
}
func TestProjectName(t *testing.T) {
dt := []byte(`
var dt = []byte(`
services:
scratch:
build:
@@ -813,37 +798,6 @@ services:
})
}
func TestServiceContext(t *testing.T) {
dt := []byte(`
services:
base:
build:
dockerfile: baseapp.Dockerfile
command: ./entrypoint.sh
webapp:
build:
context: ./dir
additional_contexts:
base: service:base
`)
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
require.Equal(t, 1, len(c.Groups))
require.Equal(t, "default", c.Groups[0].Name)
sort.Strings(c.Groups[0].Targets)
require.Equal(t, []string{"base", "webapp"}, c.Groups[0].Targets)
require.Equal(t, 2, len(c.Targets))
sort.Slice(c.Targets, func(i, j int) bool {
return c.Targets[i].Name < c.Targets[j].Name
})
require.Equal(t, "webapp", c.Targets[1].Name)
require.Equal(t, map[string]string{"base": "target:base"}, c.Targets[1].Contexts)
}
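
The removed test above exercised the service: context translation that this revert also drops from ParseCompose (see the additional_contexts hunk in the parser change earlier in this section). For reference, the removed logic amounted to:

package main

import (
	"fmt"
	"strings"
)

// mapAdditionalContext reproduces the translation removed by this revert:
// compose additional_contexts entries of the form "service:<name>" were
// rewritten to bake's "target:<name>" references.
func mapAdditionalContext(v string) string {
	if strings.HasPrefix(v, "service:") {
		return strings.Replace(v, "service:", "target:", 1)
	}
	return v
}

func main() {
	fmt.Println(mapAdditionalContext("service:base")) // target:base
	fmt.Println(mapAdditionalContext("./dir"))        // ./dir
}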
// chdir changes the current working directory to the named directory,
// and then restores the original working directory at the end of the test.
func chdir(t *testing.T, dir string) {


@@ -19,8 +19,6 @@ import (
"github.com/docker/buildx/util/osutil"
"github.com/moby/buildkit/util/entitlements"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/tonistiigi/go-csvvalue"
)
type EntitlementKey string
@@ -28,7 +26,6 @@ type EntitlementKey string
const (
EntitlementKeyNetworkHost EntitlementKey = "network.host"
EntitlementKeySecurityInsecure EntitlementKey = "security.insecure"
EntitlementKeyDevice EntitlementKey = "device"
EntitlementKeyFSRead EntitlementKey = "fs.read"
EntitlementKeyFSWrite EntitlementKey = "fs.write"
EntitlementKeyFS EntitlementKey = "fs"
@@ -41,7 +38,6 @@ const (
type EntitlementConf struct {
NetworkHost bool
SecurityInsecure bool
Devices *EntitlementsDevicesConf
FSRead []string
FSWrite []string
ImagePush []string
@@ -49,11 +45,6 @@ type EntitlementConf struct {
SSH bool
}
type EntitlementsDevicesConf struct {
All bool
Devices map[string]struct{}
}
func ParseEntitlements(in []string) (EntitlementConf, error) {
var conf EntitlementConf
for _, e := range in {
@@ -67,22 +58,6 @@ func ParseEntitlements(in []string) (EntitlementConf, error) {
default:
k, v, _ := strings.Cut(e, "=")
switch k {
case string(EntitlementKeyDevice):
if v == "" {
conf.Devices = &EntitlementsDevicesConf{All: true}
continue
}
fields, err := csvvalue.Fields(v, nil)
if err != nil {
return EntitlementConf{}, errors.Wrapf(err, "failed to parse device entitlement %q", v)
}
if conf.Devices == nil {
conf.Devices = &EntitlementsDevicesConf{}
}
if conf.Devices.Devices == nil {
conf.Devices.Devices = make(map[string]struct{}, 0)
}
conf.Devices.Devices[fields[0]] = struct{}{}
case string(EntitlementKeyFSRead):
conf.FSRead = append(conf.FSRead, v)
case string(EntitlementKeyFSWrite):
@@ -119,34 +94,12 @@ func (c EntitlementConf) Validate(m map[string]build.Options) (EntitlementConf,
func (c EntitlementConf) check(bo build.Options, expected *EntitlementConf) error {
for _, e := range bo.Allow {
k, rest, _ := strings.Cut(e, "=")
switch k {
case entitlements.EntitlementDevice.String():
if rest == "" {
if c.Devices == nil || !c.Devices.All {
expected.Devices = &EntitlementsDevicesConf{All: true}
}
continue
}
fields, err := csvvalue.Fields(rest, nil)
if err != nil {
return errors.Wrapf(err, "failed to parse device entitlement %q", rest)
}
if expected.Devices == nil {
expected.Devices = &EntitlementsDevicesConf{}
}
if expected.Devices.Devices == nil {
expected.Devices.Devices = make(map[string]struct{}, 0)
}
expected.Devices.Devices[fields[0]] = struct{}{}
}
switch e {
case entitlements.EntitlementNetworkHost.String():
case entitlements.EntitlementNetworkHost:
if !c.NetworkHost {
expected.NetworkHost = true
}
case entitlements.EntitlementSecurityInsecure.String():
case entitlements.EntitlementSecurityInsecure:
if !c.SecurityInsecure {
expected.SecurityInsecure = true
}
@@ -191,11 +144,9 @@ func (c EntitlementConf) check(bo build.Options, expected *EntitlementConf) erro
roPaths[p] = struct{}{}
}
if len(ssh.Paths) == 0 {
if !c.SSH {
expected.SSH = true
}
}
}
var err error
expected.FSRead, err = findMissingPaths(c.FSRead, roPaths)
@@ -233,18 +184,6 @@ func (c EntitlementConf) Prompt(ctx context.Context, isRemote bool, out io.Write
flags = append(flags, string(EntitlementKeySecurityInsecure))
}
if c.Devices != nil {
if c.Devices.All {
msgs = append(msgs, " - Access to CDI devices")
flags = append(flags, string(EntitlementKeyDevice))
} else {
for d := range c.Devices.Devices {
msgs = append(msgs, fmt.Sprintf(" - Access to device %s", d))
flags = append(flags, string(EntitlementKeyDevice)+"="+d)
}
}
}
if c.SSH {
msgsFS = append(msgsFS, " - Forwarding default SSH agent socket")
flagsFS = append(flagsFS, string(EntitlementKeySSH))
@@ -306,7 +245,7 @@ func (c EntitlementConf) Prompt(ctx context.Context, isRemote bool, out io.Write
fmt.Fprintf(out, "\nPass %q to grant requested privileges.\n", strings.Join(slices.Concat(flags, flagsFS), " "))
}
args := slices.Clone(os.Args)
args := append([]string(nil), os.Args...)
if v, ok := os.LookupEnv("DOCKER_CLI_PLUGIN_ORIGINAL_CLI_COMMAND"); ok && v != "" {
args[0] = v
}
@@ -317,7 +256,7 @@ func (c EntitlementConf) Prompt(ctx context.Context, isRemote bool, out io.Write
fmt.Fprintf(out, "%s %s %s\n\n", strings.Join(args[:idx+1], " "), strings.Join(slices.Concat(flags, flagsFS), " "), strings.Join(args[idx+1:], " "))
}
fsEntitlementsEnabled := true
fsEntitlementsEnabled := false
if isRemote {
if v, ok := os.LookupEnv("BAKE_ALLOW_REMOTE_FS_ACCESS"); ok {
vv, err := strconv.ParseBool(v)
@@ -325,6 +264,8 @@ func (c EntitlementConf) Prompt(ctx context.Context, isRemote bool, out io.Write
return errors.Wrapf(err, "failed to parse BAKE_ALLOW_REMOTE_FS_ACCESS value %q", v)
}
fsEntitlementsEnabled = !vv
} else {
fsEntitlementsEnabled = true
}
}
v, fsEntitlementsSet := os.LookupEnv("BUILDX_BAKE_ENTITLEMENTS_FS")
@@ -337,10 +278,10 @@ func (c EntitlementConf) Prompt(ctx context.Context, isRemote bool, out io.Write
}
if !fsEntitlementsEnabled && len(msgs) == 0 {
return nil
if !fsEntitlementsSet {
fmt.Fprintf(out, "This warning will become an error in a future release. To enable filesystem entitlements checks at the moment, set BUILDX_BAKE_ENTITLEMENTS_FS=1 .\n\n")
}
if fsEntitlementsEnabled && !fsEntitlementsSet && len(msgsFS) != 0 {
fmt.Fprintf(out, "To disable filesystem entitlements checks, you can set BUILDX_BAKE_ENTITLEMENTS_FS=0 .\n\n")
return nil
}
if term {
@@ -502,21 +443,13 @@ func evaluatePaths(in []string) ([]string, bool, error) {
continue
}
v, err := filepath.Abs(p)
if err != nil {
logrus.Warnf("failed to evaluate entitlement path %q: %v", p, err)
continue
}
v, rest, err := evaluateToExistingPath(v)
if err != nil {
return nil, false, errors.Wrapf(err, "failed to evaluate path %q", p)
}
v, err = osutil.GetLongPathName(v)
v, err = filepath.EvalSymlinks(v)
if err != nil {
return nil, false, errors.Wrapf(err, "failed to evaluate path %q", p)
}
if rest != "" {
v = filepath.Join(v, rest)
}
out = append(out, v)
}
return out, allowAny, nil
@@ -525,7 +458,7 @@ func evaluatePaths(in []string) ([]string, bool, error) {
func evaluateToExistingPaths(in map[string]struct{}) (map[string]struct{}, error) {
m := make(map[string]struct{}, len(in))
for p := range in {
v, _, err := evaluateToExistingPath(p)
v, err := evaluateToExistingPath(p)
if err != nil {
return nil, errors.Wrapf(err, "failed to evaluate path %q", p)
}
@@ -538,10 +471,10 @@ func evaluateToExistingPaths(in map[string]struct{}) (map[string]struct{}, error
return m, nil
}
func evaluateToExistingPath(in string) (string, string, error) {
func evaluateToExistingPath(in string) (string, error) {
in, err := filepath.Abs(in)
if err != nil {
return "", "", err
return "", err
}
volLen := volumeNameLen(in)
@@ -596,29 +529,29 @@ func evaluateToExistingPath(in string) (string, string, error) {
if os.IsNotExist(err) {
for r := len(dest) - 1; r >= volLen; r-- {
if os.IsPathSeparator(dest[r]) {
return dest[:r], in[start:], nil
return dest[:r], nil
}
}
return vol, in[start:], nil
return vol, nil
}
return "", "", err
return "", err
}
if fi.Mode()&fs.ModeSymlink == 0 {
if !fi.Mode().IsDir() && end < len(in) {
return "", "", syscall.ENOTDIR
return "", syscall.ENOTDIR
}
continue
}
linksWalked++
if linksWalked > 255 {
return "", "", errors.New("too many symlinks")
return "", errors.New("too many symlinks")
}
link, err := os.Readlink(dest)
if err != nil {
return "", "", err
return "", err
}
in = link + in[end:]
@@ -651,7 +584,7 @@ func evaluateToExistingPath(in string) (string, string, error) {
end = 0
}
}
return filepath.Clean(dest), "", nil
return filepath.Clean(dest), nil
}
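
The restored single-return evaluateToExistingPath resolves its input to the deepest prefix that exists on disk, following symlinks along the way; the pre-revert variant additionally returned the non-existing remainder. Setting the symlink machinery aside, the core idea can be sketched as follows (a simplified illustration, not the function above):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// deepestExisting walks up from the requested path until a component exists
// and returns that prefix. Symlink resolution, which the real function also
// performs, is omitted from this sketch.
func deepestExisting(in string) (string, error) {
	p, err := filepath.Abs(in)
	if err != nil {
		return "", err
	}
	for {
		if _, err := os.Lstat(p); err == nil {
			return p, nil
		} else if !os.IsNotExist(err) {
			return "", err
		}
		parent := filepath.Dir(p)
		if parent == p { // reached the filesystem root
			return p, nil
		}
		p = parent
	}
}

func main() {
	p, _ := deepestExisting("/tmp/does/not/exist")
	fmt.Println(p) // typically "/tmp"
}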
func volumeNameLen(s string) int {


@@ -89,7 +89,7 @@ func TestEvaluateToExistingPath(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result, _, err := evaluateToExistingPath(tt.input)
result, err := evaluateToExistingPath(tt.input)
if tt.expectErr {
require.Error(t, err)
@@ -208,8 +208,8 @@ func TestValidateEntitlements(t *testing.T) {
{
name: "NetworkHostMissing",
opt: build.Options{
Allow: []string{
entitlements.EntitlementNetworkHost.String(),
Allow: []entitlements.Entitlement{
entitlements.EntitlementNetworkHost,
},
},
expected: EntitlementConf{
@@ -223,8 +223,8 @@ func TestValidateEntitlements(t *testing.T) {
NetworkHost: true,
},
opt: build.Options{
Allow: []string{
entitlements.EntitlementNetworkHost.String(),
Allow: []entitlements.Entitlement{
entitlements.EntitlementNetworkHost,
},
},
expected: EntitlementConf{
@@ -234,9 +234,9 @@ func TestValidateEntitlements(t *testing.T) {
{
name: "SecurityAndNetworkHostMissing",
opt: build.Options{
Allow: []string{
entitlements.EntitlementNetworkHost.String(),
entitlements.EntitlementSecurityInsecure.String(),
Allow: []entitlements.Entitlement{
entitlements.EntitlementNetworkHost,
entitlements.EntitlementSecurityInsecure,
},
},
expected: EntitlementConf{
@@ -251,9 +251,9 @@ func TestValidateEntitlements(t *testing.T) {
NetworkHost: true,
},
opt: build.Options{
Allow: []string{
entitlements.EntitlementNetworkHost.String(),
entitlements.EntitlementSecurityInsecure.String(),
Allow: []entitlements.Entitlement{
entitlements.EntitlementNetworkHost,
entitlements.EntitlementSecurityInsecure,
},
},
expected: EntitlementConf{
@@ -341,7 +341,7 @@ func TestValidateEntitlements(t *testing.T) {
return nil
}
// if not, then escapeLink is not allowed
exp, _, err := evaluateToExistingPath(escapeLink)
exp, err := evaluateToExistingPath(escapeLink)
require.NoError(t, err)
exp, err = filepath.EvalSymlinks(exp)
require.NoError(t, err)
@@ -363,48 +363,6 @@ func TestValidateEntitlements(t *testing.T) {
},
expected: EntitlementConf{},
},
{
name: "NonExistingAllowedPathSubpath",
opt: build.Options{
ExportsLocalPathsTemporary: []string{
dir1,
},
},
conf: EntitlementConf{
FSRead: []string{wd},
FSWrite: []string{filepath.Join(dir1, "not/exists")},
},
expected: EntitlementConf{
FSWrite: []string{expDir1}, // dir1 is still needed as only subpath was allowed
},
},
{
name: "NonExistingAllowedPathMatches",
opt: build.Options{
ExportsLocalPathsTemporary: []string{
filepath.Join(dir1, "not/exists"),
},
},
conf: EntitlementConf{
FSRead: []string{wd},
FSWrite: []string{filepath.Join(dir1, "not/exists")},
},
expected: EntitlementConf{
FSWrite: []string{expDir1}, // dir1 is still needed as build also needs to write not/exists directory
},
},
{
name: "NonExistingBuildPath",
opt: build.Options{
ExportsLocalPathsTemporary: []string{
filepath.Join(dir1, "not/exists"),
},
},
conf: EntitlementConf{
FSRead: []string{wd},
FSWrite: []string{dir1},
},
},
}
for _, tc := range tcases {


@@ -2,10 +2,8 @@ package bake
import (
"reflect"
"regexp"
"testing"
hcl "github.com/hashicorp/hcl/v2"
"github.com/stretchr/testify/require"
)
@@ -19,7 +17,6 @@ func TestHCLBasic(t *testing.T) {
target "db" {
context = "./db"
tags = ["docker.io/tonistiigi/db"]
output = ["type=image"]
}
target "webapp" {
@@ -28,9 +25,6 @@ func TestHCLBasic(t *testing.T) {
args = {
buildno = "123"
}
output = [
{ type = "image" }
]
}
target "cross" {
@@ -603,167 +597,6 @@ func TestHCLAttrsCustomType(t *testing.T) {
require.Equal(t, ptrstr("linux/arm64"), c.Targets[0].Args["v1"])
}
func TestHCLAttrsCapsuleType(t *testing.T) {
dt := []byte(`
target "app" {
attest = [
{ type = "provenance", mode = "max" },
"type=sbom,disabled=true,generator=foo,\"ENV1=bar,baz\",ENV2=hello",
]
cache-from = [
{ type = "registry", ref = "user/app:cache" },
"type=local,src=path/to/cache",
]
cache-to = [
{ type = "local", dest = "path/to/cache" },
]
output = [
{ type = "oci", dest = "../out.tar" },
"type=local,dest=../out",
]
secret = [
{ id = "mysecret", src = "/local/secret" },
{ id = "mysecret2", env = "TOKEN" },
]
ssh = [
{ id = "default" },
{ id = "key", paths = ["path/to/key"] },
]
}
`)
c, err := ParseFile(dt, "docker-bake.hcl")
require.NoError(t, err)
require.Equal(t, 1, len(c.Targets))
require.Equal(t, []string{"type=provenance,mode=max", "type=sbom,disabled=true,\"ENV1=bar,baz\",ENV2=hello,generator=foo"}, stringify(c.Targets[0].Attest))
require.Equal(t, []string{"type=local,dest=../out", "type=oci,dest=../out.tar"}, stringify(c.Targets[0].Outputs))
require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheFrom))
require.Equal(t, []string{"type=local,dest=path/to/cache"}, stringify(c.Targets[0].CacheTo))
require.Equal(t, []string{"id=mysecret,src=/local/secret", "id=mysecret2,env=TOKEN"}, stringify(c.Targets[0].Secrets))
require.Equal(t, []string{"default", "key=path/to/key"}, stringify(c.Targets[0].SSH))
}
func TestHCLAttrsCapsuleType_ObjectVars(t *testing.T) {
dt := []byte(`
variable "foo" {
default = "bar"
}
target "app" {
cache-from = [
{ type = "registry", ref = "user/app:cache" },
"type=local,src=path/to/cache",
]
cache-to = [ target.app.cache-from[0] ]
output = [
{ type = "oci", dest = "../out.tar" },
"type=local,dest=../out",
]
secret = [
{ id = "mysecret", src = "/local/secret" },
]
ssh = [
{ id = "default" },
{ id = "key", paths = ["path/to/${target.app.output[0].type}"] },
]
}
target "web" {
cache-from = target.app.cache-from
output = [ "type=oci,dest=../${foo}.tar" ]
secret = [
{ id = target.app.output[0].type, src = "/${target.app.cache-from[1].type}/secret" },
]
}
`)
c, err := ParseFile(dt, "docker-bake.hcl")
require.NoError(t, err)
require.Equal(t, 2, len(c.Targets))
findTarget := func(t *testing.T, name string) *Target {
t.Helper()
for _, tgt := range c.Targets {
if tgt.Name == name {
return tgt
}
}
t.Fatalf("could not find target %q", name)
return nil
}
app := findTarget(t, "app")
require.Equal(t, []string{"type=local,dest=../out", "type=oci,dest=../out.tar"}, stringify(app.Outputs))
require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(app.CacheFrom))
require.Equal(t, []string{"user/app:cache"}, stringify(app.CacheTo))
require.Equal(t, []string{"id=mysecret,src=/local/secret"}, stringify(app.Secrets))
require.Equal(t, []string{"default", "key=path/to/oci"}, stringify(app.SSH))
web := findTarget(t, "web")
require.Equal(t, []string{"type=oci,dest=../bar.tar"}, stringify(web.Outputs))
require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(web.CacheFrom))
require.Equal(t, []string{"id=oci,src=/local/secret"}, stringify(web.Secrets))
}
func TestHCLAttrsCapsuleType_MissingVars(t *testing.T) {
dt := []byte(`
target "app" {
attest = [
"type=sbom,disabled=${SBOM}",
]
cache-from = [
{ type = "registry", ref = "user/app:${FOO1}" },
"type=local,src=path/to/cache:${FOO2}",
]
cache-to = [
{ type = "local", dest = "path/to/${BAR}" },
]
output = [
{ type = "oci", dest = "../${OUTPUT}.tar" },
]
secret = [
{ id = "mysecret", src = "/local/${SECRET}" },
]
ssh = [
{ id = "key", paths = ["path/to/${SSH_KEY}"] },
]
}
`)
var diags hcl.Diagnostics
_, err := ParseFile(dt, "docker-bake.hcl")
require.ErrorAs(t, err, &diags)
re := regexp.MustCompile(`There is no variable named "([\w\d_]+)"`)
var actual []string
for _, diag := range diags {
if m := re.FindStringSubmatch(diag.Error()); m != nil {
actual = append(actual, m[1])
}
}
require.ElementsMatch(t,
[]string{"SBOM", "FOO1", "FOO2", "BAR", "OUTPUT", "SECRET", "SSH_KEY"},
actual)
}
func TestHCLMultiFileAttrs(t *testing.T) {
dt := []byte(`
variable "FOO" {
@@ -1645,7 +1478,7 @@ func TestHCLIndexOfFunc(t *testing.T) {
require.Empty(t, c.Targets[1].Tags[1])
}
func ptrstr(s any) *string {
func ptrstr(s interface{}) *string {
var n *string
if reflect.ValueOf(s).Kind() == reflect.String {
ss := s.(string)


@@ -1,355 +0,0 @@
Copyright (c) 2014 HashiCorp, Inc.
Mozilla Public License, version 2.0
1. Definitions
1.1. “Contributor”
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. “Contributor Version”
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor’s Contribution.
1.3. “Contribution”
means Covered Software of a particular Contributor.
1.4. “Covered Software”
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. “Incompatible With Secondary Licenses”
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of version
1.1 or earlier of the License, but not also under the terms of a
Secondary License.
1.6. “Executable Form”
means any form of the work other than Source Code Form.
1.7. “Larger Work”
means a work that combines Covered Software with other material, in a separate
file or files, that is not Covered Software.
1.8. “License”
means this document.
1.9. “Licensable”
means having the right to grant, to the maximum extent possible, whether at the
time of the initial grant or subsequently, any and all of the rights conveyed by
this License.
1.10. “Modifications”
means any of the following:
a. any file in Source Code Form that results from an addition to, deletion
from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. “Patent Claims” of a Contributor
means any patent claim(s), including without limitation, method, process,
and apparatus claims, in any patent Licensable by such Contributor that
would be infringed, but for the grant of the License, by the making,
using, selling, offering for sale, having made, import, or transfer of
either its Contributions or its Contributor Version.
1.12. “Secondary License”
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. “Source Code Form”
means the form of the work preferred for making modifications.
1.14. “You” (or “Your”)
means an individual or a legal entity exercising rights under this
License. For legal entities, “You” includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, “control” means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or as
part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its Contributions
or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution become
effective for each Contribution on the date the Contributor first distributes
such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under this
License. No additional rights or licenses will be implied from the distribution
or licensing of Covered Software under this License. Notwithstanding Section
2.1(b) above, no patent license is granted by a Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party’s
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of its
Contributions.
This License does not grant any rights in the trademarks, service marks, or
logos of any Contributor (except as may be necessary to comply with the
notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this License
(see Section 10.2) or under the terms of a Secondary License (if permitted
under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its Contributions
are its original creation(s) or it has sufficient rights to grant the
rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under applicable
copyright doctrines of fair use, fair dealing, or other equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under the
terms of this License. You must inform recipients that the Source Code Form
of the Covered Software is governed by the terms of this License, and how
they can obtain a copy of this License. You may not attempt to alter or
restrict the recipients’ rights in the Source Code Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this License,
or sublicense it under different terms, provided that the license for
the Executable Form does not attempt to limit or alter the recipients’
rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for the
Covered Software. If the Larger Work is a combination of Covered Software
with a work governed by one or more Secondary Licenses, and the Covered
Software is not Incompatible With Secondary Licenses, this License permits
You to additionally distribute such Covered Software under the terms of
such Secondary License(s), so that the recipient of the Larger Work may, at
their option, further distribute the Covered Software under the terms of
either this License or such Secondary License(s).
3.4. Notices
You may not remove or alter the substance of any license notices (including
copyright notices, patent notices, disclaimers of warranty, or limitations
of liability) contained within the Source Code Form of the Covered
Software, except that You may alter any license notices to the extent
required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on behalf
of any Contributor. You must make it absolutely clear that any such
warranty, support, indemnity, or liability obligation is offered by You
alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute, judicial
order, or regulation then You must: (a) comply with the terms of this License
to the maximum extent possible; and (b) describe the limitations and the code
they affect. Such description must be placed in a text file included with all
distributions of the Covered Software under this License. Except to the
extent prohibited by statute or regulation, such description must be
sufficiently detailed for a recipient of ordinary skill to be able to
understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
if such Contributor fails to notify You of the non-compliance by some
reasonable means prior to 60 days after You have come back into compliance.
Moreover, Your grants from a particular Contributor are reinstated on an
ongoing basis if such Contributor notifies You of the non-compliance by
some reasonable means, this is the first time You have received notice of
non-compliance with this License from such Contributor, and You become
compliant prior to 30 days after Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions, counter-claims,
and cross-claims) alleging that a Contributor Version directly or
indirectly infringes any patent, then the rights granted to You by any and
all Contributors for the Covered Software under Section 2.1 of this License
shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an “as is” basis, without
warranty of any kind, either expressed, implied, or statutory, including,
without limitation, warranties that the Covered Software is free of defects,
merchantable, fit for a particular purpose or non-infringing. The entire
risk as to the quality and performance of the Covered Software is with You.
Should any Covered Software prove defective in any respect, You (not any
Contributor) assume the cost of any necessary servicing, repair, or
correction. This disclaimer of warranty constitutes an essential part of this
License. No use of any Covered Software is authorized under this License
except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from such
party’s negligence to the extent applicable law prohibits such limitation.
Some jurisdictions do not allow the exclusion or limitation of incidental or
consequential damages, so this exclusion and limitation may not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts of
a jurisdiction where the defendant maintains its principal place of business
and such litigation shall be governed by laws of that jurisdiction, without
reference to its conflict-of-law provisions. Nothing in this Section shall
prevent a party’s ability to bring cross-claims or counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject matter
hereof. If any provision of this License is held to be unenforceable, such
provision shall be reformed only to the extent necessary to make it
enforceable. Any law or regulation which provides that the language of a
contract shall be construed against the drafter shall not be used to construe
this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version of
the License under which You originally received the Covered Software, or
under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a modified
version of this License if you rename the license and remove any
references to the name of the license steward (except to note that such
modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file, then
You may include the notice in a location (such as a LICENSE file in a relevant
directory) where a recipient would be likely to look for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - “Incompatible With Secondary Licenses” Notice
This Source Code Form is “Incompatible
With Secondary Licenses”, as defined by
the Mozilla Public License, v. 2.0.


@@ -1,348 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package gohcl
import (
"fmt"
"reflect"
"github.com/hashicorp/hcl/v2"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/convert"
"github.com/zclconf/go-cty/cty/gocty"
)
// DecodeOptions allows customizing sections of the decoding process.
type DecodeOptions struct {
ImpliedType func(gv any) (cty.Type, error)
Convert func(in cty.Value, want cty.Type) (cty.Value, error)
}
func (o DecodeOptions) DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val any) hcl.Diagnostics {
o = o.withDefaults()
rv := reflect.ValueOf(val)
if rv.Kind() != reflect.Ptr {
panic(fmt.Sprintf("target value must be a pointer, not %s", rv.Type().String()))
}
return o.decodeBodyToValue(body, ctx, rv.Elem())
}
// DecodeBody extracts the configuration within the given body into the given
// value. This value must be a non-nil pointer to either a struct or
// a map, where in the former case the configuration will be decoded using
// struct tags and in the latter case only attributes are allowed and their
// values are decoded into the map.
//
// The given EvalContext is used to resolve any variables or functions in
// expressions encountered while decoding. This may be nil to require only
// constant values, for simple applications that do not support variables or
// functions.
//
// The returned diagnostics should be inspected with its HasErrors method to
// determine if the populated value is valid and complete. If error diagnostics
// are returned then the given value may have been partially-populated but
// may still be accessed by a careful caller for static analysis and editor
// integration use-cases.
func DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val any) hcl.Diagnostics {
return DecodeOptions{}.DecodeBody(body, ctx, val)
}
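// Editor's note: a minimal usage sketch, not part of the original file. It
// assumes a body parsed elsewhere (for example with hclsyntax.ParseConfig);
// the ServiceConfig type and field names are hypothetical.
//
//	type ServiceConfig struct {
//	    Name     string   `hcl:"name"`
//	    Replicas int      `hcl:"replicas,optional"`
//	    Rest     hcl.Body `hcl:",remain"`
//	}
//
//	func loadService(body hcl.Body) (*ServiceConfig, hcl.Diagnostics) {
//	    var cfg ServiceConfig
//	    diags := DecodeBody(body, nil, &cfg) // nil EvalContext: constants only
//	    return &cfg, diags
//	}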
func (o DecodeOptions) decodeBodyToValue(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
et := val.Type()
switch et.Kind() {
case reflect.Struct:
return o.decodeBodyToStruct(body, ctx, val)
case reflect.Map:
return o.decodeBodyToMap(body, ctx, val)
default:
panic(fmt.Sprintf("target value must be pointer to struct or map, not %s", et.String()))
}
}
func (o DecodeOptions) decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
schema, partial := ImpliedBodySchema(val.Interface())
var content *hcl.BodyContent
var leftovers hcl.Body
var diags hcl.Diagnostics
if partial {
content, leftovers, diags = body.PartialContent(schema)
} else {
content, diags = body.Content(schema)
}
if content == nil {
return diags
}
tags := getFieldTags(val.Type())
if tags.Body != nil {
fieldIdx := *tags.Body
field := val.Type().Field(fieldIdx)
fieldV := val.Field(fieldIdx)
switch {
case bodyType.AssignableTo(field.Type):
fieldV.Set(reflect.ValueOf(body))
default:
diags = append(diags, o.decodeBodyToValue(body, ctx, fieldV)...)
}
}
if tags.Remain != nil {
fieldIdx := *tags.Remain
field := val.Type().Field(fieldIdx)
fieldV := val.Field(fieldIdx)
switch {
case bodyType.AssignableTo(field.Type):
fieldV.Set(reflect.ValueOf(leftovers))
case attrsType.AssignableTo(field.Type):
attrs, attrsDiags := leftovers.JustAttributes()
if len(attrsDiags) > 0 {
diags = append(diags, attrsDiags...)
}
fieldV.Set(reflect.ValueOf(attrs))
default:
diags = append(diags, o.decodeBodyToValue(leftovers, ctx, fieldV)...)
}
}
for name, fieldIdx := range tags.Attributes {
attr := content.Attributes[name]
field := val.Type().Field(fieldIdx)
fieldV := val.Field(fieldIdx)
if attr == nil {
if !exprType.AssignableTo(field.Type) {
continue
}
// As a special case, if the target is of type hcl.Expression then
// we'll assign an actual expression that evaluates to a cty null,
// so the caller can deal with it within the cty realm rather
// than within the Go realm.
synthExpr := hcl.StaticExpr(cty.NullVal(cty.DynamicPseudoType), body.MissingItemRange())
fieldV.Set(reflect.ValueOf(synthExpr))
continue
}
switch {
case attrType.AssignableTo(field.Type):
fieldV.Set(reflect.ValueOf(attr))
case exprType.AssignableTo(field.Type):
fieldV.Set(reflect.ValueOf(attr.Expr))
default:
diags = append(diags, o.DecodeExpression(
attr.Expr, ctx, fieldV.Addr().Interface(),
)...)
}
}
blocksByType := content.Blocks.ByType()
for typeName, fieldIdx := range tags.Blocks {
blocks := blocksByType[typeName]
field := val.Type().Field(fieldIdx)
ty := field.Type
isSlice := false
isPtr := false
if ty.Kind() == reflect.Slice {
isSlice = true
ty = ty.Elem()
}
if ty.Kind() == reflect.Ptr {
isPtr = true
ty = ty.Elem()
}
if len(blocks) > 1 && !isSlice {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: fmt.Sprintf("Duplicate %s block", typeName),
Detail: fmt.Sprintf(
"Only one %s block is allowed. Another was defined at %s.",
typeName, blocks[0].DefRange.String(),
),
Subject: &blocks[1].DefRange,
})
continue
}
if len(blocks) == 0 {
if isSlice || isPtr {
if val.Field(fieldIdx).IsNil() {
val.Field(fieldIdx).Set(reflect.Zero(field.Type))
}
} else {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: fmt.Sprintf("Missing %s block", typeName),
Detail: fmt.Sprintf("A %s block is required.", typeName),
Subject: body.MissingItemRange().Ptr(),
})
}
continue
}
switch {
case isSlice:
elemType := ty
if isPtr {
elemType = reflect.PointerTo(ty)
}
sli := val.Field(fieldIdx)
if sli.IsNil() {
sli = reflect.MakeSlice(reflect.SliceOf(elemType), len(blocks), len(blocks))
}
for i, block := range blocks {
if isPtr {
if i >= sli.Len() {
sli = reflect.Append(sli, reflect.New(ty))
}
v := sli.Index(i)
if v.IsNil() {
v = reflect.New(ty)
}
diags = append(diags, o.decodeBlockToValue(block, ctx, v.Elem())...)
sli.Index(i).Set(v)
} else {
if i >= sli.Len() {
sli = reflect.Append(sli, reflect.Indirect(reflect.New(ty)))
}
diags = append(diags, o.decodeBlockToValue(block, ctx, sli.Index(i))...)
}
}
if sli.Len() > len(blocks) {
sli.SetLen(len(blocks))
}
val.Field(fieldIdx).Set(sli)
default:
block := blocks[0]
if isPtr {
v := val.Field(fieldIdx)
if v.IsNil() {
v = reflect.New(ty)
}
diags = append(diags, o.decodeBlockToValue(block, ctx, v.Elem())...)
val.Field(fieldIdx).Set(v)
} else {
diags = append(diags, o.decodeBlockToValue(block, ctx, val.Field(fieldIdx))...)
}
}
}
return diags
}
func (o DecodeOptions) decodeBodyToMap(body hcl.Body, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics {
attrs, diags := body.JustAttributes()
if attrs == nil {
return diags
}
mv := reflect.MakeMap(v.Type())
for k, attr := range attrs {
switch {
case attrType.AssignableTo(v.Type().Elem()):
mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr))
case exprType.AssignableTo(v.Type().Elem()):
mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr.Expr))
default:
ev := reflect.New(v.Type().Elem())
diags = append(diags, o.DecodeExpression(attr.Expr, ctx, ev.Interface())...)
mv.SetMapIndex(reflect.ValueOf(k), ev.Elem())
}
}
v.Set(mv)
return diags
}
func (o DecodeOptions) decodeBlockToValue(block *hcl.Block, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics {
diags := o.decodeBodyToValue(block.Body, ctx, v)
if len(block.Labels) > 0 {
blockTags := getFieldTags(v.Type())
for li, lv := range block.Labels {
lfieldIdx := blockTags.Labels[li].FieldIndex
v.Field(lfieldIdx).Set(reflect.ValueOf(lv))
}
}
return diags
}
func (o DecodeOptions) DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val any) hcl.Diagnostics {
o = o.withDefaults()
srcVal, diags := expr.Value(ctx)
convTy, err := o.ImpliedType(val)
if err != nil {
panic(fmt.Sprintf("unsuitable DecodeExpression target: %s", err))
}
srcVal, err = o.Convert(srcVal, convTy)
if err != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Unsuitable value type",
Detail: fmt.Sprintf("Unsuitable value: %s", err.Error()),
Subject: expr.StartRange().Ptr(),
Context: expr.Range().Ptr(),
})
return diags
}
err = gocty.FromCtyValue(srcVal, val)
if err != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Unsuitable value type",
Detail: fmt.Sprintf("Unsuitable value: %s", err.Error()),
Subject: expr.StartRange().Ptr(),
Context: expr.Range().Ptr(),
})
}
return diags
}
// DecodeExpression extracts the value of the given expression into the given
// value. This value must be something that gocty is able to decode into,
// since the final decoding is delegated to that package.
//
// The given EvalContext is used to resolve any variables or functions in
// expressions encountered while decoding. This may be nil to require only
// constant values, for simple applications that do not support variables or
// functions.
//
// The returned diagnostics should be inspected with its HasErrors method to
// determine if the populated value is valid and complete. If error diagnostics
// are returned then the given value may have been partially-populated but
// may still be accessed by a careful caller for static analysis and editor
// integration use-cases.
func DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val any) hcl.Diagnostics {
return DecodeOptions{}.DecodeExpression(expr, ctx, val)
}
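// Editor's note: an illustrative sketch, not in the original file. With a nil
// EvalContext only constant expressions resolve; attr is a hypothetical
// *hcl.Attribute obtained from a decoded body.
//
//	var timeout int
//	if diags := DecodeExpression(attr.Expr, nil, &timeout); diags.HasErrors() {
//	    return diags
//	}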
func (o DecodeOptions) withDefaults() DecodeOptions {
if o.ImpliedType == nil {
o.ImpliedType = gocty.ImpliedType
}
if o.Convert == nil {
o.Convert = convert.Convert
}
return o
}
View File
@@ -1,806 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package gohcl
import (
"encoding/json"
"fmt"
"reflect"
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/hashicorp/hcl/v2"
hclJSON "github.com/hashicorp/hcl/v2/json"
"github.com/zclconf/go-cty/cty"
)
func TestDecodeBody(t *testing.T) {
deepEquals := func(other any) func(v any) bool {
return func(v any) bool {
return reflect.DeepEqual(v, other)
}
}
type withNameExpression struct {
Name hcl.Expression `hcl:"name"`
}
type withTwoAttributes struct {
A string `hcl:"a,optional"`
B string `hcl:"b,optional"`
}
type withNestedBlock struct {
Plain string `hcl:"plain,optional"`
Nested *withTwoAttributes `hcl:"nested,block"`
}
type withListofNestedBlocks struct {
Nested []*withTwoAttributes `hcl:"nested,block"`
}
type withListofNestedBlocksNoPointers struct {
Nested []withTwoAttributes `hcl:"nested,block"`
}
tests := []struct {
Body map[string]any
Target func() any
Check func(v any) bool
DiagCount int
}{
{
map[string]any{},
makeInstantiateType(struct{}{}),
deepEquals(struct{}{}),
0,
},
{
map[string]any{},
makeInstantiateType(struct {
Name string `hcl:"name"`
}{}),
deepEquals(struct {
Name string `hcl:"name"`
}{}),
1, // name is required
},
{
map[string]any{},
makeInstantiateType(struct {
Name *string `hcl:"name"`
}{}),
deepEquals(struct {
Name *string `hcl:"name"`
}{}),
0,
}, // name nil
{
map[string]any{},
makeInstantiateType(struct {
Name string `hcl:"name,optional"`
}{}),
deepEquals(struct {
Name string `hcl:"name,optional"`
}{}),
0,
}, // name optional
{
map[string]any{},
makeInstantiateType(withNameExpression{}),
func(v any) bool {
if v == nil {
return false
}
wne, valid := v.(withNameExpression)
if !valid {
return false
}
if wne.Name == nil {
return false
}
nameVal, _ := wne.Name.Value(nil)
return nameVal.IsNull()
},
0,
},
{
map[string]any{
"name": "Ermintrude",
},
makeInstantiateType(withNameExpression{}),
func(v any) bool {
if v == nil {
return false
}
wne, valid := v.(withNameExpression)
if !valid {
return false
}
if wne.Name == nil {
return false
}
nameVal, _ := wne.Name.Value(nil)
return nameVal.Equals(cty.StringVal("Ermintrude")).True()
},
0,
},
{
map[string]any{
"name": "Ermintrude",
},
makeInstantiateType(struct {
Name string `hcl:"name"`
}{}),
deepEquals(struct {
Name string `hcl:"name"`
}{"Ermintrude"}),
0,
},
{
map[string]any{
"name": "Ermintrude",
"age": 23,
},
makeInstantiateType(struct {
Name string `hcl:"name"`
}{}),
deepEquals(struct {
Name string `hcl:"name"`
}{"Ermintrude"}),
1, // Extraneous "age" property
},
{
map[string]any{
"name": "Ermintrude",
"age": 50,
},
makeInstantiateType(struct {
Name string `hcl:"name"`
Attrs hcl.Attributes `hcl:",remain"`
}{}),
func(gotI any) bool {
got := gotI.(struct {
Name string `hcl:"name"`
Attrs hcl.Attributes `hcl:",remain"`
})
return got.Name == "Ermintrude" && len(got.Attrs) == 1 && got.Attrs["age"] != nil
},
0,
},
{
map[string]any{
"name": "Ermintrude",
"age": 50,
},
makeInstantiateType(struct {
Name string `hcl:"name"`
Remain hcl.Body `hcl:",remain"`
}{}),
func(gotI any) bool {
got := gotI.(struct {
Name string `hcl:"name"`
Remain hcl.Body `hcl:",remain"`
})
attrs, _ := got.Remain.JustAttributes()
return got.Name == "Ermintrude" && len(attrs) == 1 && attrs["age"] != nil
},
0,
},
{
map[string]any{
"name": "Ermintrude",
"living": true,
},
makeInstantiateType(struct {
Name string `hcl:"name"`
Remain map[string]cty.Value `hcl:",remain"`
}{}),
deepEquals(struct {
Name string `hcl:"name"`
Remain map[string]cty.Value `hcl:",remain"`
}{
Name: "Ermintrude",
Remain: map[string]cty.Value{
"living": cty.True,
},
}),
0,
},
{
map[string]any{
"name": "Ermintrude",
"age": 50,
},
makeInstantiateType(struct {
Name string `hcl:"name"`
Body hcl.Body `hcl:",body"`
Remain hcl.Body `hcl:",remain"`
}{}),
func(gotI any) bool {
got := gotI.(struct {
Name string `hcl:"name"`
Body hcl.Body `hcl:",body"`
Remain hcl.Body `hcl:",remain"`
})
attrs, _ := got.Body.JustAttributes()
return got.Name == "Ermintrude" && len(attrs) == 2 &&
attrs["name"] != nil && attrs["age"] != nil
},
0,
},
{
map[string]any{
"noodle": map[string]any{},
},
makeInstantiateType(struct {
Noodle struct{} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
// Generating no diagnostics is good enough for this one.
return true
},
0,
},
{
map[string]any{
"noodle": []map[string]any{{}},
},
makeInstantiateType(struct {
Noodle struct{} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
// Generating no diagnostics is good enough for this one.
return true
},
0,
},
{
map[string]any{
"noodle": []map[string]any{{}, {}},
},
makeInstantiateType(struct {
Noodle struct{} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
// Generating one diagnostic is good enough for this one.
return true
},
1,
},
{
map[string]any{},
makeInstantiateType(struct {
Noodle struct{} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
// Generating one diagnostic is good enough for this one.
return true
},
1,
},
{
map[string]any{
"noodle": []map[string]any{},
},
makeInstantiateType(struct {
Noodle struct{} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
// Generating one diagnostic is good enough for this one.
return true
},
1,
},
{
map[string]any{
"noodle": map[string]any{},
},
makeInstantiateType(struct {
Noodle *struct{} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
return gotI.(struct {
Noodle *struct{} `hcl:"noodle,block"`
}).Noodle != nil
},
0,
},
{
map[string]any{
"noodle": []map[string]any{{}},
},
makeInstantiateType(struct {
Noodle *struct{} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
return gotI.(struct {
Noodle *struct{} `hcl:"noodle,block"`
}).Noodle != nil
},
0,
},
{
map[string]any{
"noodle": []map[string]any{},
},
makeInstantiateType(struct {
Noodle *struct{} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
return gotI.(struct {
Noodle *struct{} `hcl:"noodle,block"`
}).Noodle == nil
},
0,
},
{
map[string]any{
"noodle": []map[string]any{{}, {}},
},
makeInstantiateType(struct {
Noodle *struct{} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
// Generating one diagnostic is good enough for this one.
return true
},
1,
},
{
map[string]any{
"noodle": []map[string]any{},
},
makeInstantiateType(struct {
Noodle []struct{} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
noodle := gotI.(struct {
Noodle []struct{} `hcl:"noodle,block"`
}).Noodle
return len(noodle) == 0
},
0,
},
{
map[string]any{
"noodle": []map[string]any{{}},
},
makeInstantiateType(struct {
Noodle []struct{} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
noodle := gotI.(struct {
Noodle []struct{} `hcl:"noodle,block"`
}).Noodle
return len(noodle) == 1
},
0,
},
{
map[string]any{
"noodle": []map[string]any{{}, {}},
},
makeInstantiateType(struct {
Noodle []struct{} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
noodle := gotI.(struct {
Noodle []struct{} `hcl:"noodle,block"`
}).Noodle
return len(noodle) == 2
},
0,
},
{
map[string]any{
"noodle": map[string]any{},
},
makeInstantiateType(struct {
Noodle struct {
Name string `hcl:"name,label"`
} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
// Generating two diagnostics is good enough for this one.
// (one for the missing noodle block and the other for
// the JSON serialization detecting the missing level of
// hierarchy for the label.)
return true
},
2,
},
{
map[string]any{
"noodle": map[string]any{
"foo_foo": map[string]any{},
},
},
makeInstantiateType(struct {
Noodle struct {
Name string `hcl:"name,label"`
} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
noodle := gotI.(struct {
Noodle struct {
Name string `hcl:"name,label"`
} `hcl:"noodle,block"`
}).Noodle
return noodle.Name == "foo_foo"
},
0,
},
{
map[string]any{
"noodle": map[string]any{
"foo_foo": map[string]any{},
"bar_baz": map[string]any{},
},
},
makeInstantiateType(struct {
Noodle struct {
Name string `hcl:"name,label"`
} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
// One diagnostic is enough for this one.
return true
},
1,
},
{
map[string]any{
"noodle": map[string]any{
"foo_foo": map[string]any{},
"bar_baz": map[string]any{},
},
},
makeInstantiateType(struct {
Noodles []struct {
Name string `hcl:"name,label"`
} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
noodles := gotI.(struct {
Noodles []struct {
Name string `hcl:"name,label"`
} `hcl:"noodle,block"`
}).Noodles
return len(noodles) == 2 && (noodles[0].Name == "foo_foo" || noodles[0].Name == "bar_baz") && (noodles[1].Name == "foo_foo" || noodles[1].Name == "bar_baz") && noodles[0].Name != noodles[1].Name
},
0,
},
{
map[string]any{
"noodle": map[string]any{
"foo_foo": map[string]any{
"type": "rice",
},
},
},
makeInstantiateType(struct {
Noodle struct {
Name string `hcl:"name,label"`
Type string `hcl:"type"`
} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
noodle := gotI.(struct {
Noodle struct {
Name string `hcl:"name,label"`
Type string `hcl:"type"`
} `hcl:"noodle,block"`
}).Noodle
return noodle.Name == "foo_foo" && noodle.Type == "rice"
},
0,
},
{
map[string]any{
"name": "Ermintrude",
"age": 34,
},
makeInstantiateType(map[string]string(nil)),
deepEquals(map[string]string{
"name": "Ermintrude",
"age": "34",
}),
0,
},
{
map[string]any{
"name": "Ermintrude",
"age": 89,
},
makeInstantiateType(map[string]*hcl.Attribute(nil)),
func(gotI any) bool {
got := gotI.(map[string]*hcl.Attribute)
return len(got) == 2 && got["name"] != nil && got["age"] != nil
},
0,
},
{
map[string]any{
"name": "Ermintrude",
"age": 13,
},
makeInstantiateType(map[string]hcl.Expression(nil)),
func(gotI any) bool {
got := gotI.(map[string]hcl.Expression)
return len(got) == 2 && got["name"] != nil && got["age"] != nil
},
0,
},
{
map[string]any{
"name": "Ermintrude",
"living": true,
},
makeInstantiateType(map[string]cty.Value(nil)),
deepEquals(map[string]cty.Value{
"name": cty.StringVal("Ermintrude"),
"living": cty.True,
}),
0,
},
{
// Retain "nested" block while decoding
map[string]any{
"plain": "foo",
},
func() any {
return &withNestedBlock{
Plain: "bar",
Nested: &withTwoAttributes{
A: "bar",
},
}
},
func(gotI any) bool {
foo := gotI.(withNestedBlock)
return foo.Plain == "foo" && foo.Nested != nil && foo.Nested.A == "bar"
},
0,
},
{
// Retain values in "nested" block while decoding
map[string]any{
"nested": map[string]any{
"a": "foo",
},
},
func() any {
return &withNestedBlock{
Nested: &withTwoAttributes{
B: "bar",
},
}
},
func(gotI any) bool {
foo := gotI.(withNestedBlock)
return foo.Nested.A == "foo" && foo.Nested.B == "bar"
},
0,
},
{
// Retain values in "nested" block list while decoding
map[string]any{
"nested": []map[string]any{
{
"a": "foo",
},
},
},
func() any {
return &withListofNestedBlocks{
Nested: []*withTwoAttributes{
{
B: "bar",
},
},
}
},
func(gotI any) bool {
n := gotI.(withListofNestedBlocks)
return n.Nested[0].A == "foo" && n.Nested[0].B == "bar"
},
0,
},
{
// Remove additional elements from the list while decoding nested blocks
map[string]any{
"nested": []map[string]any{
{
"a": "foo",
},
},
},
func() any {
return &withListofNestedBlocks{
Nested: []*withTwoAttributes{
{
B: "bar",
},
{
B: "bar",
},
},
}
},
func(gotI any) bool {
n := gotI.(withListofNestedBlocks)
return len(n.Nested) == 1
},
0,
},
{
// Make sure decoding value slices works the same as pointer slices.
map[string]any{
"nested": []map[string]any{
{
"b": "bar",
},
{
"b": "baz",
},
},
},
func() any {
return &withListofNestedBlocksNoPointers{
Nested: []withTwoAttributes{
{
B: "foo",
},
},
}
},
func(gotI any) bool {
n := gotI.(withListofNestedBlocksNoPointers)
return n.Nested[0].B == "bar" && len(n.Nested) == 2
},
0,
},
}
for i, test := range tests {
// For convenience here we're going to use the JSON parser
// to process the given body.
buf, err := json.Marshal(test.Body)
if err != nil {
t.Fatalf("error JSON-encoding body for test %d: %s", i, err)
}
t.Run(string(buf), func(t *testing.T) {
file, diags := hclJSON.Parse(buf, "test.json")
if len(diags) != 0 {
t.Fatalf("diagnostics while parsing: %s", diags.Error())
}
targetVal := reflect.ValueOf(test.Target())
diags = DecodeBody(file.Body, nil, targetVal.Interface())
if len(diags) != test.DiagCount {
t.Errorf("wrong number of diagnostics %d; want %d", len(diags), test.DiagCount)
for _, diag := range diags {
t.Logf(" - %s", diag.Error())
}
}
got := targetVal.Elem().Interface()
if !test.Check(got) {
t.Errorf("wrong result\ngot: %s", spew.Sdump(got))
}
})
}
}
func TestDecodeExpression(t *testing.T) {
tests := []struct {
Value cty.Value
Target any
Want any
DiagCount int
}{
{
cty.StringVal("hello"),
"",
"hello",
0,
},
{
cty.StringVal("hello"),
cty.NilVal,
cty.StringVal("hello"),
0,
},
{
cty.NumberIntVal(2),
"",
"2",
0,
},
{
cty.StringVal("true"),
false,
true,
0,
},
{
cty.NullVal(cty.String),
"",
"",
1, // null value is not allowed
},
{
cty.UnknownVal(cty.String),
"",
"",
1, // value must be known
},
{
cty.ListVal([]cty.Value{cty.True}),
false,
false,
1, // bool required
},
}
for i, test := range tests {
t.Run(fmt.Sprintf("%02d", i), func(t *testing.T) {
expr := &fixedExpression{test.Value}
targetVal := reflect.New(reflect.TypeOf(test.Target))
diags := DecodeExpression(expr, nil, targetVal.Interface())
if len(diags) != test.DiagCount {
t.Errorf("wrong number of diagnostics %d; want %d", len(diags), test.DiagCount)
for _, diag := range diags {
t.Logf(" - %s", diag.Error())
}
}
got := targetVal.Elem().Interface()
if !reflect.DeepEqual(got, test.Want) {
t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want)
}
})
}
}
type fixedExpression struct {
val cty.Value
}
func (e *fixedExpression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
return e.val, nil
}
func (e *fixedExpression) Range() (r hcl.Range) {
return
}
func (e *fixedExpression) StartRange() (r hcl.Range) {
return
}
func (e *fixedExpression) Variables() []hcl.Traversal {
return nil
}
func makeInstantiateType(target any) func() any {
return func() any {
return reflect.New(reflect.TypeOf(target)).Interface()
}
}
View File
@@ -1,65 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
// Package gohcl allows decoding HCL configurations into Go data structures.
//
// It provides a convenient and concise way of describing the schema for
// configuration and then accessing the resulting data via native Go
// types.
//
// A struct field tag scheme is used, similar to other decoding and
// unmarshalling libraries. The tags are formatted as in the following example:
//
// ThingType string `hcl:"thing_type,attr"`
//
// Within each tag there are two comma-separated tokens. The first is the
// name of the corresponding construct in configuration, while the second
// is a keyword giving the kind of construct expected. The following
// kind keywords are supported:
//
// attr (the default) indicates that the value is to be populated from an attribute
// block indicates that the value is to be populated from a block
// label indicates that the value is to be populated from a block label
// optional is the same as attr, but the field is optional
// remain indicates that the value is to be populated from the remaining body after populating other fields
//
// "attr" fields may either be of type *hcl.Expression, in which case the raw
// expression is assigned, or of any type accepted by gocty, in which case
// gocty will be used to assign the value to a native Go type.
//
// "block" fields may be a struct that recursively uses the same tags, or a
// slice of such structs, in which case multiple blocks of the corresponding
// type are decoded into the slice.
//
// "body" can be placed on a single field of type hcl.Body to capture
// the full hcl.Body that was decoded for a block. This does not allow leftover
// values like "remain", so a decoding error will still be returned if leftover
// fields are given. If you want to capture the decoding body PLUS leftover
// fields, you must specify a "remain" field as well to prevent errors. The
// body field and the remain field will both contain the leftover fields.
//
// "label" fields are considered only in a struct used as the type of a field
// marked as "block", and are used sequentially to capture the labels of
// the blocks being decoded. In this case, the name token is used only as
// an identifier for the label in diagnostic messages.
//
// "optional" fields behave like "attr" fields, but they are optional
// and will not give parsing errors if they are missing.
//
// "remain" can be placed on a single field that may be either of type
// hcl.Body or hcl.Attributes, in which case any remaining body content is
// placed into this field for delayed processing. If no "remain" field is
// present then any attributes or blocks not matched by another valid tag
// will cause an error diagnostic.
//
// Only a subset of this tagging/typing vocabulary is supported for the
// "Encode" family of functions. See the EncodeIntoBody docs for full details
// on the constraints there.
//
// Broadly-speaking this package deals with two types of error. The first is
// errors in the configuration itself, which are returned as diagnostics
// written with the configuration author as the target audience. The second
// is bugs in the calling program, such as invalid struct tags, which are
// surfaced via panics since there can be no useful runtime handling of such
// errors and they should certainly not be returned to the user as diagnostics.
package gohcl
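// Editor's note: a compact sketch, not part of the original file, showing the
// tag vocabulary described above on one hypothetical pair of structs:
//
//	type Config struct {
//	    IOMode   string         `hcl:"io_mode"`            // attr, required
//	    LogLevel string         `hcl:"log_level,optional"` // attr, optional
//	    Services []ServiceBlock `hcl:"service,block"`      // repeated block
//	    Rest     hcl.Body       `hcl:",remain"`            // leftover content
//	}
//
//	type ServiceBlock struct {
//	    Name string `hcl:"name,label"` // first block label
//	    Exe  string `hcl:"executable"` // attr inside the block
//	}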

View File
@@ -1,192 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package gohcl
import (
"fmt"
"reflect"
"sort"
"github.com/hashicorp/hcl/v2/hclwrite"
"github.com/zclconf/go-cty/cty/gocty"
)
// EncodeIntoBody replaces the contents of the given hclwrite Body with
// attributes and blocks derived from the given value, which must be a
// struct value or a pointer to a struct value with the struct tags defined
// in this package.
//
// This function can work only with fully-decoded data. It will ignore any
// fields tagged as "remain", any fields that decode attributes into either
// hcl.Attribute or hcl.Expression values, and any fields that decode blocks
// into hcl.Attributes values. This function does not have enough information
// to complete the decoding of these types.
//
// Any fields tagged as "label" are ignored by this function. Use EncodeAsBlock
// to produce a whole hclwrite.Block including block labels.
//
// As long as a suitable value is given to encode and the destination body
// is non-nil, this function will always complete. It will panic in case of
// any errors in the calling program, such as passing an inappropriate type
// or a nil body.
//
// The layout of the resulting HCL source is derived from the ordering of
// the struct fields, with blank lines around nested blocks of different types.
// Fields representing attributes should usually precede those representing
// blocks so that the attributes can group together in the result. For more
// control, use the hclwrite API directly.
func EncodeIntoBody(val any, dst *hclwrite.Body) {
rv := reflect.ValueOf(val)
ty := rv.Type()
if ty.Kind() == reflect.Ptr {
rv = rv.Elem()
ty = rv.Type()
}
if ty.Kind() != reflect.Struct {
panic(fmt.Sprintf("value is %s, not struct", ty.Kind()))
}
tags := getFieldTags(ty)
populateBody(rv, ty, tags, dst)
}
// EncodeAsBlock creates a new hclwrite.Block populated with the data from
// the given value, which must be a struct or pointer to struct with the
// struct tags defined in this package.
//
// If the given struct type has fields tagged with "label" tags then they
// will be used in order to annotate the created block with labels.
//
// This function has the same constraints as EncodeIntoBody and will panic
// if they are violated.
func EncodeAsBlock(val any, blockType string) *hclwrite.Block {
rv := reflect.ValueOf(val)
ty := rv.Type()
if ty.Kind() == reflect.Ptr {
rv = rv.Elem()
ty = rv.Type()
}
if ty.Kind() != reflect.Struct {
panic(fmt.Sprintf("value is %s, not struct", ty.Kind()))
}
tags := getFieldTags(ty)
labels := make([]string, len(tags.Labels))
for i, lf := range tags.Labels {
lv := rv.Field(lf.FieldIndex)
// We just stringify whatever we find. It should always be a string
// but if not then we'll still do something reasonable.
labels[i] = fmt.Sprintf("%s", lv.Interface())
}
block := hclwrite.NewBlock(blockType, labels)
populateBody(rv, ty, tags, block.Body())
return block
}
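// Editor's note: an illustrative sketch, not in the original file, pairing
// EncodeAsBlock with hclwrite; the Service type mirrors the example test in
// this package.
//
//	svc := Service{Name: "web", Exe: []string{"./web"}}
//	f := hclwrite.NewEmptyFile()
//	f.Body().AppendBlock(EncodeAsBlock(&svc, "service"))
//	// yields roughly: service "web" { executable = ["./web"] }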
func populateBody(rv reflect.Value, ty reflect.Type, tags *fieldTags, dst *hclwrite.Body) {
nameIdxs := make(map[string]int, len(tags.Attributes)+len(tags.Blocks))
namesOrder := make([]string, 0, len(tags.Attributes)+len(tags.Blocks))
for n, i := range tags.Attributes {
nameIdxs[n] = i
namesOrder = append(namesOrder, n)
}
for n, i := range tags.Blocks {
nameIdxs[n] = i
namesOrder = append(namesOrder, n)
}
sort.SliceStable(namesOrder, func(i, j int) bool {
ni, nj := namesOrder[i], namesOrder[j]
return nameIdxs[ni] < nameIdxs[nj]
})
dst.Clear()
prevWasBlock := false
for _, name := range namesOrder {
fieldIdx := nameIdxs[name]
field := ty.Field(fieldIdx)
fieldTy := field.Type
fieldVal := rv.Field(fieldIdx)
if fieldTy.Kind() == reflect.Ptr {
fieldTy = fieldTy.Elem()
fieldVal = fieldVal.Elem()
}
if _, isAttr := tags.Attributes[name]; isAttr {
if exprType.AssignableTo(fieldTy) || attrType.AssignableTo(fieldTy) {
continue // ignore undecoded fields
}
if !fieldVal.IsValid() {
continue // ignore (field value is nil pointer)
}
if fieldTy.Kind() == reflect.Ptr && fieldVal.IsNil() {
continue // ignore
}
if prevWasBlock {
dst.AppendNewline()
prevWasBlock = false
}
valTy, err := gocty.ImpliedType(fieldVal.Interface())
if err != nil {
panic(fmt.Sprintf("cannot encode %T as HCL expression: %s", fieldVal.Interface(), err))
}
val, err := gocty.ToCtyValue(fieldVal.Interface(), valTy)
if err != nil {
// This should never happen, since we should always be able
// to decode into the implied type.
panic(fmt.Sprintf("failed to encode %T as %#v: %s", fieldVal.Interface(), valTy, err))
}
dst.SetAttributeValue(name, val)
} else { // must be a block, then
elemTy := fieldTy
isSeq := false
if elemTy.Kind() == reflect.Slice || elemTy.Kind() == reflect.Array {
isSeq = true
elemTy = elemTy.Elem()
}
if bodyType.AssignableTo(elemTy) || attrsType.AssignableTo(elemTy) {
continue // ignore undecoded fields
}
prevWasBlock = false
if isSeq {
l := fieldVal.Len()
for i := range l {
elemVal := fieldVal.Index(i)
if !elemVal.IsValid() {
continue // ignore (elem value is nil pointer)
}
if elemTy.Kind() == reflect.Ptr && elemVal.IsNil() {
continue // ignore
}
block := EncodeAsBlock(elemVal.Interface(), name)
if !prevWasBlock {
dst.AppendNewline()
prevWasBlock = true
}
dst.AppendBlock(block)
}
} else {
if !fieldVal.IsValid() {
continue // ignore (field value is nil pointer)
}
if elemTy.Kind() == reflect.Ptr && fieldVal.IsNil() {
continue // ignore
}
block := EncodeAsBlock(fieldVal.Interface(), name)
if !prevWasBlock {
dst.AppendNewline()
prevWasBlock = true
}
dst.AppendBlock(block)
}
}
}
}
View File
@@ -1,67 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package gohcl_test
import (
"fmt"
"github.com/hashicorp/hcl/v2/gohcl"
"github.com/hashicorp/hcl/v2/hclwrite"
)
func ExampleEncodeIntoBody() {
type Service struct {
Name string `hcl:"name,label"`
Exe []string `hcl:"executable"`
}
type Constraints struct {
OS string `hcl:"os"`
Arch string `hcl:"arch"`
}
type App struct {
Name string `hcl:"name"`
Desc string `hcl:"description"`
Constraints *Constraints `hcl:"constraints,block"`
Services []Service `hcl:"service,block"`
}
app := App{
Name: "awesome-app",
Desc: "Such an awesome application",
Constraints: &Constraints{
OS: "linux",
Arch: "amd64",
},
Services: []Service{
{
Name: "web",
Exe: []string{"./web", "--listen=:8080"},
},
{
Name: "worker",
Exe: []string{"./worker"},
},
},
}
f := hclwrite.NewEmptyFile()
gohcl.EncodeIntoBody(&app, f.Body())
fmt.Printf("%s", f.Bytes())
// Output:
// name = "awesome-app"
// description = "Such an awesome application"
//
// constraints {
// os = "linux"
// arch = "amd64"
// }
//
// service "web" {
// executable = ["./web", "--listen=:8080"]
// }
// service "worker" {
// executable = ["./worker"]
// }
}
View File
@@ -1,185 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package gohcl
import (
"fmt"
"reflect"
"sort"
"strings"
"github.com/hashicorp/hcl/v2"
)
// ImpliedBodySchema produces a hcl.BodySchema derived from the type of the
// given value, which must be a struct value or a pointer to one. If an
// inappropriate value is passed, this function will panic.
//
// The second return argument indicates whether the given struct includes
// a "remain" field, and thus the returned schema is non-exhaustive.
//
// This uses the tags on the fields of the struct to discover how each
// field's value should be expressed within configuration. If an invalid
// mapping is attempted, this function will panic.
func ImpliedBodySchema(val any) (schema *hcl.BodySchema, partial bool) {
ty := reflect.TypeOf(val)
if ty.Kind() == reflect.Ptr {
ty = ty.Elem()
}
if ty.Kind() != reflect.Struct {
panic(fmt.Sprintf("given value must be struct, not %T", val))
}
var attrSchemas []hcl.AttributeSchema
var blockSchemas []hcl.BlockHeaderSchema
tags := getFieldTags(ty)
attrNames := make([]string, 0, len(tags.Attributes))
for n := range tags.Attributes {
attrNames = append(attrNames, n)
}
sort.Strings(attrNames)
for _, n := range attrNames {
idx := tags.Attributes[n]
optional := tags.Optional[n]
field := ty.Field(idx)
var required bool
switch {
case field.Type.AssignableTo(exprType):
// If we're decoding to hcl.Expression then absence can be
// indicated via a null value, so we don't specify that
// the field is required during decoding.
required = false
case field.Type.Kind() != reflect.Ptr && !optional:
required = true
default:
required = false
}
attrSchemas = append(attrSchemas, hcl.AttributeSchema{
Name: n,
Required: required,
})
}
blockNames := make([]string, 0, len(tags.Blocks))
for n := range tags.Blocks {
blockNames = append(blockNames, n)
}
sort.Strings(blockNames)
for _, n := range blockNames {
idx := tags.Blocks[n]
field := ty.Field(idx)
fty := field.Type
if fty.Kind() == reflect.Slice {
fty = fty.Elem()
}
if fty.Kind() == reflect.Ptr {
fty = fty.Elem()
}
if fty.Kind() != reflect.Struct {
panic(fmt.Sprintf(
"hcl 'block' tag kind cannot be applied to %s field %s: struct required", field.Type.String(), field.Name,
))
}
ftags := getFieldTags(fty)
var labelNames []string
if len(ftags.Labels) > 0 {
labelNames = make([]string, len(ftags.Labels))
for i, l := range ftags.Labels {
labelNames[i] = l.Name
}
}
blockSchemas = append(blockSchemas, hcl.BlockHeaderSchema{
Type: n,
LabelNames: labelNames,
})
}
partial = tags.Remain != nil
schema = &hcl.BodySchema{
Attributes: attrSchemas,
Blocks: blockSchemas,
}
return schema, partial
}
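// Editor's note: an illustrative sketch, not in the original file; the cfg
// type is hypothetical.
//
//	type cfg struct {
//	    Name string   `hcl:"name"`
//	    Rest hcl.Body `hcl:",remain"`
//	}
//	schema, partial := ImpliedBodySchema(cfg{})
//	// schema.Attributes: [{Name: "name", Required: true}]; partial == true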
type fieldTags struct {
Attributes map[string]int
Blocks map[string]int
Labels []labelField
Remain *int
Body *int
Optional map[string]bool
}
type labelField struct {
FieldIndex int
Name string
}
func getFieldTags(ty reflect.Type) *fieldTags {
ret := &fieldTags{
Attributes: map[string]int{},
Blocks: map[string]int{},
Optional: map[string]bool{},
}
ct := ty.NumField()
for i := range ct {
field := ty.Field(i)
tag := field.Tag.Get("hcl")
if tag == "" {
continue
}
comma := strings.Index(tag, ",")
var name, kind string
if comma != -1 {
name = tag[:comma]
kind = tag[comma+1:]
} else {
name = tag
kind = "attr"
}
switch kind {
case "attr":
ret.Attributes[name] = i
case "block":
ret.Blocks[name] = i
case "label":
ret.Labels = append(ret.Labels, labelField{
FieldIndex: i,
Name: name,
})
case "remain":
if ret.Remain != nil {
panic("only one 'remain' tag is permitted")
}
idx := i // copy, because this loop will continue assigning to i
ret.Remain = &idx
case "body":
if ret.Body != nil {
panic("only one 'body' tag is permitted")
}
idx := i // copy, because this loop will continue assigning to i
ret.Body = &idx
case "optional":
ret.Attributes[name] = i
ret.Optional[name] = true
default:
panic(fmt.Sprintf("invalid hcl field tag kind %q on %s %q", kind, field.Type.String(), field.Name))
}
}
return ret
}
View File
@@ -1,233 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package gohcl
import (
"fmt"
"reflect"
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/hashicorp/hcl/v2"
)
func TestImpliedBodySchema(t *testing.T) {
tests := []struct {
val any
wantSchema *hcl.BodySchema
wantPartial bool
}{
{
struct{}{},
&hcl.BodySchema{},
false,
},
{
struct {
Ignored bool
}{},
&hcl.BodySchema{},
false,
},
{
struct {
Attr1 bool `hcl:"attr1"`
Attr2 bool `hcl:"attr2"`
}{},
&hcl.BodySchema{
Attributes: []hcl.AttributeSchema{
{
Name: "attr1",
Required: true,
},
{
Name: "attr2",
Required: true,
},
},
},
false,
},
{
struct {
Attr *bool `hcl:"attr,attr"`
}{},
&hcl.BodySchema{
Attributes: []hcl.AttributeSchema{
{
Name: "attr",
Required: false,
},
},
},
false,
},
{
struct {
Thing struct{} `hcl:"thing,block"`
}{},
&hcl.BodySchema{
Blocks: []hcl.BlockHeaderSchema{
{
Type: "thing",
},
},
},
false,
},
{
struct {
Thing struct {
Type string `hcl:"type,label"`
Name string `hcl:"name,label"`
} `hcl:"thing,block"`
}{},
&hcl.BodySchema{
Blocks: []hcl.BlockHeaderSchema{
{
Type: "thing",
LabelNames: []string{"type", "name"},
},
},
},
false,
},
{
struct {
Thing []struct {
Type string `hcl:"type,label"`
Name string `hcl:"name,label"`
} `hcl:"thing,block"`
}{},
&hcl.BodySchema{
Blocks: []hcl.BlockHeaderSchema{
{
Type: "thing",
LabelNames: []string{"type", "name"},
},
},
},
false,
},
{
struct {
Thing *struct {
Type string `hcl:"type,label"`
Name string `hcl:"name,label"`
} `hcl:"thing,block"`
}{},
&hcl.BodySchema{
Blocks: []hcl.BlockHeaderSchema{
{
Type: "thing",
LabelNames: []string{"type", "name"},
},
},
},
false,
},
{
struct {
Thing struct {
Name string `hcl:"name,label"`
Something string `hcl:"something"`
} `hcl:"thing,block"`
}{},
&hcl.BodySchema{
Blocks: []hcl.BlockHeaderSchema{
{
Type: "thing",
LabelNames: []string{"name"},
},
},
},
false,
},
{
struct {
Doodad string `hcl:"doodad"`
Thing struct {
Name string `hcl:"name,label"`
} `hcl:"thing,block"`
}{},
&hcl.BodySchema{
Attributes: []hcl.AttributeSchema{
{
Name: "doodad",
Required: true,
},
},
Blocks: []hcl.BlockHeaderSchema{
{
Type: "thing",
LabelNames: []string{"name"},
},
},
},
false,
},
{
struct {
Doodad string `hcl:"doodad"`
Config string `hcl:",remain"`
}{},
&hcl.BodySchema{
Attributes: []hcl.AttributeSchema{
{
Name: "doodad",
Required: true,
},
},
},
true,
},
{
struct {
Expr hcl.Expression `hcl:"expr"`
}{},
&hcl.BodySchema{
Attributes: []hcl.AttributeSchema{
{
Name: "expr",
Required: false,
},
},
},
false,
},
{
struct {
Meh string `hcl:"meh,optional"`
}{},
&hcl.BodySchema{
Attributes: []hcl.AttributeSchema{
{
Name: "meh",
Required: false,
},
},
},
false,
},
}
for _, test := range tests {
t.Run(fmt.Sprintf("%#v", test.val), func(t *testing.T) {
schema, partial := ImpliedBodySchema(test.val)
if !reflect.DeepEqual(schema, test.wantSchema) {
t.Errorf(
"wrong schema\ngot: %s\nwant: %s",
spew.Sdump(schema), spew.Sdump(test.wantSchema),
)
}
if partial != test.wantPartial {
t.Errorf(
"wrong partial flag\ngot: %#v\nwant: %#v",
partial, test.wantPartial,
)
}
})
}
}
View File
@@ -1,19 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package gohcl
import (
"reflect"
"github.com/hashicorp/hcl/v2"
)
var victimExpr hcl.Expression
var victimBody hcl.Body
var exprType = reflect.TypeOf(&victimExpr).Elem()
var bodyType = reflect.TypeOf(&victimBody).Elem()
var blockType = reflect.TypeOf((*hcl.Block)(nil)) //nolint:unused
var attrType = reflect.TypeOf((*hcl.Attribute)(nil))
var attrsType = reflect.TypeOf(hcl.Attributes(nil))
View File
@@ -7,16 +7,15 @@ import (
"math"
"math/big"
"reflect"
"slices"
"strconv"
"strings"
"github.com/docker/buildx/bake/hclparser/gohcl"
"github.com/docker/buildx/util/userfunc"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/hcl/v2/gohcl"
"github.com/pkg/errors"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/convert"
"github.com/zclconf/go-cty/cty/gocty"
)
type Opt struct {
@@ -42,7 +41,7 @@ type variableValidation struct {
type functionDef struct {
Name string `json:"-" hcl:"name,label"`
Params *hcl.Attribute `json:"params,omitempty" hcl:"params"`
Variadic *hcl.Attribute `json:"variadic_params,omitempty" hcl:"variadic_params"`
Variadic *hcl.Attribute `json:"variadic_param,omitempty" hcl:"variadic_params"`
Result *hcl.Attribute `json:"result,omitempty" hcl:"result"`
}
@@ -455,7 +454,7 @@ func (p *parser) resolveBlock(block *hcl.Block, target *hcl.BodySchema) (err err
}
// decode!
diag = decodeBody(body(), ectx, output.Interface())
diag = gohcl.DecodeBody(body(), ectx, output.Interface())
if diag.HasErrors() {
return diag
}
@@ -477,11 +476,11 @@ func (p *parser) resolveBlock(block *hcl.Block, target *hcl.BodySchema) (err err
}
// store the result into the evaluation context (so it can be referenced)
outputType, err := ImpliedType(output.Interface())
outputType, err := gocty.ImpliedType(output.Interface())
if err != nil {
return err
}
outputValue, err := ToCtyValue(output.Interface(), outputType)
outputValue, err := gocty.ToCtyValue(output.Interface(), outputType)
if err != nil {
return err
}
@@ -493,12 +492,7 @@ func (p *parser) resolveBlock(block *hcl.Block, target *hcl.BodySchema) (err err
m = map[string]cty.Value{}
}
m[name] = outputValue
// The logical contents of this structure is similar to a map,
// but it's possible for some attributes to be different in a way that's
// illegal for a map so we use an object here instead which is structurally
// equivalent but allows disparate types for different keys.
p.ectx.Variables[block.Type] = cty.ObjectVal(m)
p.ectx.Variables[block.Type] = cty.MapVal(m)
}
return nil
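// Editor's note, not part of the diff: the ObjectVal/MapVal switch above is
// semantically significant. cty.MapVal requires every element to share one
// type and panics otherwise, while cty.ObjectVal allows per-key types:
//
//	m := map[string]cty.Value{
//	    "a": cty.StringVal("x"),
//	    "b": cty.NumberIntVal(1),
//	}
//	_ = cty.ObjectVal(m) // ok: object with string and number attributes
//	_ = cty.MapVal(m)    // panics: inconsistent map element types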
@@ -556,64 +550,34 @@ func (p *parser) resolveBlockNames(block *hcl.Block) ([]string, error) {
func (p *parser) validateVariables(vars map[string]*variable, ectx *hcl.EvalContext) hcl.Diagnostics {
var diags hcl.Diagnostics
for _, v := range vars {
for _, rule := range v.Validations {
resultVal, condDiags := rule.Condition.Value(ectx)
for _, validation := range v.Validations {
condition, condDiags := validation.Condition.Value(ectx)
if condDiags.HasErrors() {
diags = append(diags, condDiags...)
continue
}
if resultVal.IsNull() {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid condition result",
Detail: "Condition expression must return either true or false, not null.",
Subject: rule.Condition.Range().Ptr(),
Expression: rule.Condition,
})
continue
}
var err error
resultVal, err = convert.Convert(resultVal, cty.Bool)
if err != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid condition result",
Detail: fmt.Sprintf("Invalid condition result value: %s", err),
Subject: rule.Condition.Range().Ptr(),
Expression: rule.Condition,
})
continue
}
if !resultVal.True() {
message, msgDiags := rule.ErrorMessage.Value(ectx)
if !condition.True() {
message, msgDiags := validation.ErrorMessage.Value(ectx)
if msgDiags.HasErrors() {
diags = append(diags, msgDiags...)
continue
}
errorMessage := "This check failed, but has an invalid error message."
if !message.IsNull() {
errorMessage = message.AsString()
}
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Validation failed",
Detail: errorMessage,
Subject: rule.Condition.Range().Ptr(),
Detail: message.AsString(),
Subject: validation.Condition.Range().Ptr(),
})
}
}
}
return diags
}
type Variable struct {
Name string `json:"name"`
Description string `json:"description,omitempty"`
Value *string `json:"value,omitempty"`
Name string
Description string
Value *string
}
type ParseMeta struct {
@@ -621,7 +585,7 @@ type ParseMeta struct {
AllVariables []*Variable
}
func Parse(b hcl.Body, opt Opt, val any) (*ParseMeta, hcl.Diagnostics) {
func Parse(b hcl.Body, opt Opt, val interface{}) (*ParseMeta, hcl.Diagnostics) {
reserved := map[string]struct{}{}
schema, _ := gohcl.ImpliedBodySchema(val)
@@ -795,7 +759,7 @@ func Parse(b hcl.Body, opt Opt, val any) (*ParseMeta, hcl.Diagnostics) {
types := map[string]field{}
renamed := map[string]map[string][]string{}
vt := reflect.ValueOf(val).Elem().Type()
for i := range vt.NumField() {
for i := 0; i < vt.NumField(); i++ {
tags := strings.Split(vt.Field(i).Tag.Get("hcl"), ",")
p.blockTypes[tags[0]] = vt.Field(i).Type.Elem().Elem()
@@ -863,7 +827,7 @@ func Parse(b hcl.Body, opt Opt, val any) (*ParseMeta, hcl.Diagnostics) {
oldValue, exists := t.values[lblName]
if !exists && lblExists {
if v.Elem().Field(t.idx).Type().Kind() == reflect.Slice {
for i := range v.Elem().Field(t.idx).Len() {
for i := 0; i < v.Elem().Field(t.idx).Len(); i++ {
if lblName == v.Elem().Field(t.idx).Index(i).Elem().Field(lblIndex).String() {
exists = true
oldValue = value{Value: v.Elem().Field(t.idx).Index(i), idx: i}
@@ -930,7 +894,7 @@ func wrapErrorDiagnostic(message string, err error, subject *hcl.Range, context
func setName(v reflect.Value, name string) {
numFields := v.Elem().Type().NumField()
for i := range numFields {
for i := 0; i < numFields; i++ {
parts := strings.Split(v.Elem().Type().Field(i).Tag.Get("hcl"), ",")
for _, t := range parts[1:] {
if t == "label" {
@@ -942,23 +906,27 @@ func setName(v reflect.Value, name string) {
func getName(v reflect.Value) (string, bool) {
numFields := v.Elem().Type().NumField()
for i := range numFields {
for i := 0; i < numFields; i++ {
parts := strings.Split(v.Elem().Type().Field(i).Tag.Get("hcl"), ",")
if slices.Contains(parts[1:], "label") {
for _, t := range parts[1:] {
if t == "label" {
return v.Elem().Field(i).String(), true
}
}
}
return "", false
}
func getNameIndex(v reflect.Value) (int, bool) {
numFields := v.Elem().Type().NumField()
for i := range numFields {
for i := 0; i < numFields; i++ {
parts := strings.Split(v.Elem().Type().Field(i).Tag.Get("hcl"), ",")
if slices.Contains(parts[1:], "label") {
for _, t := range parts[1:] {
if t == "label" {
return i, true
}
}
}
return 0, false
}
@@ -1015,8 +983,3 @@ func key(ks ...any) uint64 {
}
return hash.Sum64()
}
func decodeBody(body hcl.Body, ctx *hcl.EvalContext, val any) hcl.Diagnostics {
dec := gohcl.DecodeOptions{ImpliedType: ImpliedType}
return dec.DecodeBody(body, ctx, val)
}
View File
@@ -1,160 +0,0 @@
// MIT License
//
// Copyright (c) 2017-2018 Martin Atkins
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package hclparser
import (
"reflect"
"github.com/zclconf/go-cty/cty"
)
// ImpliedType takes an arbitrary Go value (as an interface{}) and attempts
// to find a suitable cty.Type instance that could be used for a conversion
// with ToCtyValue.
//
// This allows -- for simple situations at least -- types to be defined just
// once in Go and the cty types derived from the Go types, but in the process
// it makes some assumptions that may be undesirable so applications are
// encouraged to build their cty types directly if exacting control is
// required.
//
// Not all Go types can be represented as cty types, so an error may be
// returned which is usually considered to be a bug in the calling program.
// In particular, ImpliedType will never use capsule types in its returned
// type, because it cannot know the capsule types supported by the calling
// program.
func ImpliedType(gv any) (cty.Type, error) {
rt := reflect.TypeOf(gv)
var path cty.Path
return impliedType(rt, path)
}
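// Editor's note: an illustrative sketch, not in the original file; the point
// type is hypothetical.
//
//	type point struct {
//	    X int `cty:"x"`
//	    Y int `cty:"y"`
//	}
//	ty, _ := ImpliedType(point{})        // object with number attrs "x", "y"
//	lty, _ := ImpliedType([]string(nil)) // cty.List(cty.String)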
func impliedType(rt reflect.Type, path cty.Path) (cty.Type, error) {
if ety, err := impliedTypeExt(rt, path); err == nil {
return ety, nil
}
switch rt.Kind() {
case reflect.Ptr:
return impliedType(rt.Elem(), path)
// Primitive types
case reflect.Bool:
return cty.Bool, nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return cty.Number, nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return cty.Number, nil
case reflect.Float32, reflect.Float64:
return cty.Number, nil
case reflect.String:
return cty.String, nil
// Collection types
case reflect.Slice:
path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.Number)})
ety, err := impliedType(rt.Elem(), path)
if err != nil {
return cty.NilType, err
}
return cty.List(ety), nil
case reflect.Map:
if !stringType.AssignableTo(rt.Key()) {
return cty.NilType, path.NewErrorf("no cty.Type for %s (must have string keys)", rt)
}
path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.String)})
ety, err := impliedType(rt.Elem(), path)
if err != nil {
return cty.NilType, err
}
return cty.Map(ety), nil
// Structural types
case reflect.Struct:
return impliedStructType(rt, path)
default:
return cty.NilType, path.NewErrorf("no cty.Type for %s", rt)
}
}
func impliedStructType(rt reflect.Type, path cty.Path) (cty.Type, error) {
if valueType.AssignableTo(rt) {
// Special case: cty.Value represents cty.DynamicPseudoType, for
// type conformance checking.
return cty.DynamicPseudoType, nil
}
fieldIdxs := structTagIndices(rt)
if len(fieldIdxs) == 0 {
return cty.NilType, path.NewErrorf("no cty.Type for %s (no cty field tags)", rt)
}
atys := make(map[string]cty.Type, len(fieldIdxs))
{
// Temporary extension of path for attributes
path := append(path, nil)
for k, fi := range fieldIdxs {
path[len(path)-1] = cty.GetAttrStep{Name: k}
ft := rt.Field(fi).Type
aty, err := impliedType(ft, path)
if err != nil {
return cty.NilType, err
}
atys[k] = aty
}
}
return cty.Object(atys), nil
}
var (
valueType = reflect.TypeOf(cty.Value{})
stringType = reflect.TypeOf("")
)
// structTagIndices interrogates the fields of the given type (which must
// be a struct type, or we'll panic) and returns a map from the cty
// attribute names declared via struct tags to the indices of the
// fields holding those tags.
//
// This function will panic if two fields within the struct are tagged with
// the same cty attribute name.
func structTagIndices(st reflect.Type) map[string]int {
ct := st.NumField()
ret := make(map[string]int, ct)
for i := range ct {
field := st.Field(i)
attrName := field.Tag.Get("cty")
if attrName != "" {
ret[attrName] = i
}
}
return ret
}
View File
@@ -1,166 +0,0 @@
package hclparser
import (
"reflect"
"sync"
"github.com/containerd/errdefs"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/convert"
"github.com/zclconf/go-cty/cty/gocty"
)
type ToCtyValueConverter interface {
// ToCtyValue will convert this capsule value into a native
// cty.Value. This should not return a capsule type.
ToCtyValue() cty.Value
}
type FromCtyValueConverter interface {
// FromCtyValue will initialize this value using a cty.Value.
FromCtyValue(in cty.Value, path cty.Path) error
}
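// Editor's note: a hypothetical sketch, not in the original file, of a type
// satisfying both interfaces so it can round-trip through a capsule type
// (assumes "time" is imported):
//
//	type Duration time.Duration
//
//	func (d Duration) ToCtyValue() cty.Value {
//	    return cty.StringVal(time.Duration(d).String())
//	}
//
//	func (d *Duration) FromCtyValue(in cty.Value, p cty.Path) error {
//	    v, err := time.ParseDuration(in.AsString())
//	    if err != nil {
//	        return p.NewError(err)
//	    }
//	    *d = Duration(v)
//	    return nil
//	}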
type extensionType int
const (
unwrapCapsuleValueExtension extensionType = iota
)
func impliedTypeExt(rt reflect.Type, _ cty.Path) (cty.Type, error) {
if rt.Kind() != reflect.Pointer {
rt = reflect.PointerTo(rt)
}
if isCapsuleType(rt) {
return capsuleValueCapsuleType(rt), nil
}
return cty.NilType, errdefs.ErrNotImplemented
}
func isCapsuleType(rt reflect.Type) bool {
fromCtyValueType := reflect.TypeFor[FromCtyValueConverter]()
toCtyValueType := reflect.TypeFor[ToCtyValueConverter]()
return rt.Implements(fromCtyValueType) && rt.Implements(toCtyValueType)
}
var capsuleValueTypes sync.Map
func capsuleValueCapsuleType(rt reflect.Type) cty.Type {
if rt.Kind() != reflect.Pointer {
panic("capsule value must be a pointer")
}
elem := rt.Elem()
if val, loaded := capsuleValueTypes.Load(elem); loaded {
return val.(cty.Type)
}
toCtyValueType := reflect.TypeFor[ToCtyValueConverter]()
// First time used. Initialize new capsule ops.
ops := &cty.CapsuleOps{
ConversionTo: func(_ cty.Type) func(cty.Value, cty.Path) (any, error) {
return func(in cty.Value, p cty.Path) (any, error) {
rv := reflect.New(elem).Interface()
if err := rv.(FromCtyValueConverter).FromCtyValue(in, p); err != nil {
return nil, err
}
return rv, nil
}
},
ConversionFrom: func(want cty.Type) func(any, cty.Path) (cty.Value, error) {
return func(in any, _ cty.Path) (cty.Value, error) {
rv := reflect.ValueOf(in).Convert(toCtyValueType)
v := rv.Interface().(ToCtyValueConverter).ToCtyValue()
return convert.Convert(v, want)
}
},
ExtensionData: func(key any) any {
switch key {
case unwrapCapsuleValueExtension:
zero := reflect.Zero(elem).Interface()
if conv, ok := zero.(ToCtyValueConverter); ok {
return conv.ToCtyValue().Type()
}
zero = reflect.Zero(rt).Interface()
if conv, ok := zero.(ToCtyValueConverter); ok {
return conv.ToCtyValue().Type()
}
}
return nil
},
}
// Attempt to store the new type. Use whichever was loaded first in the case
// of a race condition.
ety := cty.CapsuleWithOps(elem.Name(), elem, ops)
val, _ := capsuleValueTypes.LoadOrStore(elem, ety)
return val.(cty.Type)
}
// UnwrapCtyValue will unwrap capsule type values into their native cty value
// equivalents if possible.
func UnwrapCtyValue(in cty.Value) cty.Value {
want := toCtyValueType(in.Type())
if in.Type().Equals(want) {
return in
} else if out, err := convert.Convert(in, want); err == nil {
return out
}
return cty.NullVal(want)
}
func toCtyValueType(in cty.Type) cty.Type {
if et := in.MapElementType(); et != nil {
return cty.Map(toCtyValueType(*et))
}
if et := in.SetElementType(); et != nil {
return cty.Set(toCtyValueType(*et))
}
if et := in.ListElementType(); et != nil {
return cty.List(toCtyValueType(*et))
}
if in.IsObjectType() {
var optional []string
inAttrTypes := in.AttributeTypes()
outAttrTypes := make(map[string]cty.Type, len(inAttrTypes))
for name, typ := range inAttrTypes {
outAttrTypes[name] = toCtyValueType(typ)
if in.AttributeOptional(name) {
optional = append(optional, name)
}
}
return cty.ObjectWithOptionalAttrs(outAttrTypes, optional)
}
if in.IsTupleType() {
inTypes := in.TupleElementTypes()
outTypes := make([]cty.Type, len(inTypes))
for i, typ := range inTypes {
outTypes[i] = toCtyValueType(typ)
}
return cty.Tuple(outTypes)
}
if in.IsCapsuleType() {
if out := in.CapsuleExtensionData(unwrapCapsuleValueExtension); out != nil {
return out.(cty.Type)
}
return cty.DynamicPseudoType
}
return in
}
func ToCtyValue(val any, ty cty.Type) (cty.Value, error) {
out, err := gocty.ToCtyValue(val, ty)
if err != nil {
return out, err
}
return UnwrapCtyValue(out), nil
}
View File
@@ -8,7 +8,6 @@ import (
"encoding/json"
"fmt"
"io"
"maps"
"os"
"slices"
"strconv"
@@ -16,7 +15,7 @@ import (
"sync"
"time"
"github.com/containerd/containerd/v2/core/images"
"github.com/containerd/containerd/images"
"github.com/distribution/reference"
"github.com/docker/buildx/builder"
controllerapi "github.com/docker/buildx/controller/pb"
@@ -41,6 +40,7 @@ import (
"github.com/moby/buildkit/solver/errdefs"
"github.com/moby/buildkit/solver/pb"
spb "github.com/moby/buildkit/sourcepolicy/pb"
"github.com/moby/buildkit/util/entitlements"
"github.com/moby/buildkit/util/progress/progresswriter"
"github.com/moby/buildkit/util/tracing"
"github.com/opencontainers/go-digest"
@@ -63,7 +63,7 @@ type Options struct {
Inputs Inputs
Ref string
Allow []string
Allow []entitlements.Entitlement
Attests map[string]*string
BuildArgs map[string]string
CacheFrom []client.CacheOptionsEntry
@@ -205,6 +205,15 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[
return nil, err
}
defers := make([]func(), 0, 2)
defer func() {
if err != nil {
for _, f := range defers {
f()
}
}
}()
reqForNodes := make(map[string][]*reqForNode)
eg, ctx := errgroup.WithContext(ctx)
@@ -234,11 +243,11 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[
if err != nil {
return nil, err
}
defer release()
if err := saveLocalState(so, k, opt, np.Node(), cfg); err != nil {
return nil, err
}
addGitAttrs(so)
defers = append(defers, release)
reqn = append(reqn, &reqForNode{
resolvedNode: np,
so: so,
@@ -423,7 +432,9 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[
FrontendInputs: frontendInputs,
FrontendOpt: make(map[string]string),
}
maps.Copy(req.FrontendOpt, so.FrontendAttrs)
for k, v := range so.FrontendAttrs {
req.FrontendOpt[k] = v
}
so.Frontend = ""
so.FrontendInputs = nil
@@ -525,10 +536,11 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[
}
}
}
node := dp.Node().Driver
if node.IsMobyDriver() {
for _, e := range so.Exports {
if e.Type == "moby" && e.Attrs["push"] != "" && !node.Features(ctx)[driver.DirectPush] {
if e.Type == "moby" && e.Attrs["push"] != "" {
if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok {
pushNames = e.Attrs["name"]
if pushNames == "" {
@@ -560,14 +572,6 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[
}
}
}
// if prefer-image-digest is set in the solver options, remove the image
// config digest from the exporter's response
for _, e := range so.Exports {
if e.Attrs["prefer-image-digest"] == "true" {
delete(rr.ExporterResponse, exptypes.ExporterImageConfigDigestKey)
break
}
}
return nil
})
}
@@ -619,7 +623,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[
// This is a fallback for some very old buildkit versions.
// Note that the mediatype isn't really correct as most of the time it is image manifest and
// not manifest list but actually both are handled because for Docker mediatypes the
// mediatype value in the Accept header does not seem to matter.
// mediatype value in the Accpet header does not seem to matter.
s, ok = r.ExporterResponse[exptypes.ExporterImageDigestKey]
if ok {
descs = append(descs, specs.Descriptor{
@@ -831,7 +835,7 @@ func remoteDigestWithMoby(ctx context.Context, d *driver.DriverHandle, name stri
if err != nil {
return "", err
}
img, err := api.ImageInspect(ctx, name)
img, _, err := api.ImageInspectWithRaw(ctx, name)
if err != nil {
return "", err
}
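
A recurring pattern in this compare: one side uses the Go 1.21 maps and slices helpers while the release branch keeps equivalent hand-rolled loops. A small sketch of the maps.Copy equivalence, with made-up values:

```go
package main

import (
	"fmt"
	"maps"
)

func main() {
	src := map[string]string{"a": "override", "b": "2"}

	// maps.Copy (Go 1.21+): keys from src overwrite existing keys in dst.
	dst1 := map[string]string{"a": "1"}
	maps.Copy(dst1, src)

	// The explicit loop kept on the release branch behaves identically.
	dst2 := map[string]string{"a": "1"}
	for k, v := range src {
		dst2[k] = v
	}

	fmt.Println(dst1["a"], dst2["a"]) // override override
}
```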

View File

@@ -4,7 +4,6 @@ import (
"context"
stderrors "errors"
"net"
"slices"
"github.com/containerd/platforms"
"github.com/docker/buildx/builder"
@@ -38,7 +37,15 @@ func Dial(ctx context.Context, nodes []builder.Node, pw progress.Writer, platfor
for _, ls := range resolved {
for _, rn := range ls {
if platform != nil {
if !slices.ContainsFunc(rn.platforms, platforms.Only(*platform).Match) {
p := *platform
var found bool
for _, pp := range rn.platforms {
if platforms.Only(p).Match(pp) {
found = true
break
}
}
if !found {
continue
}
}
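
The same modernization appears here: slices.ContainsFunc collapses the search loop into one call. A runnable sketch using the containerd platforms matcher, with made-up platform values:

```go
package main

import (
	"fmt"
	"slices"

	"github.com/containerd/platforms"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	have := []specs.Platform{
		{OS: "linux", Architecture: "arm64"},
		{OS: "linux", Architecture: "amd64"},
	}
	want := specs.Platform{OS: "linux", Architecture: "amd64"}

	// platforms.Only(want).Match reports whether a candidate platform
	// satisfies want; ContainsFunc applies it across the slice.
	fmt.Println(slices.ContainsFunc(have, platforms.Only(want).Match)) // true
}
```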

View File

@@ -3,7 +3,6 @@ package build
import (
"context"
"fmt"
"slices"
"sync"
"github.com/containerd/platforms"
@@ -222,7 +221,7 @@ func (r *nodeResolver) get(p specs.Platform, matcher matchMaker, additionalPlatf
for i, node := range r.nodes {
platforms := node.Platforms
if additionalPlatforms != nil {
platforms = slices.Clone(platforms)
platforms = append([]specs.Platform{}, platforms...)
platforms = append(platforms, additionalPlatforms(i, node)...)
}
for _, p2 := range platforms {

View File

@@ -2,7 +2,6 @@ package build
import (
"context"
"maps"
"os"
"path"
"path/filepath"
@@ -128,7 +127,9 @@ func getGitAttributes(ctx context.Context, contextPath, dockerfilePath string) (
if so.FrontendAttrs == nil {
so.FrontendAttrs = make(map[string]string)
}
maps.Copy(so.FrontendAttrs, res)
for k, v := range res {
so.FrontendAttrs[k] = v
}
if !setGitInfo || root == "" {
return

View File

@@ -9,7 +9,6 @@ import (
"testing"
"github.com/docker/buildx/util/gitutil"
"github.com/docker/buildx/util/gitutil/gittestutil"
"github.com/moby/buildkit/client"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/stretchr/testify/assert"
@@ -17,18 +16,18 @@ import (
)
func setupTest(tb testing.TB) {
gittestutil.Mktmp(tb)
gitutil.Mktmp(tb)
c, err := gitutil.New()
require.NoError(tb, err)
gittestutil.GitInit(c, tb)
gitutil.GitInit(c, tb)
df := []byte("FROM alpine:latest\n")
require.NoError(tb, os.WriteFile("Dockerfile", df, 0644))
gittestutil.GitAdd(c, tb, "Dockerfile")
gittestutil.GitCommit(c, tb, "initial commit")
gittestutil.GitSetRemote(c, tb, "origin", "git@github.com:docker/buildx.git")
gitutil.GitAdd(c, tb, "Dockerfile")
gitutil.GitCommit(c, tb, "initial commit")
gitutil.GitSetRemote(c, tb, "origin", "git@github.com:docker/buildx.git")
}
func TestGetGitAttributesNotGitRepo(t *testing.T) {
@@ -189,19 +188,19 @@ func TestLocalDirs(t *testing.T) {
}
func TestLocalDirsSub(t *testing.T) {
gittestutil.Mktmp(t)
gitutil.Mktmp(t)
c, err := gitutil.New()
require.NoError(t, err)
gittestutil.GitInit(c, t)
gitutil.GitInit(c, t)
df := []byte("FROM alpine:latest\n")
require.NoError(t, os.MkdirAll("app", 0755))
require.NoError(t, os.WriteFile("app/Dockerfile", df, 0644))
gittestutil.GitAdd(c, t, "app/Dockerfile")
gittestutil.GitCommit(c, t, "initial commit")
gittestutil.GitSetRemote(c, t, "origin", "git@github.com:docker/buildx.git")
gitutil.GitAdd(c, t, "app/Dockerfile")
gitutil.GitCommit(c, t, "initial commit")
gitutil.GitSetRemote(c, t, "origin", "git@github.com:docker/buildx.git")
so := &client.SolveOpt{
FrontendAttrs: map[string]string{},

View File

@@ -11,8 +11,8 @@ import (
"strings"
"syscall"
"github.com/containerd/containerd/v2/core/content"
"github.com/containerd/containerd/v2/plugins/content/local"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/content/local"
"github.com/containerd/platforms"
"github.com/distribution/reference"
"github.com/docker/buildx/builder"
@@ -237,11 +237,6 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt *O
opt.Exports[i].Output = func(_ map[string]string) (io.WriteCloser, error) {
return w, nil
}
// if docker is using the containerd snapshotter, prefer to export the image digest
// (rather than the image config digest). See https://github.com/moby/moby/issues/45458.
if features[dockerutil.OCIImporter] {
opt.Exports[i].Attrs["prefer-image-digest"] = "true"
}
}
} else if !nodeDriver.Features(ctx)[driver.DockerExporter] {
return nil, nil, notSupported(driver.DockerExporter, nodeDriver, "https://docs.docker.com/go/build-exporters/")
@@ -323,7 +318,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt *O
switch opt.NetworkMode {
case "host":
so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
so.AllowedEntitlements = append(so.AllowedEntitlements, entitlements.EntitlementNetworkHost.String())
so.AllowedEntitlements = append(so.AllowedEntitlements, entitlements.EntitlementNetworkHost)
case "none":
so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
case "", "default":

View File

@@ -5,12 +5,11 @@ import (
"encoding/base64"
"encoding/json"
"io"
"maps"
"strings"
"sync"
"github.com/containerd/containerd/v2/core/content"
"github.com/containerd/containerd/v2/core/content/proxy"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/content/proxy"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/progress"
controlapi "github.com/moby/buildkit/api/services/control"
@@ -41,7 +40,9 @@ func setRecordProvenance(ctx context.Context, c *client.Client, sr *client.Solve
if err != nil {
return err
}
maps.Copy(sr.ExporterResponse, res)
for k, v := range res {
sr.ExporterResponse[k] = v
}
return nil
})
}

View File

@@ -28,11 +28,11 @@ func TestSyncMultiReaderParallel(t *testing.T) {
readers := make([]io.ReadCloser, numReaders)
for i := range numReaders {
for i := 0; i < numReaders; i++ {
readers[i] = mr.NewReadCloser()
}
for i := range numReaders {
for i := 0; i < numReaders; i++ {
wg.Add(1)
go func(readerId int) {
defer wg.Done()
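
The only change in this test file is loop syntax: `for i := range n` over an integer requires Go 1.22, so the release branch keeps the three-clause form. Both iterate 0 through n-1:

```go
package main

import "fmt"

func main() {
	const n = 3

	// Go 1.22+: ranging over an integer yields 0, 1, ..., n-1.
	for i := range n {
		fmt.Print(i, " ")
	}
	fmt.Println()

	// Pre-1.22 equivalent kept on the release branch.
	for i := 0; i < n; i++ {
		fmt.Print(i, " ")
	}
	fmt.Println()
}
```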

View File

@@ -5,7 +5,6 @@ import (
"encoding/json"
"net/url"
"os"
"slices"
"sort"
"strings"
"sync"
@@ -200,7 +199,7 @@ func (b *Builder) Boot(ctx context.Context) (bool, error) {
err = err1
}
if err == nil && len(errCh) > 0 {
if err == nil && len(errCh) == len(toBoot) {
return false, <-errCh
}
return true, err
@@ -657,7 +656,13 @@ func parseBuildkitdFlags(inp string, driver string, driverOpts map[string]string
flags.StringArrayVar(&allowInsecureEntitlements, "allow-insecure-entitlement", nil, "")
_ = flags.Parse(res)
hasNetworkHostEntitlement := slices.Contains(allowInsecureEntitlements, "network.host")
var hasNetworkHostEntitlement bool
for _, e := range allowInsecureEntitlements {
if e == "network.host" {
hasNetworkHostEntitlement = true
break
}
}
var hasNetworkHostEntitlementInConf bool
if buildkitdConfigFile != "" {
@@ -666,8 +671,11 @@ func parseBuildkitdFlags(inp string, driver string, driverOpts map[string]string
return nil, err
} else if btoml != nil {
if ies := btoml.GetArray("insecure-entitlements"); ies != nil {
if slices.Contains(ies.([]string), "network.host") {
for _, e := range ies.([]string) {
if e == "network.host" {
hasNetworkHostEntitlementInConf = true
break
}
}
}
}
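
For context on the hunks above: parseBuildkitdFlags scans user-supplied buildkitd flags for --allow-insecure-entitlement before consulting the TOML config. A simplified runnable sketch of the flag-scanning half (the real code tokenizes with shellwords and also reads the config file):

```go
package main

import (
	"fmt"
	"slices"
	"strings"

	"github.com/spf13/pflag"
)

func main() {
	inp := "--allow-insecure-entitlement=network.host --debug"

	flags := pflag.NewFlagSet("buildkitd", pflag.ContinueOnError)
	var allowInsecure []string
	flags.StringArrayVar(&allowInsecure, "allow-insecure-entitlement", nil, "")
	flags.Bool("debug", false, "")
	_ = flags.Parse(strings.Fields(inp)) // shellwords in the real code

	fmt.Println(slices.Contains(allowInsecure, "network.host")) // true
}
```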

View File

@@ -29,10 +29,7 @@ func TestCsvToMap(t *testing.T) {
}
func TestParseBuildkitdFlags(t *testing.T) {
dirConf := t.TempDir()
buildkitdConfPath := path.Join(dirConf, "buildkitd-conf.toml")
require.NoError(t, os.WriteFile(buildkitdConfPath, []byte(`
buildkitdConf := `
# debug enables additional debug logging
debug = true
# insecure-entitlements allows insecure entitlements, disabled by default.
@@ -40,18 +37,10 @@ insecure-entitlements = [ "network.host", "security.insecure" ]
[log]
# log formatter: json or text
format = "text"
`), 0644))
buildkitdConfBrokenPath := path.Join(dirConf, "buildkitd-conf-broken.toml")
require.NoError(t, os.WriteFile(buildkitdConfBrokenPath, []byte(`
[worker.oci]
gc = "maybe"
`), 0644))
buildkitdConfUnknownFieldPath := path.Join(dirConf, "buildkitd-unknown-field.toml")
require.NoError(t, os.WriteFile(buildkitdConfUnknownFieldPath, []byte(`
foo = "bar"
`), 0644))
`
dirConf := t.TempDir()
buildkitdConfPath := path.Join(dirConf, "buildkitd-conf.toml")
require.NoError(t, os.WriteFile(buildkitdConfPath, []byte(buildkitdConf), 0644))
testCases := []struct {
name string
@@ -168,26 +157,6 @@ foo = "bar"
nil,
true,
},
{
"error parsing buildkit config",
"",
"docker-container",
nil,
buildkitdConfBrokenPath,
nil,
true,
},
{
"unknown field in buildkit config",
"",
"docker-container",
nil,
buildkitdConfUnknownFieldPath,
[]string{
"--allow-insecure-entitlement=network.host",
},
false,
},
}
for _, tt := range testCases {
tt := tt

View File

@@ -36,7 +36,6 @@ type Node struct {
Platforms []ocispecs.Platform
GCPolicy []client.PruneInfo
Labels map[string]string
CDIDevices []client.CDIDevice
}
// Nodes returns nodes for this builder.
@@ -169,7 +168,7 @@ func (b *Builder) LoadNodes(ctx context.Context, opts ...LoadNodesOption) (_ []N
// dynamic nodes are used in Kubernetes driver.
// Kubernetes' pods are dynamically mapped to BuildKit Nodes.
if di.DriverInfo != nil && len(di.DriverInfo.DynamicNodes) > 0 {
for i := range di.DriverInfo.DynamicNodes {
for i := 0; i < len(di.DriverInfo.DynamicNodes); i++ {
diClone := di
if pl := di.DriverInfo.DynamicNodes[i].Platforms; len(pl) > 0 {
diClone.Platforms = pl
@@ -260,7 +259,6 @@ func (n *Node) loadData(ctx context.Context, clientOpt ...client.ClientOpt) erro
n.GCPolicy = w.GCPolicy
n.Labels = w.Labels
}
n.CDIDevices = w.CDIDevices
}
sort.Strings(n.IDs)
n.Platforms = platformutil.Dedupe(n.Platforms)

View File

@@ -4,21 +4,25 @@ import (
"context"
"fmt"
"os"
"path/filepath"
"github.com/docker/buildx/commands"
controllererrors "github.com/docker/buildx/controller/errdefs"
"github.com/docker/buildx/util/desktop"
"github.com/docker/buildx/version"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli-plugins/metadata"
"github.com/docker/cli/cli-plugins/manager"
"github.com/docker/cli/cli-plugins/plugin"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/debug"
cliflags "github.com/docker/cli/cli/flags"
"github.com/moby/buildkit/solver/errdefs"
"github.com/moby/buildkit/util/stack"
"github.com/pkg/errors"
"go.opentelemetry.io/otel"
//nolint:staticcheck // vendored dependencies may still use this
"github.com/containerd/containerd/pkg/seed"
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
_ "github.com/docker/buildx/driver/docker"
@@ -31,13 +35,19 @@ import (
)
func init() {
//nolint:staticcheck
seed.WithTimeAndRand()
stack.SetVersionInfo(version.Version, version.Revision)
}
func runStandalone(cmd *command.DockerCli) error {
if err := cmd.Initialize(cliflags.NewClientOptions()); err != nil {
return err
}
defer flushMetrics(cmd)
executable := os.Args[0]
rootCmd := commands.NewRootCmd(filepath.Base(executable), false, cmd)
rootCmd := commands.NewRootCmd(os.Args[0], false, cmd)
return rootCmd.Execute()
}
@@ -58,7 +68,7 @@ func flushMetrics(cmd *command.DockerCli) {
func runPlugin(cmd *command.DockerCli) error {
rootCmd := commands.NewRootCmd("buildx", true, cmd)
return plugin.RunPlugin(cmd, rootCmd, metadata.Metadata{
return plugin.RunPlugin(cmd, rootCmd, manager.Metadata{
SchemaVersion: "0.1.0",
Vendor: "Docker Inc.",
Version: version.Version,
@@ -111,6 +121,11 @@ func main() {
var ebr *desktop.ErrorWithBuildRef
if errors.As(err, &ebr) {
ebr.Print(cmd.Err())
} else {
var be *controllererrors.BuildError
if errors.As(err, &be) {
be.PrintBuildDetails(cmd.Err())
}
}
os.Exit(1)

View File

@@ -25,6 +25,7 @@ import (
"github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/localstate"
"github.com/docker/buildx/util/buildflags"
"github.com/docker/buildx/util/cobrautil"
"github.com/docker/buildx/util/cobrautil/completion"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/desktop"
@@ -37,14 +38,15 @@ import (
"github.com/moby/buildkit/util/progress/progressui"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/tonistiigi/go-csvvalue"
"go.opentelemetry.io/otel/attribute"
)
type bakeOptions struct {
files []string
overrides []string
printOnly bool
listTargets bool
listVars bool
sbom string
provenance string
allow []string
@@ -54,23 +56,12 @@ type bakeOptions struct {
exportPush bool
exportLoad bool
callFunc string
print bool
list string
// TODO: remove deprecated flags
listTargets bool
listVars bool
}
func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in bakeOptions, cFlags commonFlags) (err error) {
mp := dockerCli.MeterProvider()
ctx, end, err := tracing.TraceCurrentCommand(ctx, append([]string{"bake"}, targets...),
attribute.String("builder", in.builder),
attribute.StringSlice("targets", targets),
attribute.StringSlice("files", in.files),
)
ctx, end, err := tracing.TraceCurrentCommand(ctx, "bake")
if err != nil {
return err
}
@@ -130,13 +121,9 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
var nodes []builder.Node
var progressConsoleDesc, progressTextDesc string
if in.print && in.list != "" {
return errors.New("--print and --list are mutually exclusive")
}
// instance only needed for reading remote bake files or building
var driverType string
if url != "" || !(in.print || in.list != "") {
if url != "" || !(in.printOnly || in.listTargets || in.listVars) {
b, err := builder.New(dockerCli,
builder.WithName(in.builder),
builder.WithContextPathHash(contextPathHash),
@@ -197,7 +184,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
"BAKE_LOCAL_PLATFORM": platforms.Format(platforms.DefaultSpec()),
}
if in.list != "" {
if in.listTargets || in.listVars {
cfg, pm, err := bake.ParseFiles(files, defaults)
if err != nil {
return err
@@ -205,15 +192,10 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
if err = printer.Wait(); err != nil {
return err
}
list, err := parseList(in.list)
if err != nil {
return err
}
switch list.Type {
case "targets":
return printTargetList(dockerCli.Out(), list.Format, cfg)
case "variables":
return printVars(dockerCli.Out(), list.Format, pm.AllVariables)
if in.listTargets {
return printTargetList(dockerCli.Out(), cfg)
} else if in.listVars {
return printVars(dockerCli.Out(), pm.AllVariables)
}
}
@@ -249,7 +231,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
Target: tgts,
}
if in.print {
if in.printOnly {
if err = printer.Wait(); err != nil {
return err
}
@@ -275,11 +257,9 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
if err != nil {
return err
}
if progressMode != progressui.RawJSONMode {
if err := exp.Prompt(ctx, url != "", &syncWriter{w: dockerCli.Err(), wait: printer.Wait}); err != nil {
return err
}
}
if printer.IsDone() {
// init new printer as old one was stopped to show the prompt
if err := makePrinter(); err != nil {
@@ -287,7 +267,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
}
}
if err := saveLocalStateGroup(dockerCli, in, targets, bo); err != nil {
if err := saveLocalStateGroup(dockerCli, in, targets, bo, overrides, def); err != nil {
return err
}
@@ -309,7 +289,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
desktop.PrintBuildDetails(os.Stderr, printer.BuildRefs(), term)
}
if len(in.metadataFile) > 0 {
dt := make(map[string]any)
dt := make(map[string]interface{})
for t, r := range resp {
dt[t] = decodeExporterResponse(r.ExporterResponse)
}
@@ -424,14 +404,6 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
fmt.Fprintln(dockerCli.Out(), string(dt))
}
for _, name := range names {
if sp, ok := resp[name]; ok {
if v, ok := sp.ExporterResponse["frontend.result.inlinemessage"]; ok {
fmt.Fprintf(dockerCli.Out(), "\n# %s\n%s\n", name, v)
}
}
}
if exitCode != 0 {
os.Exit(exitCode)
}
@@ -455,13 +427,6 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
if !cmd.Flags().Lookup("pull").Changed {
cFlags.pull = nil
}
if options.list == "" {
if options.listTargets {
options.list = "targets"
} else if options.listVars {
options.list = "variables"
}
}
options.builder = rootOpts.builder
options.metadataFile = cFlags.metadataFile
// Other common flags (noCache, pull and progress) are processed in runBake function.
@@ -474,6 +439,7 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
flags.StringArrayVarP(&options.files, "file", "f", []string{}, "Build definition file")
flags.BoolVar(&options.exportLoad, "load", false, `Shorthand for "--set=*.output=type=docker"`)
flags.BoolVar(&options.printOnly, "print", false, "Print the options without building")
flags.BoolVar(&options.exportPush, "push", false, `Shorthand for "--set=*.output=type=registry"`)
flags.StringVar(&options.sbom, "sbom", "", `Shorthand for "--set=*.attest=type=sbom"`)
flags.StringVar(&options.provenance, "provenance", "", `Shorthand for "--set=*.attest=type=provenance"`)
@@ -484,30 +450,20 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
flags.VarPF(callAlias(&options.callFunc, "check"), "check", "", `Shorthand for "--call=check"`)
flags.Lookup("check").NoOptDefVal = "true"
flags.BoolVar(&options.print, "print", false, "Print the options without building")
flags.StringVar(&options.list, "list", "", "List targets or variables")
// TODO: remove deprecated flags
flags.BoolVar(&options.listTargets, "list-targets", false, "List available targets")
cobrautil.MarkFlagsExperimental(flags, "list-targets")
flags.MarkHidden("list-targets")
flags.MarkDeprecated("list-targets", "list-targets is deprecated, use list=targets instead")
flags.BoolVar(&options.listVars, "list-variables", false, "List defined variables")
cobrautil.MarkFlagsExperimental(flags, "list-variables")
flags.MarkHidden("list-variables")
flags.MarkDeprecated("list-variables", "list-variables is deprecated, use list=variables instead")
commonBuildFlags(&cFlags, flags)
return cmd
}
func saveLocalStateGroup(dockerCli command.Cli, in bakeOptions, targets []string, bo map[string]build.Options) error {
l, err := localstate.New(confutil.NewConfig(dockerCli))
if err != nil {
return err
}
defer l.MigrateIfNeeded()
func saveLocalStateGroup(dockerCli command.Cli, in bakeOptions, targets []string, bo map[string]build.Options, overrides []string, def any) error {
prm := confutil.MetadataProvenance()
if len(in.metadataFile) == 0 {
prm = confutil.MetadataProvenanceModeDisabled
@@ -527,10 +483,19 @@ func saveLocalStateGroup(dockerCli command.Cli, in bakeOptions, targets []string
if len(refs) == 0 {
return nil
}
l, err := localstate.New(confutil.NewConfig(dockerCli))
if err != nil {
return err
}
dtdef, err := json.MarshalIndent(def, "", " ")
if err != nil {
return err
}
return l.SaveGroup(groupRef, localstate.StateGroup{
Refs: refs,
Definition: dtdef,
Targets: targets,
Inputs: overrides,
Refs: refs,
})
}
@@ -592,70 +557,10 @@ func readBakeFiles(ctx context.Context, nodes []builder.Node, url string, names
return
}
type listEntry struct {
Type string
Format string
}
func parseList(input string) (listEntry, error) {
res := listEntry{}
fields, err := csvvalue.Fields(input, nil)
if err != nil {
return res, err
}
if len(fields) == 1 && fields[0] == input && !strings.HasPrefix(input, "type=") {
res.Type = input
}
if res.Type == "" {
for _, field := range fields {
key, value, ok := strings.Cut(field, "=")
if !ok {
return res, errors.Errorf("invalid value %s", field)
}
key = strings.TrimSpace(strings.ToLower(key))
switch key {
case "type":
res.Type = value
case "format":
res.Format = value
default:
return res, errors.Errorf("unexpected key '%s' in '%s'", key, field)
}
}
}
if res.Format == "" {
res.Format = "table"
}
switch res.Type {
case "targets", "variables":
default:
return res, errors.Errorf("invalid list type %q", res.Type)
}
switch res.Format {
case "table", "json":
default:
return res, errors.Errorf("invalid list format %q", res.Format)
}
return res, nil
}
func printVars(w io.Writer, format string, vars []*hclparser.Variable) error {
func printVars(w io.Writer, vars []*hclparser.Variable) error {
slices.SortFunc(vars, func(a, b *hclparser.Variable) int {
return cmp.Compare(a.Name, b.Name)
})
if format == "json" {
enc := json.NewEncoder(w)
enc.SetIndent("", " ")
return enc.Encode(vars)
}
tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
defer tw.Flush()
@@ -673,7 +578,12 @@ func printVars(w io.Writer, format string, vars []*hclparser.Variable) error {
return nil
}
func printTargetList(w io.Writer, format string, cfg *bake.Config) error {
func printTargetList(w io.Writer, cfg *bake.Config) error {
tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
defer tw.Flush()
tw.Write([]byte("TARGET\tDESCRIPTION\n"))
type targetOrGroup struct {
name string
target *bake.Target
@@ -692,20 +602,6 @@ func printTargetList(w io.Writer, format string, cfg *bake.Config) error {
return cmp.Compare(a.name, b.name)
})
var tw *tabwriter.Writer
if format == "table" {
tw = tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
defer tw.Flush()
tw.Write([]byte("TARGET\tDESCRIPTION\n"))
}
type targetList struct {
Name string `json:"name"`
Description string `json:"description,omitempty"`
Group bool `json:"group,omitempty"`
}
var targetsList []targetList
for _, tgt := range list {
if strings.HasPrefix(tgt.name, "_") {
// convention for a private target
@@ -714,9 +610,9 @@ func printTargetList(w io.Writer, format string, cfg *bake.Config) error {
var descr string
if tgt.target != nil {
descr = tgt.target.Description
targetsList = append(targetsList, targetList{Name: tgt.name, Description: descr})
} else if tgt.group != nil {
descr = tgt.group.Description
if len(tgt.group.Targets) > 0 {
slices.Sort(tgt.group.Targets)
names := strings.Join(tgt.group.Targets, ", ")
@@ -726,18 +622,9 @@ func printTargetList(w io.Writer, format string, cfg *bake.Config) error {
descr = names
}
}
targetsList = append(targetsList, targetList{Name: tgt.name, Description: descr, Group: true})
}
if format == "table" {
fmt.Fprintf(tw, "%s\t%s\n", tgt.name, descr)
}
}
if format == "json" {
enc := json.NewEncoder(w)
enc.SetIndent("", " ")
return enc.Encode(targetsList)
}
return nil
}
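
The removed printTargetList JSON path encodes the targetList entries shown above. A short sketch of the resulting `--list format=json` output shape, with made-up target names; the field names mirror the targetList struct from the removed code:

```go
package main

import (
	"encoding/json"
	"os"
)

func main() {
	type targetList struct {
		Name        string `json:"name"`
		Description string `json:"description,omitempty"`
		Group       bool   `json:"group,omitempty"`
	}
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	_ = enc.Encode([]targetList{
		{Name: "default", Description: "app, docs", Group: true},
		{Name: "app", Description: "build the application image"},
	})
}
```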

View File

@@ -11,7 +11,6 @@ import (
"io"
"os"
"path/filepath"
"slices"
"strconv"
"strings"
"sync"
@@ -42,6 +41,7 @@ import (
"github.com/docker/cli/cli/command"
dockeropts "github.com/docker/cli/opts"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/pkg/ioutils"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/exporter/containerimage/exptypes"
"github.com/moby/buildkit/frontend/subrequests"
@@ -52,7 +52,6 @@ import (
solverpb "github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/grpcerrors"
"github.com/moby/buildkit/util/progress/progressui"
"github.com/moby/sys/atomicwriter"
"github.com/morikuni/aec"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -104,10 +103,12 @@ type buildOptions struct {
exportPush bool
exportLoad bool
control.ControlOptions
invokeConfig *invokeConfig
}
func (o *buildOptions) toControllerOptions() (*cbuild.Options, error) {
func (o *buildOptions) toControllerOptions() (*controllerapi.BuildOptions, error) {
var err error
buildArgs, err := listToMap(o.buildArgs, true)
@@ -120,7 +121,7 @@ func (o *buildOptions) toControllerOptions() (*cbuild.Options, error) {
return nil, err
}
opts := cbuild.Options{
opts := controllerapi.BuildOptions{
Allow: o.allow,
Annotations: o.annotations,
BuildArgs: buildArgs,
@@ -155,7 +156,7 @@ func (o *buildOptions) toControllerOptions() (*cbuild.Options, error) {
return nil, err
}
inAttests := slices.Clone(o.attests)
inAttests := append([]string{}, o.attests...)
if o.provenance != "" {
inAttests = append(inAttests, buildflags.CanonicalizeAttest("provenance", o.provenance))
}
@@ -182,17 +183,14 @@ func (o *buildOptions) toControllerOptions() (*cbuild.Options, error) {
}
}
cacheFrom, err := buildflags.ParseCacheEntry(o.cacheFrom)
opts.CacheFrom, err = buildflags.ParseCacheEntry(o.cacheFrom)
if err != nil {
return nil, err
}
opts.CacheFrom = cacheFrom.ToPB()
cacheTo, err := buildflags.ParseCacheEntry(o.cacheTo)
opts.CacheTo, err = buildflags.ParseCacheEntry(o.cacheTo)
if err != nil {
return nil, err
}
opts.CacheTo = cacheTo.ToPB()
opts.Secrets, err = buildflags.ParseSecretSpecs(o.secrets)
if err != nil {
@@ -284,11 +282,7 @@ func (o *buildOptionsHash) String() string {
func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions) (err error) {
mp := dockerCli.MeterProvider()
ctx, end, err := tracing.TraceCurrentCommand(ctx, []string{"build", options.contextPath},
attribute.String("builder", options.builder),
attribute.String("context", options.contextPath),
attribute.String("dockerfile", options.dockerfileName),
)
ctx, end, err := tracing.TraceCurrentCommand(ctx, "build")
if err != nil {
return err
}
@@ -402,10 +396,6 @@ func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions)
os.Exit(exitcode)
}
}
if v, ok := resp.ExporterResponse["frontend.result.inlinemessage"]; ok {
fmt.Fprintf(dockerCli.Out(), "\n%s\n", v)
return nil
}
return nil
}
@@ -418,7 +408,7 @@ func getImageID(resp map[string]string) string {
return dgst
}
func runBasicBuild(ctx context.Context, dockerCli command.Cli, opts *cbuild.Options, printer *progress.Printer) (*client.SolveResponse, *build.Inputs, error) {
func runBasicBuild(ctx context.Context, dockerCli command.Cli, opts *controllerapi.BuildOptions, printer *progress.Printer) (*client.SolveResponse, *build.Inputs, error) {
resp, res, dfmap, err := cbuild.RunBuild(ctx, dockerCli, opts, dockerCli.In(), printer, false)
if res != nil {
res.Done()
@@ -426,12 +416,15 @@ func runBasicBuild(ctx context.Context, dockerCli command.Cli, opts *cbuild.Opti
return resp, dfmap, err
}
func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *cbuild.Options, options buildOptions, printer *progress.Printer) (*client.SolveResponse, *build.Inputs, error) {
func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *controllerapi.BuildOptions, options buildOptions, printer *progress.Printer) (*client.SolveResponse, *build.Inputs, error) {
if options.invokeConfig != nil && (options.dockerfileName == "-" || options.contextPath == "-") {
// stdin must be usable for monitor
return nil, nil, errors.Errorf("Dockerfile or context from stdin is not supported with invoke")
}
c := controller.NewController(ctx, dockerCli)
c, err := controller.NewController(ctx, options.ControlOptions, dockerCli, printer)
if err != nil {
return nil, nil, err
}
defer func() {
if err := c.Close(); err != nil {
logrus.Warnf("failed to close server connection %v", err)
@@ -440,7 +433,7 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *cbuild
// NOTE: buildx server has the current working directory different from the client
// so we need to resolve paths to absolute ones in the client.
opts, err := cbuild.ResolveOptionPaths(opts)
opts, err = controllerapi.ResolveOptionPaths(opts)
if err != nil {
return nil, nil, err
}
@@ -466,10 +459,11 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *cbuild
})
}
resp, inputs, err = c.Build(ctx, opts, pr, printer)
ref, resp, inputs, err = c.Build(ctx, opts, pr, printer)
if err != nil {
var be *controllererrors.BuildError
if errors.As(err, &be) {
ref = be.Ref
retErr = err
// We can proceed to monitor
} else {
@@ -509,8 +503,8 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *cbuild
resp, retErr = monitorBuildResult.Resp, monitorBuildResult.Err
}
} else {
if err := c.Close(); err != nil {
logrus.Warnf("close error: %v", err)
if err := c.Disconnect(ctx, ref); err != nil {
logrus.Warnf("disconnect error: %v", err)
}
}
@@ -596,7 +590,7 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions, debugConfig *debug.D
flags.StringSliceVar(&options.extraHosts, "add-host", []string{}, `Add a custom host-to-IP mapping (format: "host:ip")`)
flags.StringArrayVar(&options.allow, "allow", []string{}, `Allow extra privileged entitlement (e.g., "network.host", "security.insecure")`)
flags.StringSliceVar(&options.allow, "allow", []string{}, `Allow extra privileged entitlement (e.g., "network.host", "security.insecure")`)
flags.StringArrayVarP(&options.annotations, "annotation", "", []string{}, "Add annotation to the image")
@@ -647,6 +641,14 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions, debugConfig *debug.D
flags.StringVar(&options.sbom, "sbom", "", `Shorthand for "--attest=type=sbom"`)
flags.StringVar(&options.provenance, "provenance", "", `Shorthand for "--attest=type=provenance"`)
if confutil.IsExperimental() {
// TODO: move this to debug command if needed
flags.StringVar(&options.Root, "root", "", "Specify root directory of server to connect")
flags.BoolVar(&options.Detach, "detach", false, "Detach buildx server (supported only on linux)")
flags.StringVar(&options.ServerConfig, "server-config", "", "Specify buildx server config file (used only when launching new server)")
cobrautil.MarkFlagsExperimental(flags, "root", "detach", "server-config")
}
flags.StringVar(&options.callFunc, "call", "build", `Set method for evaluating build ("check", "outline", "targets")`)
flags.VarPF(callAlias(&options.callFunc, "check"), "check", "", `Shorthand for "--call=check"`)
flags.Lookup("check").NoOptDefVal = "true"
@@ -718,7 +720,7 @@ type commonFlags struct {
func commonBuildFlags(options *commonFlags, flags *pflag.FlagSet) {
options.noCache = flags.Bool("no-cache", false, "Do not use cache when building the image")
flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "quiet", "plain", "tty", "rawjson"). Use plain to show container output`)
flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty", "rawjson"). Use plain to show container output`)
options.pull = flags.Bool("pull", false, "Always attempt to pull all referenced images")
flags.StringVar(&options.metadataFile, "metadata-file", "", "Write build result metadata to a file")
}
@@ -735,15 +737,15 @@ func checkWarnedFlags(f *pflag.Flag) {
}
}
func writeMetadataFile(filename string, dt any) error {
func writeMetadataFile(filename string, dt interface{}) error {
b, err := json.MarshalIndent(dt, "", " ")
if err != nil {
return err
}
return atomicwriter.WriteFile(filename, b, 0644)
return ioutils.AtomicWriteFile(filename, b, 0644)
}
func decodeExporterResponse(exporterResponse map[string]string) map[string]any {
func decodeExporterResponse(exporterResponse map[string]string) map[string]interface{} {
decFunc := func(k, v string) ([]byte, error) {
if k == "result.json" {
// result.json is part of metadata response for subrequests which
@@ -752,16 +754,16 @@ func decodeExporterResponse(exporterResponse map[string]string) map[string]any {
}
return base64.StdEncoding.DecodeString(v)
}
out := make(map[string]any)
out := make(map[string]interface{})
for k, v := range exporterResponse {
dt, err := decFunc(k, v)
if err != nil {
out[k] = v
continue
}
var raw map[string]any
var raw map[string]interface{}
if err = json.Unmarshal(dt, &raw); err != nil || len(raw) == 0 {
var rawList []map[string]any
var rawList []map[string]interface{}
if err = json.Unmarshal(dt, &rawList); err != nil || len(rawList) == 0 {
out[k] = v
continue
@@ -1003,12 +1005,12 @@ func (cfg *invokeConfig) needsDebug(retErr error) bool {
}
}
func (cfg *invokeConfig) runDebug(ctx context.Context, ref string, options *cbuild.Options, c control.BuildxController, stdin io.ReadCloser, stdout io.WriteCloser, stderr console.File, progress *progress.Printer) (*monitor.MonitorBuildResult, error) {
func (cfg *invokeConfig) runDebug(ctx context.Context, ref string, options *controllerapi.BuildOptions, c control.BuildxController, stdin io.ReadCloser, stdout io.WriteCloser, stderr console.File, progress *progress.Printer) (*monitor.MonitorBuildResult, error) {
con := console.Current()
if err := con.SetRaw(); err != nil {
// TODO: run disconnect in build command (on error case)
if err := c.Close(); err != nil {
logrus.Warnf("close error: %v", err)
if err := c.Disconnect(ctx, ref); err != nil {
logrus.Warnf("disconnect error: %v", err)
}
return nil, errors.Errorf("failed to configure terminal: %v", err)
}
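
decodeExporterResponse above is shared by both sides apart from the any/interface{} spelling. A minimal sketch of its decoding rule for a single key: values are base64-encoded JSON, and anything that fails to decode is passed through verbatim:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

func decodeValue(v string) any {
	dt, err := base64.StdEncoding.DecodeString(v)
	if err != nil {
		return v // not base64: keep the raw string
	}
	var raw map[string]any
	if err := json.Unmarshal(dt, &raw); err != nil || len(raw) == 0 {
		return v // not a JSON object: keep the raw string
	}
	return raw
}

func main() {
	enc := base64.StdEncoding.EncodeToString([]byte(`{"digest":"sha256:abc"}`))
	fmt.Println(decodeValue(enc))     // map[digest:sha256:abc]
	fmt.Println(decodeValue("plain")) // plain
}
```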

View File

@@ -3,9 +3,11 @@ package debug
import (
"context"
"os"
"runtime"
"github.com/containerd/console"
"github.com/docker/buildx/controller"
"github.com/docker/buildx/controller/control"
controllerapi "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/monitor"
"github.com/docker/buildx/util/cobrautil"
@@ -33,6 +35,7 @@ type DebuggableCmd interface {
}
func RootCmd(dockerCli command.Cli, children ...DebuggableCmd) *cobra.Command {
var controlOptions control.ControlOptions
var progressMode string
var options DebugConfig
@@ -47,7 +50,10 @@ func RootCmd(dockerCli command.Cli, children ...DebuggableCmd) *cobra.Command {
}
ctx := context.TODO()
c := controller.NewController(ctx, dockerCli)
c, err := controller.NewController(ctx, controlOptions, dockerCli, printer)
if err != nil {
return err
}
defer func() {
if err := c.Close(); err != nil {
logrus.Warnf("failed to close server connection %v", err)
@@ -70,9 +76,13 @@ func RootCmd(dockerCli command.Cli, children ...DebuggableCmd) *cobra.Command {
flags := cmd.Flags()
flags.StringVar(&options.InvokeFlag, "invoke", "", "Launch a monitor with executing specified command")
flags.StringVar(&options.OnFlag, "on", "error", "When to launch the monitor ([always, error])")
flags.StringVar(&controlOptions.Root, "root", "", "Specify root directory of server to connect for the monitor")
flags.BoolVar(&controlOptions.Detach, "detach", runtime.GOOS == "linux", "Detach buildx server for the monitor (supported only on linux)")
flags.StringVar(&controlOptions.ServerConfig, "server-config", "", "Specify buildx server config file for the monitor (used only when launching new server)")
flags.StringVar(&progressMode, "progress", "auto", `Set type of progress output ("auto", "plain", "tty", "rawjson") for the monitor. Use plain to show container output`)
cobrautil.MarkFlagsExperimental(flags, "invoke", "on")
cobrautil.MarkFlagsExperimental(flags, "invoke", "on", "root", "detach", "server-config")
for _, c := range children {
cmd.AddCommand(c.NewDebugger(&options))

View File

@@ -124,7 +124,7 @@ func duCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
return cmd
}
func printKV(w io.Writer, k string, v any) {
func printKV(w io.Writer, k string, v interface{}) {
fmt.Fprintf(w, "%s:\t%v\n", k, v)
}

View File

@@ -1,160 +0,0 @@
package history
import (
"context"
"io"
"os"
"slices"
"github.com/containerd/console"
"github.com/containerd/platforms"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/localstate"
"github.com/docker/buildx/util/cobrautil/completion"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/desktop/bundle"
"github.com/docker/cli/cli/command"
"github.com/moby/buildkit/client"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
type exportOptions struct {
builder string
refs []string
output string
all bool
}
func runExport(ctx context.Context, dockerCli command.Cli, opts exportOptions) error {
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
if err != nil {
return err
}
nodes, err := b.LoadNodes(ctx, builder.WithData())
if err != nil {
return err
}
for _, node := range nodes {
if node.Err != nil {
return node.Err
}
}
if len(opts.refs) == 0 {
opts.refs = []string{""}
}
var res []historyRecord
for _, ref := range opts.refs {
recs, err := queryRecords(ctx, ref, nodes, &queryOptions{
CompletedOnly: true,
})
if err != nil {
return err
}
if len(recs) == 0 {
if ref == "" {
return errors.New("no records found")
}
return errors.Errorf("no record found for ref %q", ref)
}
if ref == "" {
slices.SortFunc(recs, func(a, b historyRecord) int {
return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
})
}
if opts.all {
res = append(res, recs...)
break
} else {
res = append(res, recs[0])
}
}
ls, err := localstate.New(confutil.NewConfig(dockerCli))
if err != nil {
return err
}
visited := map[*builder.Node]struct{}{}
var clients []*client.Client
for _, rec := range res {
if _, ok := visited[rec.node]; ok {
continue
}
c, err := rec.node.Driver.Client(ctx)
if err != nil {
return err
}
clients = append(clients, c)
}
toExport := make([]*bundle.Record, 0, len(res))
for _, rec := range res {
var defaultPlatform string
if p := rec.node.Platforms; len(p) > 0 {
defaultPlatform = platforms.FormatAll(platforms.Normalize(p[0]))
}
var stg *localstate.StateGroup
st, _ := ls.ReadRef(rec.node.Builder, rec.node.Name, rec.Ref)
if st != nil && st.GroupRef != "" {
stg, err = ls.ReadGroup(st.GroupRef)
if err != nil {
return err
}
}
toExport = append(toExport, &bundle.Record{
BuildHistoryRecord: rec.BuildHistoryRecord,
DefaultPlatform: defaultPlatform,
LocalState: st,
StateGroup: stg,
})
}
var w io.Writer = os.Stdout
if opts.output != "" {
f, err := os.Create(opts.output)
if err != nil {
return errors.Wrapf(err, "failed to create output file %q", opts.output)
}
defer f.Close()
w = f
} else {
if _, err := console.ConsoleFromFile(os.Stdout); err == nil {
return errors.Errorf("refusing to write to console, use --output to specify a file")
}
}
return bundle.Export(ctx, clients, w, toExport)
}
func exportCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
var options exportOptions
cmd := &cobra.Command{
Use: "export [OPTIONS] [REF]",
Short: "Export a build into Docker Desktop bundle",
RunE: func(cmd *cobra.Command, args []string) error {
if options.all && len(args) > 0 {
return errors.New("cannot specify refs when using --all")
}
options.refs = args
options.builder = *rootOpts.Builder
return runExport(cmd.Context(), dockerCli, options)
},
ValidArgsFunction: completion.Disable,
}
flags := cmd.Flags()
flags.StringVarP(&options.output, "output", "o", "", "Output file path")
flags.BoolVar(&options.all, "all", false, "Export all records for the builder")
return cmd
}
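
One detail worth noting in the removed export command: it refuses to write the binary bundle to a terminal. A small sketch of that guard using containerd's console package, exactly as the removed code calls it:

```go
package main

import (
	"fmt"
	"os"

	"github.com/containerd/console"
)

func main() {
	// ConsoleFromFile succeeds only when stdout is an actual TTY,
	// which is exactly when writing binary output should be refused.
	if _, err := console.ConsoleFromFile(os.Stdout); err == nil {
		fmt.Fprintln(os.Stderr, "refusing to write to console, use --output")
		os.Exit(1)
	}
	fmt.Println("stdout is redirected; safe to write the bundle")
}
```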

View File

@@ -1,135 +0,0 @@
package history
import (
"context"
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"os"
"strings"
remoteutil "github.com/docker/buildx/driver/remote/util"
"github.com/docker/buildx/util/cobrautil/completion"
"github.com/docker/buildx/util/desktop"
"github.com/docker/cli/cli/command"
"github.com/pkg/browser"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
type importOptions struct {
file []string
}
func runImport(ctx context.Context, dockerCli command.Cli, opts importOptions) error {
sock, err := desktop.BuildServerAddr()
if err != nil {
return err
}
tr := http.DefaultTransport.(*http.Transport).Clone()
tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) {
network, addr, ok := strings.Cut(sock, "://")
if !ok {
return nil, errors.Errorf("invalid endpoint address: %s", sock)
}
return remoteutil.DialContext(ctx, network, addr)
}
client := &http.Client{
Transport: tr,
}
var urls []string
if len(opts.file) == 0 {
u, err := importFrom(ctx, client, os.Stdin)
if err != nil {
return err
}
urls = append(urls, u...)
} else {
for _, fn := range opts.file {
var f *os.File
var rdr io.Reader = os.Stdin
if fn != "-" {
f, err = os.Open(fn)
if err != nil {
return errors.Wrapf(err, "failed to open file %s", fn)
}
rdr = f
}
u, err := importFrom(ctx, client, rdr)
if err != nil {
return err
}
urls = append(urls, u...)
if f != nil {
f.Close()
}
}
}
if len(urls) == 0 {
return errors.New("no build records found in the bundle")
}
for i, url := range urls {
fmt.Fprintln(dockerCli.Err(), url)
if i == 0 {
err = browser.OpenURL(url)
}
}
return err
}
func importFrom(ctx context.Context, c *http.Client, rdr io.Reader) ([]string, error) {
req, err := http.NewRequestWithContext(ctx, http.MethodPost, "http://docker-desktop/upload", rdr)
if err != nil {
return nil, errors.Wrap(err, "failed to create request")
}
resp, err := c.Do(req)
if err != nil {
return nil, errors.Wrap(err, "failed to send request, check if Docker Desktop is running")
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body)
return nil, errors.Errorf("failed to import build: %s", string(body))
}
var refs []string
dec := json.NewDecoder(resp.Body)
if err := dec.Decode(&refs); err != nil {
return nil, errors.Wrap(err, "failed to decode response")
}
var urls []string
for _, ref := range refs {
urls = append(urls, desktop.BuildURL(fmt.Sprintf(".imported/_/%s", ref)))
}
return urls, err
}
func importCmd(dockerCli command.Cli, _ RootOptions) *cobra.Command {
var options importOptions
cmd := &cobra.Command{
Use: "import [OPTIONS] < bundle.dockerbuild",
Short: "Import a build into Docker Desktop",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
return runImport(cmd.Context(), dockerCli, options)
},
ValidArgsFunction: completion.Disable,
}
flags := cmd.Flags()
flags.StringArrayVarP(&options.file, "file", "f", nil, "Import from a file path")
return cmd
}
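
The removed import command talks to Docker Desktop over a local socket by pinning the transport's dialer, so the URL host ("docker-desktop") is never actually resolved. A minimal sketch of that technique with a hypothetical socket path:

```go
package main

import (
	"context"
	"net"
	"net/http"
)

// newSocketClient returns an HTTP client whose requests always dial the
// given local address, ignoring whatever host appears in the request URL.
func newSocketClient(network, addr string) *http.Client {
	tr := http.DefaultTransport.(*http.Transport).Clone()
	tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) {
		var d net.Dialer
		return d.DialContext(ctx, network, addr)
	}
	return &http.Client{Transport: tr}
}

func main() {
	c := newSocketClient("unix", "/run/example.sock") // hypothetical path
	_ = c // c.Post("http://docker-desktop/upload", ...) would hit the socket
}
```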

View File

@@ -1,893 +0,0 @@
package history
import (
"bytes"
"cmp"
"context"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"slices"
"strconv"
"strings"
"text/tabwriter"
"text/template"
"time"
"github.com/containerd/containerd/v2/core/content"
"github.com/containerd/containerd/v2/core/content/proxy"
"github.com/containerd/containerd/v2/core/images"
"github.com/containerd/platforms"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/localstate"
"github.com/docker/buildx/util/cobrautil/completion"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/desktop"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/command/formatter"
"github.com/docker/cli/cli/debug"
slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common"
slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/solver/errdefs"
provenancetypes "github.com/moby/buildkit/solver/llbsolver/provenance/types"
"github.com/moby/buildkit/util/grpcerrors"
"github.com/moby/buildkit/util/stack"
"github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/tonistiigi/go-csvvalue"
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
proto "google.golang.org/protobuf/proto"
)
type statusT string
const (
statusComplete statusT = "completed"
statusRunning statusT = "running"
statusError statusT = "failed"
statusCanceled statusT = "canceled"
)
type inspectOptions struct {
builder string
ref string
format string
}
type inspectOutput struct {
Name string `json:",omitempty"`
Ref string
Context string `json:",omitempty"`
Dockerfile string `json:",omitempty"`
VCSRepository string `json:",omitempty"`
VCSRevision string `json:",omitempty"`
Target string `json:",omitempty"`
Platform []string `json:",omitempty"`
KeepGitDir bool `json:",omitempty"`
NamedContexts []keyValueOutput `json:",omitempty"`
StartedAt *time.Time `json:",omitempty"`
CompletedAt *time.Time `json:",omitempty"`
Duration time.Duration `json:",omitempty"`
Status statusT `json:",omitempty"`
Error *errorOutput `json:",omitempty"`
NumCompletedSteps int32
NumTotalSteps int32
NumCachedSteps int32
BuildArgs []keyValueOutput `json:",omitempty"`
Labels []keyValueOutput `json:",omitempty"`
Config configOutput `json:",omitempty"`
Materials []materialOutput `json:",omitempty"`
Attachments []attachmentOutput `json:",omitempty"`
Errors []string `json:",omitempty"`
}
type configOutput struct {
Network string `json:",omitempty"`
ExtraHosts []string `json:",omitempty"`
Hostname string `json:",omitempty"`
CgroupParent string `json:",omitempty"`
ImageResolveMode string `json:",omitempty"`
MultiPlatform bool `json:",omitempty"`
NoCache bool `json:",omitempty"`
NoCacheFilter []string `json:",omitempty"`
ShmSize string `json:",omitempty"`
Ulimit string `json:",omitempty"`
CacheMountNS string `json:",omitempty"`
DockerfileCheckConfig string `json:",omitempty"`
SourceDateEpoch string `json:",omitempty"`
SandboxHostname string `json:",omitempty"`
RestRaw []keyValueOutput `json:",omitempty"`
}
type materialOutput struct {
URI string `json:",omitempty"`
Digests []string `json:",omitempty"`
}
type attachmentOutput struct {
Digest string `json:",omitempty"`
Platform string `json:",omitempty"`
Type string `json:",omitempty"`
}
type errorOutput struct {
Code int `json:",omitempty"`
Message string `json:",omitempty"`
Name string `json:",omitempty"`
Logs []string `json:",omitempty"`
Sources []byte `json:",omitempty"`
Stack []byte `json:",omitempty"`
}
type keyValueOutput struct {
Name string `json:",omitempty"`
Value string `json:",omitempty"`
}
func readAttr[T any](attrs map[string]string, k string, dest *T, f func(v string) (T, bool)) {
if sv, ok := attrs[k]; ok {
if f != nil {
v, ok := f(sv)
if ok {
*dest = v
}
}
if d, ok := any(dest).(*string); ok {
*d = sv
}
}
delete(attrs, k)
}
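
readAttr is the workhorse of the removed inspect command. A runnable demonstration (the function body is copied from the listing above; the attribute values are made up): dest is written only when the parse callback succeeds, string destinations receive the raw value directly, and the key is always consumed from attrs:

```go
package main

import (
	"fmt"
	"strconv"
)

func readAttr[T any](attrs map[string]string, k string, dest *T, f func(v string) (T, bool)) {
	if sv, ok := attrs[k]; ok {
		if f != nil {
			if v, ok := f(sv); ok {
				*dest = v
			}
		}
		if d, ok := any(dest).(*string); ok {
			*d = sv
		}
	}
	delete(attrs, k)
}

func main() {
	attrs := map[string]string{"multi-platform": "true", "target": "release"}

	var multi bool
	readAttr(attrs, "multi-platform", &multi, func(v string) (bool, bool) {
		b, err := strconv.ParseBool(v)
		return b, err == nil
	})

	var target string
	readAttr(attrs, "target", &target, nil)

	fmt.Println(multi, target, len(attrs)) // true release 0
}
```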
func runInspect(ctx context.Context, dockerCli command.Cli, opts inspectOptions) error {
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
if err != nil {
return err
}
nodes, err := b.LoadNodes(ctx)
if err != nil {
return err
}
for _, node := range nodes {
if node.Err != nil {
return node.Err
}
}
recs, err := queryRecords(ctx, opts.ref, nodes, nil)
if err != nil {
return err
}
if len(recs) == 0 {
if opts.ref == "" {
return errors.New("no records found")
}
return errors.Errorf("no record found for ref %q", opts.ref)
}
rec := &recs[0]
c, err := rec.node.Driver.Client(ctx)
if err != nil {
return err
}
store := proxy.NewContentStore(c.ContentClient())
var defaultPlatform string
workers, err := c.ListWorkers(ctx)
if err != nil {
return errors.Wrap(err, "failed to list workers")
}
workers0:
for _, w := range workers {
for _, p := range w.Platforms {
defaultPlatform = platforms.FormatAll(platforms.Normalize(p))
break workers0
}
}
ls, err := localstate.New(confutil.NewConfig(dockerCli))
if err != nil {
return err
}
st, _ := ls.ReadRef(rec.node.Builder, rec.node.Name, rec.Ref)
attrs := rec.FrontendAttrs
delete(attrs, "frontend.caps")
var out inspectOutput
var context string
var dockerfile string
if st != nil {
context = st.LocalPath
dockerfile = st.DockerfilePath
wd, _ := os.Getwd()
if dockerfile != "" && dockerfile != "-" {
if rel, err := filepath.Rel(context, dockerfile); err == nil {
if !strings.HasPrefix(rel, ".."+string(filepath.Separator)) {
dockerfile = rel
}
}
}
if context != "" {
if rel, err := filepath.Rel(wd, context); err == nil {
if !strings.HasPrefix(rel, ".."+string(filepath.Separator)) {
context = rel
}
}
}
}
if v, ok := attrs["context"]; ok && context == "" {
delete(attrs, "context")
context = v
}
if dockerfile == "" {
if v, ok := attrs["filename"]; ok {
dockerfile = v
if dfdir, ok := attrs["vcs:localdir:dockerfile"]; ok {
dockerfile = filepath.Join(dfdir, dockerfile)
}
}
}
delete(attrs, "filename")
out.Name = buildName(rec.FrontendAttrs, st)
out.Ref = rec.Ref
out.Context = context
out.Dockerfile = dockerfile
if _, ok := attrs["context"]; !ok {
if src, ok := attrs["vcs:source"]; ok {
out.VCSRepository = src
}
if rev, ok := attrs["vcs:revision"]; ok {
out.VCSRevision = rev
}
}
readAttr(attrs, "target", &out.Target, nil)
readAttr(attrs, "platform", &out.Platform, func(v string) ([]string, bool) {
return tryParseValue(v, &out.Errors, func(v string) ([]string, error) {
var pp []string
for _, v := range strings.Split(v, ",") {
p, err := platforms.Parse(v)
if err != nil {
return nil, err
}
pp = append(pp, platforms.FormatAll(platforms.Normalize(p)))
}
if len(pp) == 0 {
pp = append(pp, defaultPlatform)
}
return pp, nil
})
})
readAttr(attrs, "build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR", &out.KeepGitDir, func(v string) (bool, bool) {
return tryParseValue(v, &out.Errors, strconv.ParseBool)
})
out.NamedContexts = readKeyValues(attrs, "context:")
if rec.CreatedAt != nil {
tm := rec.CreatedAt.AsTime().Local()
out.StartedAt = &tm
}
out.Status = statusRunning
if rec.CompletedAt != nil {
tm := rec.CompletedAt.AsTime().Local()
out.CompletedAt = &tm
out.Status = statusComplete
}
if rec.Error != nil || rec.ExternalError != nil {
out.Error = &errorOutput{}
if rec.Error != nil {
if codes.Code(rec.Error.Code) == codes.Canceled {
out.Status = statusCanceled
} else {
out.Status = statusError
}
out.Error.Code = int(codes.Code(rec.Error.Code))
out.Error.Message = rec.Error.Message
}
if rec.ExternalError != nil {
dt, err := content.ReadBlob(ctx, store, ociDesc(rec.ExternalError))
if err != nil {
return errors.Wrapf(err, "failed to read external error %s", rec.ExternalError.Digest)
}
var st spb.Status
if err := proto.Unmarshal(dt, &st); err != nil {
return errors.Wrapf(err, "failed to unmarshal external error %s", rec.ExternalError.Digest)
}
retErr := grpcerrors.FromGRPC(status.ErrorProto(&st))
var errsources bytes.Buffer
for _, s := range errdefs.Sources(retErr) {
s.Print(&errsources)
errsources.WriteString("\n")
}
out.Error.Sources = errsources.Bytes()
var ve *errdefs.VertexError
if errors.As(retErr, &ve) {
dgst, err := digest.Parse(ve.Vertex.Digest)
if err != nil {
return errors.Wrapf(err, "failed to parse vertex digest %s", ve.Vertex.Digest)
}
name, logs, err := loadVertexLogs(ctx, c, rec.Ref, dgst, 16)
if err != nil {
return errors.Wrapf(err, "failed to load vertex logs %s", dgst)
}
out.Error.Name = name
out.Error.Logs = logs
}
out.Error.Stack = fmt.Appendf(nil, "%+v", stack.Formatter(retErr))
}
}
if out.StartedAt != nil {
if out.CompletedAt != nil {
out.Duration = out.CompletedAt.Sub(*out.StartedAt)
} else {
out.Duration = rec.currentTimestamp.Sub(*out.StartedAt)
}
}
out.NumCompletedSteps = rec.NumCompletedSteps
out.NumTotalSteps = rec.NumTotalSteps
out.NumCachedSteps = rec.NumCachedSteps
out.BuildArgs = readKeyValues(attrs, "build-arg:")
out.Labels = readKeyValues(attrs, "label:")
readAttr(attrs, "force-network-mode", &out.Config.Network, nil)
readAttr(attrs, "hostname", &out.Config.Hostname, nil)
readAttr(attrs, "cgroup-parent", &out.Config.CgroupParent, nil)
readAttr(attrs, "image-resolve-mode", &out.Config.ImageResolveMode, nil)
readAttr(attrs, "build-arg:BUILDKIT_MULTI_PLATFORM", &out.Config.MultiPlatform, func(v string) (bool, bool) {
return tryParseValue(v, &out.Errors, strconv.ParseBool)
})
readAttr(attrs, "multi-platform", &out.Config.MultiPlatform, func(v string) (bool, bool) {
return tryParseValue(v, &out.Errors, strconv.ParseBool)
})
readAttr(attrs, "no-cache", &out.Config.NoCache, func(v string) (bool, bool) {
if v == "" {
return true, true
}
return false, false
})
readAttr(attrs, "no-cache", &out.Config.NoCacheFilter, func(v string) ([]string, bool) {
if v == "" {
return nil, false
}
return strings.Split(v, ","), true
})
readAttr(attrs, "add-hosts", &out.Config.ExtraHosts, func(v string) ([]string, bool) {
return tryParseValue(v, &out.Errors, func(v string) ([]string, error) {
fields, err := csvvalue.Fields(v, nil)
if err != nil {
return nil, err
}
return fields, nil
})
})
readAttr(attrs, "shm-size", &out.Config.ShmSize, nil)
readAttr(attrs, "ulimit", &out.Config.Ulimit, nil)
readAttr(attrs, "build-arg:BUILDKIT_CACHE_MOUNT_NS", &out.Config.CacheMountNS, nil)
readAttr(attrs, "build-arg:BUILDKIT_DOCKERFILE_CHECK", &out.Config.DockerfileCheckConfig, nil)
readAttr(attrs, "build-arg:SOURCE_DATE_EPOCH", &out.Config.SourceDateEpoch, nil)
readAttr(attrs, "build-arg:SANDBOX_HOSTNAME", &out.Config.SandboxHostname, nil)
var unusedAttrs []keyValueOutput
for k := range attrs {
if strings.HasPrefix(k, "vcs:") || strings.HasPrefix(k, "build-arg:") || strings.HasPrefix(k, "label:") || strings.HasPrefix(k, "context:") || strings.HasPrefix(k, "attest:") {
continue
}
unusedAttrs = append(unusedAttrs, keyValueOutput{
Name: k,
Value: attrs[k],
})
}
slices.SortFunc(unusedAttrs, func(a, b keyValueOutput) int {
return cmp.Compare(a.Name, b.Name)
})
out.Config.RestRaw = unusedAttrs
attachments, err := allAttachments(ctx, store, *rec)
if err != nil {
return err
}
provIndex := slices.IndexFunc(attachments, func(a attachment) bool {
return descrType(a.descr) == slsa02.PredicateSLSAProvenance
})
if provIndex != -1 {
prov := attachments[provIndex]
dt, err := content.ReadBlob(ctx, store, prov.descr)
if err != nil {
return errors.Errorf("failed to read provenance %s: %v", prov.descr.Digest, err)
}
var pred provenancetypes.ProvenancePredicate
if err := json.Unmarshal(dt, &pred); err != nil {
return errors.Errorf("failed to unmarshal provenance %s: %v", prov.descr.Digest, err)
}
for _, m := range pred.Materials {
out.Materials = append(out.Materials, materialOutput{
URI: m.URI,
Digests: digestSetToDigests(m.Digest),
})
}
}
if len(attachments) > 0 {
for _, a := range attachments {
p := ""
if a.platform != nil {
p = platforms.FormatAll(*a.platform)
}
out.Attachments = append(out.Attachments, attachmentOutput{
Digest: a.descr.Digest.String(),
Platform: p,
Type: descrType(a.descr),
})
}
}
if opts.format == formatter.JSONFormatKey {
enc := json.NewEncoder(dockerCli.Out())
enc.SetIndent("", " ")
return enc.Encode(out)
} else if opts.format != formatter.PrettyFormatKey {
tmpl, err := template.New("inspect").Parse(opts.format)
if err != nil {
return errors.Wrapf(err, "failed to parse format template")
}
var buf bytes.Buffer
if err := tmpl.Execute(&buf, out); err != nil {
return errors.Wrapf(err, "failed to execute format template")
}
fmt.Fprintln(dockerCli.Out(), buf.String())
return nil
}
tw := tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
if out.Name != "" {
fmt.Fprintf(tw, "Name:\t%s\n", out.Name)
}
if opts.ref == "" && out.Ref != "" {
fmt.Fprintf(tw, "Ref:\t%s\n", out.Ref)
}
if out.Context != "" {
fmt.Fprintf(tw, "Context:\t%s\n", out.Context)
}
if out.Dockerfile != "" {
fmt.Fprintf(tw, "Dockerfile:\t%s\n", out.Dockerfile)
}
if out.VCSRepository != "" {
fmt.Fprintf(tw, "VCS Repository:\t%s\n", out.VCSRepository)
}
if out.VCSRevision != "" {
fmt.Fprintf(tw, "VCS Revision:\t%s\n", out.VCSRevision)
}
if out.Target != "" {
fmt.Fprintf(tw, "Target:\t%s\n", out.Target)
}
if len(out.Platform) > 0 {
fmt.Fprintf(tw, "Platforms:\t%s\n", strings.Join(out.Platform, ", "))
}
if out.KeepGitDir {
fmt.Fprintf(tw, "Keep Git Dir:\t%s\n", strconv.FormatBool(out.KeepGitDir))
}
tw.Flush()
fmt.Fprintln(dockerCli.Out())
printTable(dockerCli.Out(), out.NamedContexts, "Named Context")
tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
fmt.Fprintf(tw, "Started:\t%s\n", out.StartedAt.Format("2006-01-02 15:04:05"))
var statusStr string
if out.Status == statusRunning {
statusStr = " (running)"
}
fmt.Fprintf(tw, "Duration:\t%s%s\n", formatDuration(out.Duration), statusStr)
if out.Status == statusError {
fmt.Fprintf(tw, "Error:\t%s %s\n", codes.Code(rec.Error.Code).String(), rec.Error.Message)
} else if out.Status == statusCanceled {
fmt.Fprintf(tw, "Status:\tCanceled\n")
}
fmt.Fprintf(tw, "Build Steps:\t%d/%d (%.0f%% cached)\n", out.NumCompletedSteps, out.NumTotalSteps, float64(out.NumCachedSteps)/float64(out.NumTotalSteps)*100)
tw.Flush()
fmt.Fprintln(dockerCli.Out())
tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
if out.Config.Network != "" {
fmt.Fprintf(tw, "Network:\t%s\n", out.Config.Network)
}
if out.Config.Hostname != "" {
fmt.Fprintf(tw, "Hostname:\t%s\n", out.Config.Hostname)
}
if len(out.Config.ExtraHosts) > 0 {
fmt.Fprintf(tw, "Extra Hosts:\t%s\n", strings.Join(out.Config.ExtraHosts, ", "))
}
if out.Config.CgroupParent != "" {
fmt.Fprintf(tw, "Cgroup Parent:\t%s\n", out.Config.CgroupParent)
}
if out.Config.ImageResolveMode != "" {
fmt.Fprintf(tw, "Image Resolve Mode:\t%s\n", out.Config.ImageResolveMode)
}
if out.Config.MultiPlatform {
fmt.Fprintf(tw, "Multi-Platform:\t%s\n", strconv.FormatBool(out.Config.MultiPlatform))
}
if out.Config.NoCache {
fmt.Fprintf(tw, "No Cache:\t%s\n", strconv.FormatBool(out.Config.NoCache))
}
if len(out.Config.NoCacheFilter) > 0 {
fmt.Fprintf(tw, "No Cache Filter:\t%s\n", strings.Join(out.Config.NoCacheFilter, ", "))
}
if out.Config.ShmSize != "" {
fmt.Fprintf(tw, "Shm Size:\t%s\n", out.Config.ShmSize)
}
if out.Config.Ulimit != "" {
fmt.Fprintf(tw, "Resource Limits:\t%s\n", out.Config.Ulimit)
}
if out.Config.CacheMountNS != "" {
fmt.Fprintf(tw, "Cache Mount Namespace:\t%s\n", out.Config.CacheMountNS)
}
if out.Config.DockerfileCheckConfig != "" {
fmt.Fprintf(tw, "Dockerfile Check Config:\t%s\n", out.Config.DockerfileCheckConfig)
}
if out.Config.SourceDateEpoch != "" {
fmt.Fprintf(tw, "Source Date Epoch:\t%s\n", out.Config.SourceDateEpoch)
}
if out.Config.SandboxHostname != "" {
fmt.Fprintf(tw, "Sandbox Hostname:\t%s\n", out.Config.SandboxHostname)
}
for _, kv := range out.Config.RestRaw {
fmt.Fprintf(tw, "%s:\t%s\n", kv.Name, kv.Value)
}
tw.Flush()
fmt.Fprintln(dockerCli.Out())
printTable(dockerCli.Out(), out.BuildArgs, "Build Arg")
printTable(dockerCli.Out(), out.Labels, "Label")
if len(out.Materials) > 0 {
fmt.Fprintln(dockerCli.Out(), "Materials:")
tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
fmt.Fprintf(tw, "URI\tDIGEST\n")
for _, m := range out.Materials {
fmt.Fprintf(tw, "%s\t%s\n", m.URI, strings.Join(m.Digests, ", "))
}
tw.Flush()
fmt.Fprintln(dockerCli.Out())
}
if len(out.Attachments) > 0 {
fmt.Fprintf(tw, "Attachments:\n")
tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
fmt.Fprintf(tw, "DIGEST\tPLATFORM\tTYPE\n")
for _, a := range out.Attachments {
fmt.Fprintf(tw, "%s\t%s\t%s\n", a.Digest, a.Platform, a.Type)
}
tw.Flush()
fmt.Fprintln(dockerCli.Out())
}
if out.Error != nil {
if out.Error.Sources != nil {
fmt.Fprint(dockerCli.Out(), string(out.Error.Sources))
}
if len(out.Error.Logs) > 0 {
fmt.Fprintln(dockerCli.Out(), "Logs:")
fmt.Fprintf(dockerCli.Out(), "> => %s:\n", out.Error.Name)
for _, l := range out.Error.Logs {
fmt.Fprintln(dockerCli.Out(), "> "+l)
}
fmt.Fprintln(dockerCli.Out())
}
if len(out.Error.Stack) > 0 {
if debug.IsEnabled() {
fmt.Fprintf(dockerCli.Out(), "\n%s\n", out.Error.Stack)
} else {
fmt.Fprintf(dockerCli.Out(), "Enable --debug to see stack traces for error\n")
}
}
}
fmt.Fprintf(dockerCli.Out(), "Print build logs: docker buildx history logs %s\n", rec.Ref)
fmt.Fprintf(dockerCli.Out(), "View build in Docker Desktop: %s\n", desktop.BuildURL(fmt.Sprintf("%s/%s/%s", rec.node.Builder, rec.node.Name, rec.Ref)))
return nil
}
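The format handling above supports three modes: --format json emits the indented JSON document, any other non-pretty value is parsed as a Go text/template over the same output struct (for example, a hypothetical --format '{{.Name}}: {{.Status}}'), and the default pretty mode falls through to the tabwriter rendering shown above.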
func inspectCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
var options inspectOptions
cmd := &cobra.Command{
Use: "inspect [OPTIONS] [REF]",
Short: "Inspect a build",
Args: cobra.MaximumNArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) > 0 {
options.ref = args[0]
}
options.builder = *rootOpts.Builder
return runInspect(cmd.Context(), dockerCli, options)
},
ValidArgsFunction: completion.Disable,
}
cmd.AddCommand(
attachmentCmd(dockerCli, rootOpts),
)
flags := cmd.Flags()
flags.StringVar(&options.format, "format", formatter.PrettyFormatKey, "Format the output")
return cmd
}
func loadVertexLogs(ctx context.Context, c *client.Client, ref string, dgst digest.Digest, limit int) (string, []string, error) {
st, err := c.ControlClient().Status(ctx, &controlapi.StatusRequest{
Ref: ref,
})
if err != nil {
return "", nil, err
}
var name string
var logs []string
lastState := map[int]int{}
loop0:
for {
select {
case <-ctx.Done():
st.CloseSend()
return "", nil, context.Cause(ctx)
default:
ev, err := st.Recv()
if err != nil {
if errors.Is(err, io.EOF) {
break loop0
}
return "", nil, err
}
ss := client.NewSolveStatus(ev)
for _, v := range ss.Vertexes {
if v.Digest == dgst {
name = v.Name
break
}
}
for _, l := range ss.Logs {
if l.Vertex == dgst {
parts := bytes.Split(l.Data, []byte("\n"))
for i, p := range parts {
var wrote bool
if i == 0 {
idx, ok := lastState[l.Stream]
if ok && idx != -1 {
logs[idx] = logs[idx] + string(p)
wrote = true
}
}
if !wrote {
if len(p) > 0 {
logs = append(logs, string(p))
}
lastState[l.Stream] = len(logs) - 1
}
if i == len(parts)-1 && len(p) == 0 {
lastState[l.Stream] = -1
}
}
}
}
}
}
if limit > 0 && len(logs) > limit {
logs = logs[len(logs)-limit:]
}
return name, logs, nil
}
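The chunk handling in loadVertexLogs is subtle: status events can end mid-line, and lastState tracks, per stream, the index of the last unterminated line so the next chunk's first part can continue it. A minimal standalone sketch of the same idea (the helper name is illustrative, not part of buildx):

package main

import (
    "bytes"
    "fmt"
)

// appendChunk mirrors the per-stream reassembly above: the first part of a
// chunk extends the previous partial line; a trailing empty part means the
// chunk ended on a newline, so there is nothing left to continue.
func appendChunk(logs []string, lastState map[int]int, stream int, data []byte) []string {
    parts := bytes.Split(data, []byte("\n"))
    for i, p := range parts {
        var wrote bool
        if i == 0 {
            if idx, ok := lastState[stream]; ok && idx != -1 {
                logs[idx] += string(p) // continue the previous partial line
                wrote = true
            }
        }
        if !wrote {
            if len(p) > 0 {
                logs = append(logs, string(p))
            }
            lastState[stream] = len(logs) - 1
        }
        if i == len(parts)-1 && len(p) == 0 {
            lastState[stream] = -1 // chunk ended exactly on a newline
        }
    }
    return logs
}

func main() {
    var logs []string
    last := map[int]int{}
    logs = appendChunk(logs, last, 1, []byte("hel"))
    logs = appendChunk(logs, last, 1, []byte("lo\nwor"))
    logs = appendChunk(logs, last, 1, []byte("ld\n"))
    fmt.Println(logs) // [hello world]
}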
type attachment struct {
platform *ocispecs.Platform
descr ocispecs.Descriptor
}
func allAttachments(ctx context.Context, store content.Store, rec historyRecord) ([]attachment, error) {
var attachments []attachment
if rec.Result != nil {
for _, a := range rec.Result.Attestations {
attachments = append(attachments, attachment{
descr: ociDesc(a),
})
}
for _, r := range rec.Result.Results {
attachments = append(attachments, walkAttachments(ctx, store, ociDesc(r), nil)...)
}
}
for key, ri := range rec.Results {
p, err := platforms.Parse(key)
if err != nil {
return nil, err
}
for _, a := range ri.Attestations {
attachments = append(attachments, attachment{
platform: &p,
descr: ociDesc(a),
})
}
for _, r := range ri.Results {
attachments = append(attachments, walkAttachments(ctx, store, ociDesc(r), &p)...)
}
}
slices.SortFunc(attachments, func(a, b attachment) int {
pCmp := 0
if a.platform == nil && b.platform != nil {
return -1
} else if a.platform != nil && b.platform == nil {
return 1
} else if a.platform != nil && b.platform != nil {
pCmp = cmp.Compare(platforms.FormatAll(*a.platform), platforms.FormatAll(*b.platform))
}
return cmp.Or(
pCmp,
cmp.Compare(descrType(a.descr), descrType(b.descr)),
)
})
return attachments, nil
}
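The sort above orders attachments with no platform first, then by platform string, then by descriptor type; cmp.Or returns the first non-zero comparison, which is a tidy way to express multi-key sorts. A self-contained sketch of the pattern (types and values are illustrative):

package main

import (
    "cmp"
    "fmt"
    "slices"
)

type att struct{ platform, typ string }

func main() {
    as := []att{
        {"linux/arm64", "manifest"},
        {"linux/amd64", "sbom"},
        {"linux/amd64", "manifest"},
    }
    slices.SortFunc(as, func(a, b att) int {
        // platform order wins; type breaks ties
        return cmp.Or(
            cmp.Compare(a.platform, b.platform),
            cmp.Compare(a.typ, b.typ),
        )
    })
    fmt.Println(as) // [{linux/amd64 manifest} {linux/amd64 sbom} {linux/arm64 manifest}]
}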
func walkAttachments(ctx context.Context, store content.Store, desc ocispecs.Descriptor, platform *ocispecs.Platform) []attachment {
_, err := store.Info(ctx, desc.Digest)
if err != nil {
return nil
}
var out []attachment
if desc.Annotations["vnd.docker.reference.type"] != "attestation-manifest" {
out = append(out, attachment{platform: platform, descr: desc})
}
if desc.MediaType != ocispecs.MediaTypeImageIndex && desc.MediaType != images.MediaTypeDockerSchema2ManifestList {
return out
}
dt, err := content.ReadBlob(ctx, store, desc)
if err != nil {
return out
}
var idx ocispecs.Index
if err := json.Unmarshal(dt, &idx); err != nil {
return out
}
for _, d := range idx.Manifests {
p := platform
if d.Platform != nil {
p = d.Platform
}
out = append(out, walkAttachments(ctx, store, d, p)...)
}
return out
}
func ociDesc(in *controlapi.Descriptor) ocispecs.Descriptor {
return ocispecs.Descriptor{
MediaType: in.MediaType,
Digest: digest.Digest(in.Digest),
Size: in.Size,
Annotations: in.Annotations,
}
}
func descrType(desc ocispecs.Descriptor) string {
if typ, ok := desc.Annotations["in-toto.io/predicate-type"]; ok {
return typ
}
return desc.MediaType
}
func tryParseValue[T any](s string, errs *[]string, f func(string) (T, error)) (T, bool) {
v, err := f(s)
if err != nil {
errStr := fmt.Sprintf("failed to parse %s: (%v)", s, err)
*errs = append(*errs, errStr)
return v, false
}
return v, true
}
func printTable(w io.Writer, kvs []keyValueOutput, title string) {
if len(kvs) == 0 {
return
}
tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
fmt.Fprintf(tw, "%s\tVALUE\n", strings.ToUpper(title))
for _, k := range kvs {
fmt.Fprintf(tw, "%s\t%s\n", k.Name, k.Value)
}
tw.Flush()
fmt.Fprintln(w)
}
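printTable is the helper behind the Named Context, Build Arg, and Label sections above. A standalone rendition showing the tabwriter settings it relies on (key/value data is made up):

package main

import (
    "fmt"
    "os"
    "text/tabwriter"
)

type kv struct{ Name, Value string }

func main() {
    kvs := []kv{{"ALPINE_VERSION", "3.20"}, {"GO_VERSION", "1.22"}}
    // same parameters as above: minwidth 1, tabwidth 8, padding 1, tab-padded
    tw := tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0)
    fmt.Fprintf(tw, "BUILD ARG\tVALUE\n")
    for _, k := range kvs {
        fmt.Fprintf(tw, "%s\t%s\n", k.Name, k.Value)
    }
    tw.Flush()
}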
func readKeyValues(attrs map[string]string, prefix string) []keyValueOutput {
var out []keyValueOutput
for k, v := range attrs {
if strings.HasPrefix(k, prefix) {
out = append(out, keyValueOutput{
Name: strings.TrimPrefix(k, prefix),
Value: v,
})
}
}
if len(out) == 0 {
return nil
}
slices.SortFunc(out, func(a, b keyValueOutput) int {
return cmp.Compare(a.Name, b.Name)
})
return out
}
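readKeyValues filters a flat attribute map by prefix and strips it, so a hypothetical call readKeyValues(map[string]string{"build-arg:FOO": "bar"}, "build-arg:") yields a single entry {Name: "FOO", Value: "bar"}; entries are sorted by name for stable output.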
func digestSetToDigests(ds slsa.DigestSet) []string {
var out []string
for k, v := range ds {
out = append(out, fmt.Sprintf("%s:%s", k, v))
}
return out
}


@@ -1,145 +0,0 @@
package history
import (
"context"
"io"
"github.com/containerd/containerd/v2/core/content/proxy"
"github.com/containerd/platforms"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/util/cobrautil/completion"
"github.com/docker/cli/cli/command"
intoto "github.com/in-toto/in-toto-golang/in_toto"
slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2"
"github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
type attachmentOptions struct {
builder string
typ string
platform string
ref string
digest digest.Digest
}
func runAttachment(ctx context.Context, dockerCli command.Cli, opts attachmentOptions) error {
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
if err != nil {
return err
}
nodes, err := b.LoadNodes(ctx)
if err != nil {
return err
}
for _, node := range nodes {
if node.Err != nil {
return node.Err
}
}
recs, err := queryRecords(ctx, opts.ref, nodes, nil)
if err != nil {
return err
}
if len(recs) == 0 {
if opts.ref == "" {
return errors.New("no records found")
}
return errors.Errorf("no record found for ref %q", opts.ref)
}
rec := &recs[0]
c, err := rec.node.Driver.Client(ctx)
if err != nil {
return err
}
store := proxy.NewContentStore(c.ContentClient())
if opts.digest != "" {
ra, err := store.ReaderAt(ctx, ocispecs.Descriptor{Digest: opts.digest})
if err != nil {
return err
}
_, err = io.Copy(dockerCli.Out(), io.NewSectionReader(ra, 0, ra.Size()))
return err
}
attachments, err := allAttachments(ctx, store, *rec)
if err != nil {
return err
}
typ := opts.typ
switch typ {
case "index":
typ = ocispecs.MediaTypeImageIndex
case "manifest":
typ = ocispecs.MediaTypeImageManifest
case "image":
typ = ocispecs.MediaTypeImageConfig
case "provenance":
typ = slsa02.PredicateSLSAProvenance
case "sbom":
typ = intoto.PredicateSPDX
}
for _, a := range attachments {
if opts.platform != "" && (a.platform == nil || platforms.FormatAll(*a.platform) != opts.platform) {
continue
}
if typ != "" && descrType(a.descr) != typ {
continue
}
ra, err := store.ReaderAt(ctx, a.descr)
if err != nil {
return err
}
_, err = io.Copy(dockerCli.Out(), io.NewSectionReader(ra, 0, ra.Size()))
return err
}
return errors.Errorf("no matching attachment found for ref %q", opts.ref)
}
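The switch above expands user-friendly shorthands before matching: with this mapping, a hypothetical --type provenance selects attachments whose in-toto predicate type is https://slsa.dev/provenance/v0.2 (the value of slsa02.PredicateSLSAProvenance), and --type sbom selects https://spdx.dev/Document (intoto.PredicateSPDX); descrType falls back to the raw media type when no predicate annotation is present.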
func attachmentCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
var options attachmentOptions
cmd := &cobra.Command{
Use: "attachment [OPTIONS] REF [DIGEST]",
Short: "Inspect a build attachment",
Args: cobra.RangeArgs(1, 2),
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) > 0 {
options.ref = args[0]
}
if len(args) > 1 {
dgst, err := digest.Parse(args[1])
if err != nil {
return errors.Wrapf(err, "invalid digest %q", args[1])
}
options.digest = dgst
}
if options.digest == "" && options.platform == "" && options.typ == "" {
return errors.New("at least one of --type, --platform or DIGEST must be specified")
}
options.builder = *rootOpts.Builder
return runAttachment(cmd.Context(), dockerCli, options)
},
ValidArgsFunction: completion.Disable,
}
flags := cmd.Flags()
flags.StringVar(&options.typ, "type", "", "Type of attachment")
flags.StringVar(&options.platform, "platform", "", "Platform of attachment")
return cmd
}


@@ -1,117 +0,0 @@
package history
import (
"context"
"io"
"os"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/util/cobrautil/completion"
"github.com/docker/buildx/util/progress"
"github.com/docker/cli/cli/command"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/util/progress/progressui"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
type logsOptions struct {
builder string
ref string
progress string
}
func runLogs(ctx context.Context, dockerCli command.Cli, opts logsOptions) error {
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
if err != nil {
return err
}
nodes, err := b.LoadNodes(ctx)
if err != nil {
return err
}
for _, node := range nodes {
if node.Err != nil {
return node.Err
}
}
recs, err := queryRecords(ctx, opts.ref, nodes, nil)
if err != nil {
return err
}
if len(recs) == 0 {
if opts.ref == "" {
return errors.New("no records found")
}
return errors.Errorf("no record found for ref %q", opts.ref)
}
rec := &recs[0]
c, err := rec.node.Driver.Client(ctx)
if err != nil {
return err
}
cl, err := c.ControlClient().Status(ctx, &controlapi.StatusRequest{
Ref: rec.Ref,
})
if err != nil {
return err
}
mode := progressui.DisplayMode(opts.progress)
if mode == progressui.AutoMode {
mode = progressui.PlainMode
}
printer, err := progress.NewPrinter(context.TODO(), os.Stderr, mode)
if err != nil {
return err
}
loop0:
for {
select {
case <-ctx.Done():
cl.CloseSend()
return context.Cause(ctx)
default:
ev, err := cl.Recv()
if err != nil {
if errors.Is(err, io.EOF) {
break loop0
}
return err
}
printer.Write(client.NewSolveStatus(ev))
}
}
return printer.Wait()
}
func logsCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
var options logsOptions
cmd := &cobra.Command{
Use: "logs [OPTIONS] [REF]",
Short: "Print the logs of a build",
Args: cobra.MaximumNArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) > 0 {
options.ref = args[0]
}
options.builder = *rootOpts.Builder
return runLogs(cmd.Context(), dockerCli, options)
},
ValidArgsFunction: completion.Disable,
}
flags := cmd.Flags()
flags.StringVar(&options.progress, "progress", "plain", "Set type of progress output (plain, rawjson, tty)")
return cmd
}


@@ -1,264 +0,0 @@
package history
import (
"context"
"encoding/json"
"fmt"
"os"
"path"
"slices"
"time"
"github.com/containerd/console"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/localstate"
"github.com/docker/buildx/util/cobrautil/completion"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/desktop"
"github.com/docker/buildx/util/gitutil"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/command/formatter"
"github.com/docker/go-units"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
const (
lsHeaderBuildID = "BUILD ID"
lsHeaderName = "NAME"
lsHeaderStatus = "STATUS"
lsHeaderCreated = "CREATED AT"
lsHeaderDuration = "DURATION"
lsHeaderLink = ""
lsDefaultTableFormat = "table {{.Ref}}\t{{.Name}}\t{{.Status}}\t{{.CreatedAt}}\t{{.Duration}}\t{{.Link}}"
headerKeyTimestamp = "buildkit-current-timestamp"
)
type lsOptions struct {
builder string
format string
noTrunc bool
filters []string
local bool
}
func runLs(ctx context.Context, dockerCli command.Cli, opts lsOptions) error {
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
if err != nil {
return err
}
nodes, err := b.LoadNodes(ctx)
if err != nil {
return err
}
for _, node := range nodes {
if node.Err != nil {
return node.Err
}
}
queryOptions := &queryOptions{}
if opts.local {
wd, err := os.Getwd()
if err != nil {
return err
}
gitc, err := gitutil.New(gitutil.WithContext(ctx), gitutil.WithWorkingDir(wd))
if err != nil {
if st, err1 := os.Stat(path.Join(wd, ".git")); err1 == nil && st.IsDir() {
return errors.Wrap(err, "git was not found in the system")
}
return errors.Wrapf(err, "could not find git repository for local filter")
}
remote, err := gitc.RemoteURL()
if err != nil {
return errors.Wrapf(err, "could not get remote URL for local filter")
}
queryOptions.Filters = append(queryOptions.Filters, fmt.Sprintf("repository=%s", remote))
}
queryOptions.Filters = append(queryOptions.Filters, opts.filters...)
out, err := queryRecords(ctx, "", nodes, queryOptions)
if err != nil {
return err
}
ls, err := localstate.New(confutil.NewConfig(dockerCli))
if err != nil {
return err
}
for i, rec := range out {
st, _ := ls.ReadRef(rec.node.Builder, rec.node.Name, rec.Ref)
rec.name = buildName(rec.FrontendAttrs, st)
out[i] = rec
}
return lsPrint(dockerCli, out, opts)
}
func lsCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
var options lsOptions
cmd := &cobra.Command{
Use: "ls",
Short: "List build records",
Args: cli.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
options.builder = *rootOpts.Builder
return runLs(cmd.Context(), dockerCli, options)
},
ValidArgsFunction: completion.Disable,
}
flags := cmd.Flags()
flags.StringVar(&options.format, "format", formatter.TableFormatKey, "Format the output")
flags.BoolVar(&options.noTrunc, "no-trunc", false, "Don't truncate output")
flags.StringArrayVar(&options.filters, "filter", nil, `Provide filter values (e.g., "status=error")`)
flags.BoolVar(&options.local, "local", false, "List records for current repository only")
return cmd
}
func lsPrint(dockerCli command.Cli, records []historyRecord, in lsOptions) error {
if in.format == formatter.TableFormatKey {
in.format = lsDefaultTableFormat
}
ctx := formatter.Context{
Output: dockerCli.Out(),
Format: formatter.Format(in.format),
Trunc: !in.noTrunc,
}
slices.SortFunc(records, func(a, b historyRecord) int {
if a.CompletedAt == nil && b.CompletedAt != nil {
return -1
}
if a.CompletedAt != nil && b.CompletedAt == nil {
return 1
}
return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
})
var term bool
if _, err := console.ConsoleFromFile(os.Stdout); err == nil {
term = true
}
render := func(format func(subContext formatter.SubContext) error) error {
for _, r := range records {
if err := format(&lsContext{
format: formatter.Format(in.format),
isTerm: term,
trunc: !in.noTrunc,
record: &r,
}); err != nil {
return err
}
}
return nil
}
lsCtx := lsContext{
isTerm: term,
trunc: !in.noTrunc,
}
lsCtx.Header = formatter.SubHeaderContext{
"Ref": lsHeaderBuildID,
"Name": lsHeaderName,
"Status": lsHeaderStatus,
"CreatedAt": lsHeaderCreated,
"Duration": lsHeaderDuration,
"Link": lsHeaderLink,
}
return ctx.Write(&lsCtx, render)
}
type lsContext struct {
formatter.HeaderContext
isTerm bool
trunc bool
format formatter.Format
record *historyRecord
}
func (c *lsContext) MarshalJSON() ([]byte, error) {
m := map[string]any{
"ref": c.FullRef(),
"name": c.Name(),
"status": c.Status(),
"created_at": c.record.CreatedAt.AsTime().Format(time.RFC3339Nano),
"total_steps": c.record.NumTotalSteps,
"completed_steps": c.record.NumCompletedSteps,
"cached_steps": c.record.NumCachedSteps,
}
if c.record.CompletedAt != nil {
m["completed_at"] = c.record.CompletedAt.AsTime().Format(time.RFC3339Nano)
}
return json.Marshal(m)
}
func (c *lsContext) Ref() string {
return c.record.Ref
}
func (c *lsContext) FullRef() string {
return fmt.Sprintf("%s/%s/%s", c.record.node.Builder, c.record.node.Name, c.record.Ref)
}
func (c *lsContext) Name() string {
name := c.record.name
if c.trunc && c.format.IsTable() {
return trimBeginning(name, 36)
}
return name
}
func (c *lsContext) Status() string {
if c.record.CompletedAt != nil {
if c.record.Error != nil {
return "Error"
}
return "Completed"
}
return "Running"
}
func (c *lsContext) CreatedAt() string {
return units.HumanDuration(time.Since(c.record.CreatedAt.AsTime())) + " ago"
}
func (c *lsContext) Duration() string {
lastTime := c.record.currentTimestamp
if c.record.CompletedAt != nil {
tm := c.record.CompletedAt.AsTime()
lastTime = &tm
}
if lastTime == nil {
return ""
}
v := formatDuration(lastTime.Sub(c.record.CreatedAt.AsTime()))
if c.record.CompletedAt == nil {
v += "+"
}
return v
}
func (c *lsContext) Link() string {
url := desktop.BuildURL(c.FullRef())
if c.format.IsTable() {
if c.isTerm {
return desktop.ANSIHyperlink(url, "Open")
}
return ""
}
return url
}


@@ -1,73 +0,0 @@
package history
import (
"context"
"fmt"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/util/cobrautil/completion"
"github.com/docker/buildx/util/desktop"
"github.com/docker/cli/cli/command"
"github.com/pkg/browser"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
type openOptions struct {
builder string
ref string
}
func runOpen(ctx context.Context, dockerCli command.Cli, opts openOptions) error {
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
if err != nil {
return err
}
nodes, err := b.LoadNodes(ctx)
if err != nil {
return err
}
for _, node := range nodes {
if node.Err != nil {
return node.Err
}
}
recs, err := queryRecords(ctx, opts.ref, nodes, nil)
if err != nil {
return err
}
if len(recs) == 0 {
if opts.ref == "" {
return errors.New("no records found")
}
return errors.Errorf("no record found for ref %q", opts.ref)
}
rec := &recs[0]
url := desktop.BuildURL(fmt.Sprintf("%s/%s/%s", rec.node.Builder, rec.node.Name, rec.Ref))
return browser.OpenURL(url)
}
func openCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
var options openOptions
cmd := &cobra.Command{
Use: "open [OPTIONS] [REF]",
Short: "Open a build in Docker Desktop",
Args: cobra.MaximumNArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) > 0 {
options.ref = args[0]
}
options.builder = *rootOpts.Builder
return runOpen(cmd.Context(), dockerCli, options)
},
ValidArgsFunction: completion.Disable,
}
return cmd
}


@@ -1,151 +0,0 @@
package history
import (
"context"
"io"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/util/cobrautil/completion"
"github.com/docker/cli/cli/command"
"github.com/hashicorp/go-multierror"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"golang.org/x/sync/errgroup"
)
type rmOptions struct {
builder string
refs []string
all bool
}
func runRm(ctx context.Context, dockerCli command.Cli, opts rmOptions) error {
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
if err != nil {
return err
}
nodes, err := b.LoadNodes(ctx)
if err != nil {
return err
}
for _, node := range nodes {
if node.Err != nil {
return node.Err
}
}
errs := make([][]error, len(opts.refs))
for i := range errs {
errs[i] = make([]error, len(nodes))
}
eg, ctx := errgroup.WithContext(ctx)
for i, node := range nodes {
node := node
eg.Go(func() error {
if node.Driver == nil {
return nil
}
c, err := node.Driver.Client(ctx)
if err != nil {
return err
}
refs := opts.refs
if opts.all {
serv, err := c.ControlClient().ListenBuildHistory(ctx, &controlapi.BuildHistoryRequest{
EarlyExit: true,
})
if err != nil {
return err
}
defer serv.CloseSend()
for {
resp, err := serv.Recv()
if err != nil {
if errors.Is(err, io.EOF) {
break
}
return err
}
if resp.Type == controlapi.BuildHistoryEventType_COMPLETE {
refs = append(refs, resp.Record.Ref)
}
}
}
for j, ref := range refs {
_, err = c.ControlClient().UpdateBuildHistory(ctx, &controlapi.UpdateBuildHistoryRequest{
Ref: ref,
Delete: true,
})
if opts.all {
if err != nil {
return err
}
} else {
errs[j][i] = err
}
}
return nil
})
}
if err := eg.Wait(); err != nil {
return err
}
var out []error
loop0:
for _, nodeErrs := range errs {
var nodeErr error
for _, err1 := range nodeErrs {
if err1 == nil {
continue loop0
}
if nodeErr == nil {
nodeErr = err1
} else {
nodeErr = multierror.Append(nodeErr, err1)
}
}
out = append(out, nodeErr)
}
if len(out) == 0 {
return nil
}
if len(out) == 1 {
return out[0]
}
return multierror.Append(out[0], out[1:]...)
}
func rmCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
var options rmOptions
cmd := &cobra.Command{
Use: "rm [OPTIONS] [REF...]",
Short: "Remove build records",
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) == 0 && !options.all {
return errors.New("rm requires at least one argument")
}
if len(args) > 0 && options.all {
return errors.New("rm requires either --all or at least one argument")
}
options.refs = args
options.builder = *rootOpts.Builder
return runRm(cmd.Context(), dockerCli, options)
},
ValidArgsFunction: completion.Disable,
}
flags := cmd.Flags()
flags.BoolVar(&options.all, "all", false, "Remove all build records")
return cmd
}


@@ -1,33 +0,0 @@
package history
import (
"github.com/docker/buildx/util/cobrautil/completion"
"github.com/docker/cli/cli/command"
"github.com/spf13/cobra"
)
type RootOptions struct {
Builder *string
}
func RootCmd(rootcmd *cobra.Command, dockerCli command.Cli, opts RootOptions) *cobra.Command {
cmd := &cobra.Command{
Use: "history",
Short: "Commands to work on build records",
ValidArgsFunction: completion.Disable,
RunE: rootcmd.RunE,
}
cmd.AddCommand(
lsCmd(dockerCli, opts),
rmCmd(dockerCli, opts),
logsCmd(dockerCli, opts),
inspectCmd(dockerCli, opts),
openCmd(dockerCli, opts),
traceCmd(dockerCli, opts),
importCmd(dockerCli, opts),
exportCmd(dockerCli, opts),
)
return cmd
}

View File

@@ -1,228 +0,0 @@
package history
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net"
"os"
"time"
"github.com/containerd/console"
"github.com/containerd/containerd/v2/core/content/proxy"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/util/cobrautil/completion"
"github.com/docker/buildx/util/otelutil"
"github.com/docker/buildx/util/otelutil/jaeger"
"github.com/docker/cli/cli/command"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/browser"
"github.com/pkg/errors"
"github.com/spf13/cobra"
jaegerui "github.com/tonistiigi/jaeger-ui-rest"
)
type traceOptions struct {
builder string
ref string
addr string
compare string
}
func loadTrace(ctx context.Context, ref string, nodes []builder.Node) (string, []byte, error) {
recs, err := queryRecords(ctx, ref, nodes, &queryOptions{
CompletedOnly: true,
})
if err != nil {
return "", nil, err
}
if len(recs) == 0 {
if ref == "" {
return "", nil, errors.New("no records found")
}
return "", nil, errors.Errorf("no record found for ref %q", ref)
}
rec := &recs[0]
if rec.CompletedAt == nil {
return "", nil, errors.Errorf("build %q is not completed, only completed builds can be traced", rec.Ref)
}
if rec.Trace == nil {
// build is complete but no trace yet. try to finalize the trace
time.Sleep(1 * time.Second) // give some extra time for last parts of trace to be written
c, err := rec.node.Driver.Client(ctx)
if err != nil {
return "", nil, err
}
_, err = c.ControlClient().UpdateBuildHistory(ctx, &controlapi.UpdateBuildHistoryRequest{
Ref: rec.Ref,
Finalize: true,
})
if err != nil {
return "", nil, err
}
recs, err := queryRecords(ctx, rec.Ref, []builder.Node{*rec.node}, &queryOptions{
CompletedOnly: true,
})
if err != nil {
return "", nil, err
}
if len(recs) == 0 {
return "", nil, errors.Errorf("build record %q was deleted", rec.Ref)
}
rec = &recs[0]
if rec.Trace == nil {
return "", nil, errors.Errorf("build record %q is missing a trace", rec.Ref)
}
}
c, err := rec.node.Driver.Client(ctx)
if err != nil {
return "", nil, err
}
store := proxy.NewContentStore(c.ContentClient())
ra, err := store.ReaderAt(ctx, ocispecs.Descriptor{
Digest: digest.Digest(rec.Trace.Digest),
MediaType: rec.Trace.MediaType,
Size: rec.Trace.Size,
})
if err != nil {
return "", nil, err
}
spans, err := otelutil.ParseSpanStubs(io.NewSectionReader(ra, 0, ra.Size()))
if err != nil {
return "", nil, err
}
wrapper := struct {
Data []jaeger.Trace `json:"data"`
}{
Data: spans.JaegerData().Data,
}
if len(wrapper.Data) == 0 {
return "", nil, errors.New("no trace data")
}
buf := &bytes.Buffer{}
enc := json.NewEncoder(buf)
enc.SetIndent("", " ")
if err := enc.Encode(wrapper); err != nil {
return "", nil, err
}
return string(wrapper.Data[0].TraceID), buf.Bytes(), nil
}
func runTrace(ctx context.Context, dockerCli command.Cli, opts traceOptions) error {
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
if err != nil {
return err
}
nodes, err := b.LoadNodes(ctx)
if err != nil {
return err
}
for _, node := range nodes {
if node.Err != nil {
return node.Err
}
}
traceID, data, err := loadTrace(ctx, opts.ref, nodes)
if err != nil {
return err
}
srv := jaegerui.NewServer(jaegerui.Config{})
if err := srv.AddTrace(traceID, bytes.NewReader(data)); err != nil {
return err
}
url := "/trace/" + traceID
if opts.compare != "" {
traceIDcomp, data, err := loadTrace(ctx, opts.compare, nodes)
if err != nil {
return errors.Wrapf(err, "failed to load trace for %s", opts.compare)
}
if err := srv.AddTrace(traceIDcomp, bytes.NewReader(data)); err != nil {
return err
}
url = "/trace/" + traceIDcomp + "..." + traceID
}
var term bool
if _, err := console.ConsoleFromFile(os.Stdout); err == nil {
term = true
}
if !term && opts.compare == "" {
fmt.Fprintln(dockerCli.Out(), string(data))
return nil
}
ln, err := net.Listen("tcp", opts.addr)
if err != nil {
return err
}
go func() {
time.Sleep(100 * time.Millisecond)
browser.OpenURL(url)
}()
url = "http://" + ln.Addr().String() + url
fmt.Fprintf(dockerCli.Err(), "Trace available at %s\n", url)
go func() {
<-ctx.Done()
ln.Close()
}()
err = srv.Serve(ln)
if err != nil {
select {
case <-ctx.Done():
return nil
default:
}
}
return err
}
func traceCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
var options traceOptions
cmd := &cobra.Command{
Use: "trace [OPTIONS] [REF]",
Short: "Show the OpenTelemetry trace of a build record",
Args: cobra.MaximumNArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) > 0 {
options.ref = args[0]
}
options.builder = *rootOpts.Builder
return runTrace(cmd.Context(), dockerCli, options)
},
ValidArgsFunction: completion.Disable,
}
flags := cmd.Flags()
flags.StringVar(&options.addr, "addr", "127.0.0.1:0", "Address to bind the UI server")
flags.StringVar(&options.compare, "compare", "", "Compare with another build reference")
return cmd
}

View File

@@ -1,403 +0,0 @@
package history
import (
"bytes"
"context"
"encoding/csv"
"fmt"
"io"
"path/filepath"
"slices"
"strconv"
"strings"
"sync"
"time"
"github.com/docker/buildx/build"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/localstate"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/util/gitutil"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
const recordsLimit = 50
func buildName(fattrs map[string]string, ls *localstate.State) string {
var res string
var target, contextPath, dockerfilePath, vcsSource string
if v, ok := fattrs["target"]; ok {
target = v
}
if v, ok := fattrs["context"]; ok {
contextPath = filepath.ToSlash(v)
} else if v, ok := fattrs["vcs:localdir:context"]; ok && v != "." {
contextPath = filepath.ToSlash(v)
}
if v, ok := fattrs["vcs:source"]; ok {
vcsSource = v
}
if v, ok := fattrs["filename"]; ok && v != "Dockerfile" {
dockerfilePath = filepath.ToSlash(v)
}
if v, ok := fattrs["vcs:localdir:dockerfile"]; ok && v != "." {
dockerfilePath = filepath.ToSlash(filepath.Join(v, dockerfilePath))
}
var localPath string
if ls != nil && !build.IsRemoteURL(ls.LocalPath) {
if ls.LocalPath != "" && ls.LocalPath != "-" {
localPath = filepath.ToSlash(ls.LocalPath)
}
if ls.DockerfilePath != "" && ls.DockerfilePath != "-" && ls.DockerfilePath != "Dockerfile" {
dockerfilePath = filepath.ToSlash(ls.DockerfilePath)
}
}
// remove default dockerfile name
const defaultFilename = "/Dockerfile"
hasDefaultFileName := strings.HasSuffix(dockerfilePath, defaultFilename) || dockerfilePath == ""
dockerfilePath = strings.TrimSuffix(dockerfilePath, defaultFilename)
// dockerfile is a subpath of context
if strings.HasPrefix(dockerfilePath, localPath) && len(dockerfilePath) > len(localPath) {
res = dockerfilePath[strings.LastIndex(localPath, "/")+1:]
} else {
// Otherwise, use basename
bpath := localPath
if len(dockerfilePath) > 0 {
bpath = dockerfilePath
}
if len(bpath) > 0 {
lidx := strings.LastIndex(bpath, "/")
res = bpath[lidx+1:]
if !hasDefaultFileName {
if lidx != -1 {
res = filepath.ToSlash(filepath.Join(filepath.Base(bpath[:lidx]), res))
} else {
res = filepath.ToSlash(filepath.Join(filepath.Base(bpath), res))
}
}
}
}
if len(contextPath) > 0 {
res = contextPath
}
if len(target) > 0 {
if len(res) > 0 {
res = res + " (" + target + ")"
} else {
res = target
}
}
if res == "" && vcsSource != "" {
return vcsSource
}
return res
}
func trimBeginning(s string, n int) string {
if len(s) <= n {
return s
}
return ".." + s[len(s)-n+2:]
}
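trimBeginning keeps the tail of the string, which suits build names where the trailing path segment is the interesting part; for example, trimBeginning("very/long/path/to/project", 12) returns "..to/project".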
type historyRecord struct {
*controlapi.BuildHistoryRecord
currentTimestamp *time.Time
node *builder.Node
name string
}
type queryOptions struct {
CompletedOnly bool
Filters []string
}
func queryRecords(ctx context.Context, ref string, nodes []builder.Node, opts *queryOptions) ([]historyRecord, error) {
var mu sync.Mutex
var out []historyRecord
var offset *int
if strings.HasPrefix(ref, "^") {
off, err := strconv.Atoi(ref[1:])
if err != nil {
return nil, errors.Wrapf(err, "invalid offset %q", ref)
}
offset = &off
ref = ""
}
var filters []string
if opts != nil {
filters = opts.Filters
}
eg, ctx := errgroup.WithContext(ctx)
for _, node := range nodes {
node := node
eg.Go(func() error {
if node.Driver == nil {
return nil
}
var records []historyRecord
c, err := node.Driver.Client(ctx)
if err != nil {
return err
}
var matchers []matchFunc
if len(filters) > 0 {
filters, matchers, err = dockerFiltersToBuildkit(filters)
if err != nil {
return err
}
sb := bytes.NewBuffer(nil)
w := csv.NewWriter(sb)
w.Write(filters)
w.Flush()
filters = []string{strings.TrimSuffix(sb.String(), "\n")}
}
serv, err := c.ControlClient().ListenBuildHistory(ctx, &controlapi.BuildHistoryRequest{
EarlyExit: true,
Ref: ref,
Limit: recordsLimit,
Filter: filters,
})
if err != nil {
return err
}
md, err := serv.Header()
if err != nil {
return err
}
var ts *time.Time
if v, ok := md[headerKeyTimestamp]; ok {
t, err := time.Parse(time.RFC3339Nano, v[0])
if err != nil {
return err
}
ts = &t
}
defer serv.CloseSend()
loop0:
for {
he, err := serv.Recv()
if err != nil {
if errors.Is(err, io.EOF) {
break
}
return err
}
if he.Type == controlapi.BuildHistoryEventType_DELETED || he.Record == nil {
continue
}
if opts != nil && opts.CompletedOnly && he.Type != controlapi.BuildHistoryEventType_COMPLETE {
continue
}
// for older BuildKit versions that don't support server-side filters, apply the filters locally
for _, matcher := range matchers {
if !matcher(he.Record) {
continue loop0
}
}
records = append(records, historyRecord{
BuildHistoryRecord: he.Record,
currentTimestamp: ts,
node: &node,
})
}
mu.Lock()
out = append(out, records...)
mu.Unlock()
return nil
})
}
if err := eg.Wait(); err != nil {
return nil, err
}
slices.SortFunc(out, func(a, b historyRecord) int {
return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
})
if offset != nil {
var filtered []historyRecord
for _, r := range out {
if *offset > 0 {
*offset--
continue
}
filtered = append(filtered, r)
break
}
if *offset > 0 {
return nil, errors.Errorf("no completed build found with offset %d", *offset)
}
out = filtered
}
return out, nil
}
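queryRecords also accepts a "^N" offset syntax: records are sorted newest-first, so "^0" resolves to the most recent build and "^1" to the one before it. A standalone sketch of the selection step (illustrative; the real code streams records and reports a dedicated error when the offset runs past the end):

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// pickByOffset resolves a "^N" ref against a newest-first list.
func pickByOffset(refs []string, ref string) (string, error) {
    if !strings.HasPrefix(ref, "^") {
        return ref, nil
    }
    off, err := strconv.Atoi(ref[1:])
    if err != nil || off < 0 || off >= len(refs) {
        return "", fmt.Errorf("invalid offset %q", ref)
    }
    return refs[off], nil
}

func main() {
    refs := []string{"newest", "middle", "oldest"}
    r, _ := pickByOffset(refs, "^1")
    fmt.Println(r) // middle
}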
func formatDuration(d time.Duration) string {
if d < time.Minute {
return fmt.Sprintf("%.1fs", d.Seconds())
}
return fmt.Sprintf("%dm %2ds", int(d.Minutes()), int(d.Seconds())%60)
}
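So formatDuration(45*time.Second+300*time.Millisecond) renders as "45.3s" and formatDuration(90*time.Second) as "1m 30s"; the %2d verb pads single-digit seconds, e.g. "1m  5s".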
type matchFunc func(*controlapi.BuildHistoryRecord) bool
func dockerFiltersToBuildkit(in []string) ([]string, []matchFunc, error) {
out := []string{}
matchers := []matchFunc{}
for _, f := range in {
key, value, sep, found := cutAny(f, "!=", "<=", ">=", "=", "<", ">")
if !found {
return nil, nil, errors.Errorf("invalid filter %q", f)
}
switch key {
case "ref", "repository", "status":
if sep != "=" && sep != "!=" {
return nil, nil, errors.Errorf("invalid separator for %q, expected = or !=", f)
}
matchers = append(matchers, valueFilter(key, value, sep))
if sep == "=" {
if key == "status" {
sep = "=="
} else {
sep = "~="
}
}
case "startedAt", "completedAt", "duration":
if sep == "=" || sep == "!=" {
return nil, nil, errors.Errorf("invalid separator for %q, expected <=, <, >= or >", f)
}
matcher, err := timeBasedFilter(key, value, sep)
if err != nil {
return nil, nil, err
}
matchers = append(matchers, matcher)
default:
return nil, nil, errors.Errorf("unsupported filter %q", f)
}
out = append(out, key+sep+value)
}
return out, matchers, nil
}
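Concretely (values illustrative): status=error is forwarded to BuildKit as status==error (an exact match), ref=abc123 becomes ref~=abc123 (a substring match), and != comparisons pass through unchanged; the matchers returned alongside reproduce the same semantics client-side for older BuildKit daemons that ignore the Filter field.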
func valueFilter(key, value, sep string) matchFunc {
return func(rec *controlapi.BuildHistoryRecord) bool {
var recValue string
switch key {
case "ref":
recValue = rec.Ref
case "repository":
v, ok := rec.FrontendAttrs["vcs:source"]
if ok {
recValue = v
} else {
if context, ok := rec.FrontendAttrs["context"]; ok {
if ref, err := gitutil.ParseGitRef(context); err == nil {
recValue = ref.Remote
}
}
}
case "status":
if rec.CompletedAt != nil {
if rec.Error != nil {
if strings.Contains(rec.Error.Message, "context canceled") {
recValue = "canceled"
} else {
recValue = "error"
}
} else {
recValue = "completed"
}
} else {
recValue = "running"
}
}
switch sep {
case "=":
if key == "status" {
return recValue == value
}
return strings.Contains(recValue, value)
case "!=":
return recValue != value
default:
return false
}
}
}
func timeBasedFilter(key, value, sep string) (matchFunc, error) {
var cmp int64
switch key {
case "startedAt", "completedAt":
v, err := time.ParseDuration(value)
if err == nil {
tm := time.Now().Add(-v)
cmp = tm.Unix()
} else {
tm, err := time.Parse(time.RFC3339, value)
if err != nil {
return nil, errors.Errorf("invalid time %s", value)
}
cmp = tm.Unix()
}
case "duration":
v, err := time.ParseDuration(value)
if err != nil {
return nil, errors.Errorf("invalid duration %s", value)
}
cmp = int64(v)
default:
return nil, nil
}
return func(rec *controlapi.BuildHistoryRecord) bool {
var val int64
switch key {
case "startedAt":
val = rec.CreatedAt.AsTime().Unix()
case "completedAt":
if rec.CompletedAt != nil {
val = rec.CompletedAt.AsTime().Unix()
}
case "duration":
if rec.CompletedAt != nil {
val = int64(rec.CompletedAt.AsTime().Sub(rec.CreatedAt.AsTime()))
}
}
switch sep {
case ">=":
return val >= cmp
case "<=":
return val <= cmp
case ">":
return val > cmp
default:
return val < cmp
}
}, nil
}
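Both absolute RFC 3339 timestamps and relative durations are accepted for the time keys; a relative value v is anchored at now-v, so a hypothetical startedAt>=24h matches builds created within the last 24 hours, startedAt<24h matches older ones, and duration>=1m compares the build's wall-clock duration directly (both sides compare Unix seconds, or nanoseconds for duration).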
func cutAny(s string, seps ...string) (before, after, sep string, found bool) {
for _, sep := range seps {
if idx := strings.Index(s, sep); idx != -1 {
return s[:idx], s[idx+len(sep):], sep, true
}
}
return s, "", "", false
}
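cutAny generalizes strings.Cut to multiple separators, returning on the first separator found; ordering matters, which is why the multi-character separators that contain a shorter one ("!=", "<=", ">=") must be tried before "=", "<", and ">" at the call site above. A standalone check:

package main

import (
    "fmt"
    "strings"
)

// cutAny returns the text around the first separator found, trying the
// separators in the order given.
func cutAny(s string, seps ...string) (before, after, sep string, found bool) {
    for _, sep := range seps {
        if idx := strings.Index(s, sep); idx != -1 {
            return s[:idx], s[idx+len(sep):], sep, true
        }
    }
    return s, "", "", false
}

func main() {
    k, v, sep, _ := cutAny("duration<=5m", "!=", "<=", ">=", "=", "<", ">")
    fmt.Println(k, sep, v) // duration <= 5m
}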


@@ -194,7 +194,7 @@ func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, arg
}
s := s
eg2.Go(func() error {
sub.Log(1, fmt.Appendf(nil, "copying %s from %s to %s\n", s.Desc.Digest.String(), s.Ref.String(), t.String()))
sub.Log(1, []byte(fmt.Sprintf("copying %s from %s to %s\n", s.Desc.Digest.String(), s.Ref.String(), t.String())))
return r.Copy(ctx, s, t)
})
}
@@ -202,7 +202,7 @@ func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, arg
if err := eg2.Wait(); err != nil {
return err
}
sub.Log(1, fmt.Appendf(nil, "pushing %s to %s\n", desc.Digest.String(), t.String()))
sub.Log(1, []byte(fmt.Sprintf("pushing %s to %s\n", desc.Digest.String(), t.String())))
return r.Push(ctx, t, desc, dt)
})
})


@@ -115,25 +115,6 @@ func runInspect(ctx context.Context, dockerCli command.Cli, in inspectOptions) e
fmt.Fprintf(w, "\t%s:\t%s\n", k, v)
}
}
if len(nodes[i].CDIDevices) > 0 {
fmt.Fprintf(w, "Devices:\n")
for _, dev := range nodes[i].CDIDevices {
fmt.Fprintf(w, "\tName:\t%s\n", dev.Name)
if dev.OnDemand {
fmt.Fprintf(w, "\tOn-Demand:\t%v\n", dev.OnDemand)
} else {
fmt.Fprintf(w, "\tAutomatically allowed:\t%v\n", dev.AutoAllow)
}
if len(dev.Annotations) > 0 {
fmt.Fprintf(w, "\tAnnotations:\n")
for k, v := range dev.Annotations {
fmt.Fprintf(w, "\t\t%s:\t%s\n", k, v)
}
}
}
}
for ri, rule := range nodes[i].GCPolicy {
fmt.Fprintf(w, "GC Policy rule#%d:\n", ri)
fmt.Fprintf(w, "\tAll:\t%v\n", rule.All)


@@ -4,7 +4,6 @@ import (
"context"
"encoding/json"
"fmt"
"maps"
"sort"
"strings"
"time"
@@ -160,9 +159,6 @@ func lsPrint(dockerCli command.Cli, current *store.NodeGroup, builders []*builde
}
continue
}
if ctx.Format.IsJSON() {
continue
}
for _, n := range b.Nodes() {
if n.Err != nil {
if ctx.Format.IsTable() {
@@ -410,7 +406,9 @@ func truncPlatforms(pfs []string, max int) truncatedPlatforms {
left[ppf] = append(left[ppf], pf)
}
}
maps.Copy(res, left)
for k, v := range left {
res[k] = v
}
return truncatedPlatforms{
res: res,
input: pfs,


@@ -5,8 +5,8 @@ import (
"os"
debugcmd "github.com/docker/buildx/commands/debug"
historycmd "github.com/docker/buildx/commands/history"
imagetoolscmd "github.com/docker/buildx/commands/imagetools"
"github.com/docker/buildx/controller/remote"
"github.com/docker/buildx/util/cobrautil/completion"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/logutil"
@@ -15,14 +15,13 @@ import (
"github.com/docker/cli/cli-plugins/plugin"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/debug"
cliflags "github.com/docker/cli/cli/flags"
"github.com/moby/buildkit/util/appcontext"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
func NewRootCmd(name string, isPlugin bool, dockerCli *command.DockerCli) *cobra.Command {
func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Command {
var opt rootOptions
cmd := &cobra.Command{
Short: "Docker Buildx",
@@ -40,17 +39,7 @@ func NewRootCmd(name string, isPlugin bool, dockerCli *command.DockerCli) *cobra
}
cmd.SetContext(appcontext.Context())
if !isPlugin {
// InstallFlags and SetDefaultOptions are necessary to match
// the plugin mode behavior to handle env vars such as
// DOCKER_TLS, DOCKER_TLS_VERIFY, ... and we also need to use a
// new flagset to avoid conflict with the global debug flag
// that we already handle in the root command otherwise it
// would panic.
nflags := pflag.NewFlagSet(cmd.DisplayName(), pflag.ContinueOnError)
options := cliflags.NewClientOptions()
options.InstallFlags(nflags)
options.SetDefaultOptions(nflags)
return dockerCli.Initialize(options)
return nil
}
return plugin.PersistentPreRunE(cmd, args)
},
@@ -117,12 +106,12 @@ func addCommands(cmd *cobra.Command, opts *rootOptions, dockerCli command.Cli) {
pruneCmd(dockerCli, opts),
duCmd(dockerCli, opts),
imagetoolscmd.RootCmd(cmd, dockerCli, imagetoolscmd.RootOptions{Builder: &opts.builder}),
historycmd.RootCmd(cmd, dockerCli, historycmd.RootOptions{Builder: &opts.builder}),
)
if confutil.IsExperimental() {
cmd.AddCommand(debugcmd.RootCmd(dockerCli,
newDebuggableBuild(dockerCli, opts),
))
remote.AddControllerCommands(cmd, dockerCli)
}
cmd.RegisterFlagCompletionFunc( //nolint:errcheck


@@ -34,7 +34,7 @@ const defaultTargetName = "default"
// NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultHandle,
// this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can
// inspect the result and debug the cause of that error.
func RunBuild(ctx context.Context, dockerCli command.Cli, in *Options, inStream io.Reader, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultHandle, *build.Inputs, error) {
func RunBuild(ctx context.Context, dockerCli command.Cli, in *controllerapi.BuildOptions, inStream io.Reader, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultHandle, *build.Inputs, error) {
if in.NoCache && len(in.NoCacheFilter) > 0 {
return nil, nil, nil, errors.Errorf("--no-cache and --no-cache-filter cannot currently be used together")
}
@@ -75,9 +75,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in *Options, inStream
opts.Platforms = platforms
dockerConfig := dockerCli.ConfigFile()
opts.Session = append(opts.Session, authprovider.NewDockerAuthProvider(authprovider.DockerAuthProviderConfig{
ConfigFile: dockerConfig,
}))
opts.Session = append(opts.Session, authprovider.NewDockerAuthProvider(dockerConfig, nil))
secrets, err := controllerapi.CreateSecrets(in.Secrets)
if err != nil {


@@ -5,22 +5,29 @@ import (
"io"
"github.com/docker/buildx/build"
cbuild "github.com/docker/buildx/controller/build"
controllerapi "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/controller/processes"
"github.com/docker/buildx/util/progress"
"github.com/moby/buildkit/client"
)
type BuildxController interface {
Build(ctx context.Context, options *cbuild.Options, in io.ReadCloser, progress progress.Writer) (resp *client.SolveResponse, inputs *build.Inputs, err error)
Build(ctx context.Context, options *controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (ref string, resp *client.SolveResponse, inputs *build.Inputs, err error)
// Invoke starts an IO session into the specified process.
// If pid doesn't match to any running processes, it starts a new process with the specified config.
// If there is no container running or InvokeConfig.Rollback is specified, the process will start in a newly created container.
// NOTE: If needed, in the future, we can split this API into three APIs (NewContainer, NewProcess and Attach).
Invoke(ctx context.Context, pid string, options *controllerapi.InvokeConfig, ioIn io.ReadCloser, ioOut io.WriteCloser, ioErr io.WriteCloser) error
Invoke(ctx context.Context, ref, pid string, options *controllerapi.InvokeConfig, ioIn io.ReadCloser, ioOut io.WriteCloser, ioErr io.WriteCloser) error
Kill(ctx context.Context) error
Close() error
ListProcesses(ctx context.Context) (infos []*processes.ProcessInfo, retErr error)
DisconnectProcess(ctx context.Context, pid string) error
Inspect(ctx context.Context) *cbuild.Options
List(ctx context.Context) (refs []string, _ error)
Disconnect(ctx context.Context, ref string) error
ListProcesses(ctx context.Context, ref string) (infos []*controllerapi.ProcessInfo, retErr error)
DisconnectProcess(ctx context.Context, ref, pid string) error
Inspect(ctx context.Context, ref string) (*controllerapi.InspectResponse, error)
}
type ControlOptions struct {
ServerConfig string
Root string
Detach bool
}


@@ -2,12 +2,35 @@ package controller
import (
"context"
"fmt"
"github.com/docker/buildx/controller/control"
"github.com/docker/buildx/controller/local"
"github.com/docker/buildx/controller/remote"
"github.com/docker/buildx/util/progress"
"github.com/docker/cli/cli/command"
"github.com/pkg/errors"
)
func NewController(ctx context.Context, dockerCli command.Cli) control.BuildxController {
return local.NewLocalBuildxController(ctx, dockerCli)
func NewController(ctx context.Context, opts control.ControlOptions, dockerCli command.Cli, pw progress.Writer) (control.BuildxController, error) {
var name string
if opts.Detach {
name = "remote"
} else {
name = "local"
}
var c control.BuildxController
err := progress.Wrap(fmt.Sprintf("[internal] connecting to %s controller", name), pw.Write, func(l progress.SubLogger) (err error) {
if opts.Detach {
c, err = remote.NewRemoteBuildxController(ctx, dockerCli, opts, l)
} else {
c = local.NewLocalBuildxController(ctx, dockerCli, l)
}
return err
})
if err != nil {
return nil, errors.Wrap(err, "failed to start buildx controller")
}
return c, nil
}


@@ -1,20 +1,48 @@
package errdefs
import (
"io"
"github.com/containerd/typeurl/v2"
"github.com/docker/buildx/util/desktop"
"github.com/moby/buildkit/util/grpcerrors"
)
func init() {
typeurl.Register((*Build)(nil), "github.com/docker/buildx", "errdefs.Build+json")
}
type BuildError struct {
err error
*Build
error
}
func (e *BuildError) Unwrap() error {
return e.err
return e.error
}
func (e *BuildError) Error() string {
return e.err.Error()
func (e *BuildError) ToProto() grpcerrors.TypedErrorProto {
return e.Build
}
func WrapBuild(err error) error {
func (e *BuildError) PrintBuildDetails(w io.Writer) error {
if e.Ref == "" {
return nil
}
ebr := &desktop.ErrorWithBuildRef{
Ref: e.Ref,
Err: e.error,
}
return ebr.Print(w)
}
func WrapBuild(err error, sessionID string, ref string) error {
if err == nil {
return nil
}
return &BuildError{err: err}
return &BuildError{Build: &Build{SessionID: sessionID, Ref: ref}, error: err}
}
func (b *Build) WrapError(err error) error {
return &BuildError{error: err, Build: b}
}


@@ -0,0 +1,157 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.34.1
// protoc v3.11.4
// source: github.com/docker/buildx/controller/errdefs/errdefs.proto
package errdefs
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type Build struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
SessionID string `protobuf:"bytes,1,opt,name=SessionID,proto3" json:"SessionID,omitempty"`
Ref string `protobuf:"bytes,2,opt,name=Ref,proto3" json:"Ref,omitempty"`
}
func (x *Build) Reset() {
*x = Build{}
if protoimpl.UnsafeEnabled {
mi := &file_github_com_docker_buildx_controller_errdefs_errdefs_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Build) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Build) ProtoMessage() {}
func (x *Build) ProtoReflect() protoreflect.Message {
mi := &file_github_com_docker_buildx_controller_errdefs_errdefs_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Build.ProtoReflect.Descriptor instead.
func (*Build) Descriptor() ([]byte, []int) {
return file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescGZIP(), []int{0}
}
func (x *Build) GetSessionID() string {
if x != nil {
return x.SessionID
}
return ""
}
func (x *Build) GetRef() string {
if x != nil {
return x.Ref
}
return ""
}
var File_github_com_docker_buildx_controller_errdefs_errdefs_proto protoreflect.FileDescriptor
var file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDesc = []byte{
0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x6f, 0x63,
0x6b, 0x65, 0x72, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x78, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72,
0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x66, 0x73, 0x2f, 0x65, 0x72,
0x72, 0x64, 0x65, 0x66, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x64, 0x6f, 0x63,
0x6b, 0x65, 0x72, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x78, 0x2e, 0x65, 0x72, 0x72, 0x64, 0x65,
0x66, 0x73, 0x22, 0x37, 0x0a, 0x05, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x53,
0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x10, 0x0a, 0x03, 0x52, 0x65, 0x66,
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x42, 0x2d, 0x5a, 0x2b, 0x67,
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72,
0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x78, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c,
0x65, 0x72, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x66, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x33,
}
var (
file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescOnce sync.Once
file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescData = file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDesc
)
func file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescGZIP() []byte {
file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescOnce.Do(func() {
file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescData)
})
return file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescData
}
var file_github_com_docker_buildx_controller_errdefs_errdefs_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_github_com_docker_buildx_controller_errdefs_errdefs_proto_goTypes = []interface{}{
(*Build)(nil), // 0: docker.buildx.errdefs.Build
}
var file_github_com_docker_buildx_controller_errdefs_errdefs_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_github_com_docker_buildx_controller_errdefs_errdefs_proto_init() }
func file_github_com_docker_buildx_controller_errdefs_errdefs_proto_init() {
if File_github_com_docker_buildx_controller_errdefs_errdefs_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_github_com_docker_buildx_controller_errdefs_errdefs_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Build); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_github_com_docker_buildx_controller_errdefs_errdefs_proto_goTypes,
DependencyIndexes: file_github_com_docker_buildx_controller_errdefs_errdefs_proto_depIdxs,
MessageInfos: file_github_com_docker_buildx_controller_errdefs_errdefs_proto_msgTypes,
}.Build()
File_github_com_docker_buildx_controller_errdefs_errdefs_proto = out.File
file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDesc = nil
file_github_com_docker_buildx_controller_errdefs_errdefs_proto_goTypes = nil
file_github_com_docker_buildx_controller_errdefs_errdefs_proto_depIdxs = nil
}


@@ -0,0 +1,10 @@
syntax = "proto3";
package docker.buildx.errdefs;
option go_package = "github.com/docker/buildx/controller/errdefs";
message Build {
string SessionID = 1;
string Ref = 2;
}


@@ -0,0 +1,241 @@
// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
// protoc-gen-go-vtproto version: v0.6.1-0.20240319094008-0393e58bdf10
// source: github.com/docker/buildx/controller/errdefs/errdefs.proto
package errdefs
import (
fmt "fmt"
protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
proto "google.golang.org/protobuf/proto"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
io "io"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
func (m *Build) CloneVT() *Build {
if m == nil {
return (*Build)(nil)
}
r := new(Build)
r.SessionID = m.SessionID
r.Ref = m.Ref
if len(m.unknownFields) > 0 {
r.unknownFields = make([]byte, len(m.unknownFields))
copy(r.unknownFields, m.unknownFields)
}
return r
}
func (m *Build) CloneMessageVT() proto.Message {
return m.CloneVT()
}
func (this *Build) EqualVT(that *Build) bool {
if this == that {
return true
} else if this == nil || that == nil {
return false
}
if this.SessionID != that.SessionID {
return false
}
if this.Ref != that.Ref {
return false
}
return string(this.unknownFields) == string(that.unknownFields)
}
func (this *Build) EqualMessageVT(thatMsg proto.Message) bool {
that, ok := thatMsg.(*Build)
if !ok {
return false
}
return this.EqualVT(that)
}
func (m *Build) MarshalVT() (dAtA []byte, err error) {
if m == nil {
return nil, nil
}
size := m.SizeVT()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBufferVT(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Build) MarshalToVT(dAtA []byte) (int, error) {
size := m.SizeVT()
return m.MarshalToSizedBufferVT(dAtA[:size])
}
func (m *Build) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
if m == nil {
return 0, nil
}
i := len(dAtA)
_ = i
var l int
_ = l
if m.unknownFields != nil {
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
if len(m.Ref) > 0 {
i -= len(m.Ref)
copy(dAtA[i:], m.Ref)
i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Ref)))
i--
dAtA[i] = 0x12
}
if len(m.SessionID) > 0 {
i -= len(m.SessionID)
copy(dAtA[i:], m.SessionID)
i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SessionID)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func (m *Build) SizeVT() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.SessionID)
if l > 0 {
n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
}
l = len(m.Ref)
if l > 0 {
n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
}
n += len(m.unknownFields)
return n
}
func (m *Build) UnmarshalVT(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return protohelpers.ErrIntOverflow
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Build: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Build: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return protohelpers.ErrIntOverflow
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return protohelpers.ErrInvalidLength
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return protohelpers.ErrInvalidLength
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.SessionID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return protohelpers.ErrIntOverflow
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return protohelpers.ErrInvalidLength
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return protohelpers.ErrInvalidLength
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Ref = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := protohelpers.Skip(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return protohelpers.ErrInvalidLength
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
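
The vtproto-generated helpers above marshal and unmarshal without reflection. A minimal round-trip sketch, using the import path from the generated header (values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/docker/buildx/controller/errdefs"
)

func main() {
	in := &errdefs.Build{SessionID: "abc123", Ref: "build-ref"}

	// MarshalVT emits standard proto3 wire format: tag 0x0a (field 1,
	// wire type 2) + length + SessionID bytes, then tag 0x12 (field 2,
	// wire type 2) + length + Ref bytes.
	data, err := in.MarshalVT()
	if err != nil {
		panic(err)
	}

	out := &errdefs.Build{}
	if err := out.UnmarshalVT(data); err != nil {
		panic(err)
	}
	fmt.Println(in.EqualVT(out)) // true
}
```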

View File

@@ -11,6 +11,7 @@ import (
controllererrors "github.com/docker/buildx/controller/errdefs"
controllerapi "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/controller/processes"
"github.com/docker/buildx/util/desktop"
"github.com/docker/buildx/util/ioset"
"github.com/docker/buildx/util/progress"
"github.com/docker/cli/cli/command"
@@ -18,9 +19,10 @@ import (
"github.com/pkg/errors"
)
func NewLocalBuildxController(ctx context.Context, dockerCli command.Cli) control.BuildxController {
func NewLocalBuildxController(ctx context.Context, dockerCli command.Cli, logger progress.SubLogger) control.BuildxController {
return &localController{
dockerCli: dockerCli,
sessionID: "local",
processes: processes.NewManager(),
}
}
@@ -29,20 +31,21 @@ type buildConfig struct {
// TODO: these two structs should be merged
// Discussion: https://github.com/docker/buildx/pull/1640#discussion_r1113279719
resultCtx *build.ResultHandle
buildOptions *cbuild.Options
buildOptions *controllerapi.BuildOptions
}
type localController struct {
dockerCli command.Cli
sessionID string
buildConfig buildConfig
processes *processes.Manager
buildOnGoing atomic.Bool
}
func (b *localController) Build(ctx context.Context, options *cbuild.Options, in io.ReadCloser, progress progress.Writer) (*client.SolveResponse, *build.Inputs, error) {
func (b *localController) Build(ctx context.Context, options *controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, *build.Inputs, error) {
if !b.buildOnGoing.CompareAndSwap(false, true) {
return nil, nil, errors.New("build ongoing")
return "", nil, nil, errors.New("build ongoing")
}
defer b.buildOnGoing.Store(false)
@@ -54,20 +57,31 @@ func (b *localController) Build(ctx context.Context, options *cbuild.Options, in
buildOptions: options,
}
if buildErr != nil {
buildErr = controllererrors.WrapBuild(buildErr)
var ref string
var ebr *desktop.ErrorWithBuildRef
if errors.As(buildErr, &ebr) {
ref = ebr.Ref
}
buildErr = controllererrors.WrapBuild(buildErr, b.sessionID, ref)
}
}
if buildErr != nil {
return nil, nil, buildErr
return "", nil, nil, buildErr
}
return resp, dockerfileMappings, nil
return b.sessionID, resp, dockerfileMappings, nil
}
func (b *localController) ListProcesses(ctx context.Context) (infos []*processes.ProcessInfo, retErr error) {
func (b *localController) ListProcesses(ctx context.Context, sessionID string) (infos []*controllerapi.ProcessInfo, retErr error) {
if sessionID != b.sessionID {
return nil, errors.Errorf("unknown session ID %q", sessionID)
}
return b.processes.ListProcesses(), nil
}
func (b *localController) DisconnectProcess(ctx context.Context, pid string) error {
func (b *localController) DisconnectProcess(ctx context.Context, sessionID, pid string) error {
if sessionID != b.sessionID {
return errors.Errorf("unknown session ID %q", sessionID)
}
return b.processes.DeleteProcess(pid)
}
@@ -75,7 +89,11 @@ func (b *localController) cancelRunningProcesses() {
b.processes.CancelRunningProcesses()
}
func (b *localController) Invoke(ctx context.Context, pid string, cfg *controllerapi.InvokeConfig, ioIn io.ReadCloser, ioOut io.WriteCloser, ioErr io.WriteCloser) error {
func (b *localController) Invoke(ctx context.Context, sessionID string, pid string, cfg *controllerapi.InvokeConfig, ioIn io.ReadCloser, ioOut io.WriteCloser, ioErr io.WriteCloser) error {
if sessionID != b.sessionID {
return errors.Errorf("unknown session ID %q", sessionID)
}
proc, ok := b.processes.Get(pid)
if !ok {
// Start a new process.
@@ -103,6 +121,11 @@ func (b *localController) Invoke(ctx context.Context, pid string, cfg *controlle
}
}
func (b *localController) Kill(context.Context) error {
b.Close()
return nil
}
func (b *localController) Close() error {
b.cancelRunningProcesses()
if b.buildConfig.resultCtx != nil {
@@ -112,6 +135,18 @@ func (b *localController) Close() error {
return nil
}
func (b *localController) Inspect(ctx context.Context) *cbuild.Options {
return b.buildConfig.buildOptions
func (b *localController) List(ctx context.Context) (res []string, _ error) {
return []string{b.sessionID}, nil
}
func (b *localController) Disconnect(ctx context.Context, key string) error {
b.Close()
return nil
}
func (b *localController) Inspect(ctx context.Context, sessionID string) (*controllerapi.InspectResponse, error) {
if sessionID != b.sessionID {
return nil, errors.Errorf("unknown session ID %q", sessionID)
}
return &controllerapi.InspectResponse{Options: b.buildConfig.buildOptions}, nil
}
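
With the signature change above, Build also returns the session ID, and failed builds are wrapped so the Desktop build ref survives the error chain. A hedged caller-side sketch; the Ref field comes from the diff above, while the function name and output format are illustrative:

```go
package main

import (
	"fmt"

	"github.com/docker/buildx/util/desktop"
	"github.com/pkg/errors"
)

// reportBuildFailure sketches how a caller can recover the BuildKit ref
// carried by desktop.ErrorWithBuildRef after a failed Build call.
func reportBuildFailure(sessionID string, err error) {
	var ebr *desktop.ErrorWithBuildRef
	if errors.As(err, &ebr) {
		fmt.Printf("session %s failed; inspect build ref %s\n", sessionID, ebr.Ref)
		return
	}
	fmt.Printf("session %s failed: %v\n", sessionID, err)
}

func main() {
	reportBuildFailure("local", errors.New("example failure"))
}
```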

View File

@@ -1,11 +1,5 @@
package pb
type Attest struct {
Type string
Disabled bool
Attrs string
}
func CreateAttestations(attests []*Attest) map[string]*string {
result := map[string]*string{}
for _, attest := range attests {

View File

@@ -1,15 +1,6 @@
package pb
import (
"maps"
"github.com/moby/buildkit/client"
)
type CacheOptionsEntry struct {
Type string
Attrs map[string]string
}
import "github.com/moby/buildkit/client"
func CreateCaches(entries []*CacheOptionsEntry) []client.CacheOptionsEntry {
var outs []client.CacheOptionsEntry
@@ -21,7 +12,9 @@ func CreateCaches(entries []*CacheOptionsEntry) []client.CacheOptionsEntry {
Type: entry.Type,
Attrs: map[string]string{},
}
maps.Copy(out.Attrs, entry.Attrs)
for k, v := range entry.Attrs {
out.Attrs[k] = v
}
outs = append(outs, out)
}
return outs

File diff suppressed because it is too large

View File

@@ -0,0 +1,250 @@
syntax = "proto3";
package buildx.controller.v1;
import "github.com/moby/buildkit/api/services/control/control.proto";
import "github.com/moby/buildkit/sourcepolicy/pb/policy.proto";
option go_package = "github.com/docker/buildx/controller/pb";
service Controller {
rpc Build(BuildRequest) returns (BuildResponse);
rpc Inspect(InspectRequest) returns (InspectResponse);
rpc Status(StatusRequest) returns (stream StatusResponse);
rpc Input(stream InputMessage) returns (InputResponse);
rpc Invoke(stream Message) returns (stream Message);
rpc List(ListRequest) returns (ListResponse);
rpc Disconnect(DisconnectRequest) returns (DisconnectResponse);
rpc Info(InfoRequest) returns (InfoResponse);
rpc ListProcesses(ListProcessesRequest) returns (ListProcessesResponse);
rpc DisconnectProcess(DisconnectProcessRequest) returns (DisconnectProcessResponse);
}
message ListProcessesRequest {
string SessionID = 1;
}
message ListProcessesResponse {
repeated ProcessInfo Infos = 1;
}
message ProcessInfo {
string ProcessID = 1;
InvokeConfig InvokeConfig = 2;
}
message DisconnectProcessRequest {
string SessionID = 1;
string ProcessID = 2;
}
message DisconnectProcessResponse {
}
message BuildRequest {
string SessionID = 1;
BuildOptions Options = 2;
}
message BuildOptions {
string ContextPath = 1;
string DockerfileName = 2;
CallFunc CallFunc = 3;
map<string, string> NamedContexts = 4;
repeated string Allow = 5;
repeated Attest Attests = 6;
map<string, string> BuildArgs = 7;
repeated CacheOptionsEntry CacheFrom = 8;
repeated CacheOptionsEntry CacheTo = 9;
string CgroupParent = 10;
repeated ExportEntry Exports = 11;
repeated string ExtraHosts = 12;
map<string, string> Labels = 13;
string NetworkMode = 14;
repeated string NoCacheFilter = 15;
repeated string Platforms = 16;
repeated Secret Secrets = 17;
int64 ShmSize = 18;
repeated SSH SSH = 19;
repeated string Tags = 20;
string Target = 21;
UlimitOpt Ulimits = 22;
string Builder = 23;
bool NoCache = 24;
bool Pull = 25;
bool ExportPush = 26;
bool ExportLoad = 27;
moby.buildkit.v1.sourcepolicy.Policy SourcePolicy = 28;
string Ref = 29;
string GroupRef = 30;
repeated string Annotations = 31;
string ProvenanceResponseMode = 32;
}
message ExportEntry {
string Type = 1;
map<string, string> Attrs = 2;
string Destination = 3;
}
message CacheOptionsEntry {
string Type = 1;
map<string, string> Attrs = 2;
}
message Attest {
string Type = 1;
bool Disabled = 2;
string Attrs = 3;
}
message SSH {
string ID = 1;
repeated string Paths = 2;
}
message Secret {
string ID = 1;
string FilePath = 2;
string Env = 3;
}
message CallFunc {
string Name = 1;
string Format = 2;
bool IgnoreStatus = 3;
}
message InspectRequest {
string SessionID = 1;
}
message InspectResponse {
BuildOptions Options = 1;
}
message UlimitOpt {
map<string, Ulimit> values = 1;
}
message Ulimit {
string Name = 1;
int64 Hard = 2;
int64 Soft = 3;
}
message BuildResponse {
map<string, string> ExporterResponse = 1;
}
message DisconnectRequest {
string SessionID = 1;
}
message DisconnectResponse {}
message ListRequest {
string SessionID = 1;
}
message ListResponse {
repeated string keys = 1;
}
message InputMessage {
oneof Input {
InputInitMessage Init = 1;
DataMessage Data = 2;
}
}
message InputInitMessage {
string SessionID = 1;
}
message DataMessage {
bool EOF = 1; // true if eof was reached
bytes Data = 2; // should be chunked smaller than 4MB:
// https://pkg.go.dev/google.golang.org/grpc#MaxRecvMsgSize
}
message InputResponse {}
message Message {
oneof Input {
InitMessage Init = 1;
// FdMessage used from client to server for input (stdin) and
// from server to client for output (stdout, stderr)
FdMessage File = 2;
// ResizeMessage used from client to server for terminal resize events
ResizeMessage Resize = 3;
// SignalMessage is used from client to server to send signal events
SignalMessage Signal = 4;
}
}
message InitMessage {
string SessionID = 1;
// If ProcessID already exists in the server, it tries to connect to it
// instead of invoking the new one. In this case, InvokeConfig will be ignored.
string ProcessID = 2;
InvokeConfig InvokeConfig = 3;
}
message InvokeConfig {
repeated string Entrypoint = 1;
repeated string Cmd = 2;
bool NoCmd = 11; // Do not set cmd but use the image's default
repeated string Env = 3;
string User = 4;
bool NoUser = 5; // Do not set user but use the image's default
string Cwd = 6;
bool NoCwd = 7; // Do not set cwd but use the image's default
bool Tty = 8;
bool Rollback = 9; // Kill all processes in the container and recreate it.
bool Initial = 10; // Run container from the initial state of that stage (supported only on the failed step)
}
message FdMessage {
uint32 Fd = 1; // what fd the data was from
bool EOF = 2; // true if eof was reached
bytes Data = 3; // should be chunked smaller than 4MB:
// https://pkg.go.dev/google.golang.org/grpc#MaxRecvMsgSize
}
message ResizeMessage {
uint32 Rows = 1;
uint32 Cols = 2;
}
message SignalMessage {
// we only send name (ie HUP, INT) because the int values
// are platform dependent.
string Name = 1;
}
message StatusRequest {
string SessionID = 1;
}
message StatusResponse {
repeated moby.buildkit.v1.Vertex vertexes = 1;
repeated moby.buildkit.v1.VertexStatus statuses = 2;
repeated moby.buildkit.v1.VertexLog logs = 3;
repeated moby.buildkit.v1.VertexWarning warnings = 4;
}
message InfoRequest {}
message InfoResponse {
BuildxVersion buildxVersion = 1;
}
message BuildxVersion {
string package = 1;
string version = 2;
string revision = 3;
}
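
Any gRPC client generated from this schema can drive the controller. A minimal sketch that queries the Info RPC; the unix socket address is an assumption for illustration (the real path is derived from the buildx config directory in the serve command further down):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/buildx/controller/pb"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.NewClient("unix:///tmp/buildx.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	res, err := pb.NewControllerClient(conn).Info(context.Background(), &pb.InfoRequest{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("server:", res.BuildxVersion.Package, res.BuildxVersion.Version)
}
```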

View File

@@ -0,0 +1,452 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc v3.11.4
// source: github.com/docker/buildx/controller/pb/controller.proto
package pb
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9
const (
Controller_Build_FullMethodName = "/buildx.controller.v1.Controller/Build"
Controller_Inspect_FullMethodName = "/buildx.controller.v1.Controller/Inspect"
Controller_Status_FullMethodName = "/buildx.controller.v1.Controller/Status"
Controller_Input_FullMethodName = "/buildx.controller.v1.Controller/Input"
Controller_Invoke_FullMethodName = "/buildx.controller.v1.Controller/Invoke"
Controller_List_FullMethodName = "/buildx.controller.v1.Controller/List"
Controller_Disconnect_FullMethodName = "/buildx.controller.v1.Controller/Disconnect"
Controller_Info_FullMethodName = "/buildx.controller.v1.Controller/Info"
Controller_ListProcesses_FullMethodName = "/buildx.controller.v1.Controller/ListProcesses"
Controller_DisconnectProcess_FullMethodName = "/buildx.controller.v1.Controller/DisconnectProcess"
)
// ControllerClient is the client API for Controller service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type ControllerClient interface {
Build(ctx context.Context, in *BuildRequest, opts ...grpc.CallOption) (*BuildResponse, error)
Inspect(ctx context.Context, in *InspectRequest, opts ...grpc.CallOption) (*InspectResponse, error)
Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[StatusResponse], error)
Input(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[InputMessage, InputResponse], error)
Invoke(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[Message, Message], error)
List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error)
Disconnect(ctx context.Context, in *DisconnectRequest, opts ...grpc.CallOption) (*DisconnectResponse, error)
Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error)
ListProcesses(ctx context.Context, in *ListProcessesRequest, opts ...grpc.CallOption) (*ListProcessesResponse, error)
DisconnectProcess(ctx context.Context, in *DisconnectProcessRequest, opts ...grpc.CallOption) (*DisconnectProcessResponse, error)
}
type controllerClient struct {
cc grpc.ClientConnInterface
}
func NewControllerClient(cc grpc.ClientConnInterface) ControllerClient {
return &controllerClient{cc}
}
func (c *controllerClient) Build(ctx context.Context, in *BuildRequest, opts ...grpc.CallOption) (*BuildResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(BuildResponse)
err := c.cc.Invoke(ctx, Controller_Build_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *controllerClient) Inspect(ctx context.Context, in *InspectRequest, opts ...grpc.CallOption) (*InspectResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(InspectResponse)
err := c.cc.Invoke(ctx, Controller_Inspect_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *controllerClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[StatusResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &Controller_ServiceDesc.Streams[0], Controller_Status_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &grpc.GenericClientStream[StatusRequest, StatusResponse]{ClientStream: stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Controller_StatusClient = grpc.ServerStreamingClient[StatusResponse]
func (c *controllerClient) Input(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[InputMessage, InputResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &Controller_ServiceDesc.Streams[1], Controller_Input_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &grpc.GenericClientStream[InputMessage, InputResponse]{ClientStream: stream}
return x, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Controller_InputClient = grpc.ClientStreamingClient[InputMessage, InputResponse]
func (c *controllerClient) Invoke(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[Message, Message], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &Controller_ServiceDesc.Streams[2], Controller_Invoke_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &grpc.GenericClientStream[Message, Message]{ClientStream: stream}
return x, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Controller_InvokeClient = grpc.BidiStreamingClient[Message, Message]
func (c *controllerClient) List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ListResponse)
err := c.cc.Invoke(ctx, Controller_List_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *controllerClient) Disconnect(ctx context.Context, in *DisconnectRequest, opts ...grpc.CallOption) (*DisconnectResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(DisconnectResponse)
err := c.cc.Invoke(ctx, Controller_Disconnect_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *controllerClient) Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(InfoResponse)
err := c.cc.Invoke(ctx, Controller_Info_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *controllerClient) ListProcesses(ctx context.Context, in *ListProcessesRequest, opts ...grpc.CallOption) (*ListProcessesResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ListProcessesResponse)
err := c.cc.Invoke(ctx, Controller_ListProcesses_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *controllerClient) DisconnectProcess(ctx context.Context, in *DisconnectProcessRequest, opts ...grpc.CallOption) (*DisconnectProcessResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(DisconnectProcessResponse)
err := c.cc.Invoke(ctx, Controller_DisconnectProcess_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
// ControllerServer is the server API for Controller service.
// All implementations should embed UnimplementedControllerServer
// for forward compatibility.
type ControllerServer interface {
Build(context.Context, *BuildRequest) (*BuildResponse, error)
Inspect(context.Context, *InspectRequest) (*InspectResponse, error)
Status(*StatusRequest, grpc.ServerStreamingServer[StatusResponse]) error
Input(grpc.ClientStreamingServer[InputMessage, InputResponse]) error
Invoke(grpc.BidiStreamingServer[Message, Message]) error
List(context.Context, *ListRequest) (*ListResponse, error)
Disconnect(context.Context, *DisconnectRequest) (*DisconnectResponse, error)
Info(context.Context, *InfoRequest) (*InfoResponse, error)
ListProcesses(context.Context, *ListProcessesRequest) (*ListProcessesResponse, error)
DisconnectProcess(context.Context, *DisconnectProcessRequest) (*DisconnectProcessResponse, error)
}
// UnimplementedControllerServer should be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedControllerServer struct{}
func (UnimplementedControllerServer) Build(context.Context, *BuildRequest) (*BuildResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Build not implemented")
}
func (UnimplementedControllerServer) Inspect(context.Context, *InspectRequest) (*InspectResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Inspect not implemented")
}
func (UnimplementedControllerServer) Status(*StatusRequest, grpc.ServerStreamingServer[StatusResponse]) error {
return status.Errorf(codes.Unimplemented, "method Status not implemented")
}
func (UnimplementedControllerServer) Input(grpc.ClientStreamingServer[InputMessage, InputResponse]) error {
return status.Errorf(codes.Unimplemented, "method Input not implemented")
}
func (UnimplementedControllerServer) Invoke(grpc.BidiStreamingServer[Message, Message]) error {
return status.Errorf(codes.Unimplemented, "method Invoke not implemented")
}
func (UnimplementedControllerServer) List(context.Context, *ListRequest) (*ListResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method List not implemented")
}
func (UnimplementedControllerServer) Disconnect(context.Context, *DisconnectRequest) (*DisconnectResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Disconnect not implemented")
}
func (UnimplementedControllerServer) Info(context.Context, *InfoRequest) (*InfoResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Info not implemented")
}
func (UnimplementedControllerServer) ListProcesses(context.Context, *ListProcessesRequest) (*ListProcessesResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ListProcesses not implemented")
}
func (UnimplementedControllerServer) DisconnectProcess(context.Context, *DisconnectProcessRequest) (*DisconnectProcessResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method DisconnectProcess not implemented")
}
func (UnimplementedControllerServer) testEmbeddedByValue() {}
// UnsafeControllerServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ControllerServer will
// result in compilation errors.
type UnsafeControllerServer interface {
mustEmbedUnimplementedControllerServer()
}
func RegisterControllerServer(s grpc.ServiceRegistrar, srv ControllerServer) {
// If the following call panics, it indicates UnimplementedControllerServer was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
t.testEmbeddedByValue()
}
s.RegisterService(&Controller_ServiceDesc, srv)
}
func _Controller_Build_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(BuildRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ControllerServer).Build(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Controller_Build_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControllerServer).Build(ctx, req.(*BuildRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Controller_Inspect_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(InspectRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ControllerServer).Inspect(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Controller_Inspect_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControllerServer).Inspect(ctx, req.(*InspectRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Controller_Status_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(StatusRequest)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(ControllerServer).Status(m, &grpc.GenericServerStream[StatusRequest, StatusResponse]{ServerStream: stream})
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Controller_StatusServer = grpc.ServerStreamingServer[StatusResponse]
func _Controller_Input_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(ControllerServer).Input(&grpc.GenericServerStream[InputMessage, InputResponse]{ServerStream: stream})
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Controller_InputServer = grpc.ClientStreamingServer[InputMessage, InputResponse]
func _Controller_Invoke_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(ControllerServer).Invoke(&grpc.GenericServerStream[Message, Message]{ServerStream: stream})
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Controller_InvokeServer = grpc.BidiStreamingServer[Message, Message]
func _Controller_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ControllerServer).List(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Controller_List_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControllerServer).List(ctx, req.(*ListRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Controller_Disconnect_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DisconnectRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ControllerServer).Disconnect(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Controller_Disconnect_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControllerServer).Disconnect(ctx, req.(*DisconnectRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Controller_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(InfoRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ControllerServer).Info(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Controller_Info_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControllerServer).Info(ctx, req.(*InfoRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Controller_ListProcesses_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListProcessesRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ControllerServer).ListProcesses(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Controller_ListProcesses_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControllerServer).ListProcesses(ctx, req.(*ListProcessesRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Controller_DisconnectProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DisconnectProcessRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ControllerServer).DisconnectProcess(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Controller_DisconnectProcess_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControllerServer).DisconnectProcess(ctx, req.(*DisconnectProcessRequest))
}
return interceptor(ctx, in, info, handler)
}
// Controller_ServiceDesc is the grpc.ServiceDesc for Controller service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Controller_ServiceDesc = grpc.ServiceDesc{
ServiceName: "buildx.controller.v1.Controller",
HandlerType: (*ControllerServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Build",
Handler: _Controller_Build_Handler,
},
{
MethodName: "Inspect",
Handler: _Controller_Inspect_Handler,
},
{
MethodName: "List",
Handler: _Controller_List_Handler,
},
{
MethodName: "Disconnect",
Handler: _Controller_Disconnect_Handler,
},
{
MethodName: "Info",
Handler: _Controller_Info_Handler,
},
{
MethodName: "ListProcesses",
Handler: _Controller_ListProcesses_Handler,
},
{
MethodName: "DisconnectProcess",
Handler: _Controller_DisconnectProcess_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "Status",
Handler: _Controller_Status_Handler,
ServerStreams: true,
},
{
StreamName: "Input",
Handler: _Controller_Input_Handler,
ClientStreams: true,
},
{
StreamName: "Invoke",
Handler: _Controller_Invoke_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "github.com/docker/buildx/controller/pb/controller.proto",
}
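
A server implementation only needs to embed UnimplementedControllerServer by value, which is exactly what the registration check above verifies. A hedged stub that overrides just Info (socket path illustrative):

```go
package main

import (
	"context"
	"log"
	"net"

	"github.com/docker/buildx/controller/pb"
	"google.golang.org/grpc"
)

// stubServer embeds UnimplementedControllerServer by value, so every RPC
// it does not override returns codes.Unimplemented.
type stubServer struct {
	pb.UnimplementedControllerServer
}

func (stubServer) Info(context.Context, *pb.InfoRequest) (*pb.InfoResponse, error) {
	return &pb.InfoResponse{BuildxVersion: &pb.BuildxVersion{Version: "dev"}}, nil
}

func main() {
	l, err := net.Listen("unix", "/tmp/buildx-stub.sock") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	pb.RegisterControllerServer(s, stubServer{})
	log.Fatal(s.Serve(l))
}
```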

File diff suppressed because it is too large

View File

@@ -2,7 +2,6 @@ package pb
import (
"io"
"maps"
"os"
"strconv"
@@ -11,12 +10,6 @@ import (
"github.com/pkg/errors"
)
type ExportEntry struct {
Type string
Attrs map[string]string
Destination string
}
func CreateExports(entries []*ExportEntry) ([]client.ExportEntry, []string, error) {
var outs []client.ExportEntry
var localPaths []string
@@ -33,7 +26,9 @@ func CreateExports(entries []*ExportEntry) ([]client.ExportEntry, []string, erro
Type: entry.Type,
Attrs: map[string]string{},
}
maps.Copy(out.Attrs, entry.Attrs)
for k, v := range entry.Attrs {
out.Attrs[k] = v
}
supportFile := false
supportDir := false
@@ -51,7 +46,6 @@ func CreateExports(entries []*ExportEntry) ([]client.ExportEntry, []string, erro
supportDir = !tar
case "registry":
out.Type = client.ExporterImage
out.Attrs["push"] = "true"
}
if supportDir {

View File

@@ -1,40 +0,0 @@
package pb
import (
"fmt"
"strings"
)
type CallFunc struct {
Name string
Format string
IgnoreStatus bool
}
func (x *CallFunc) String() string {
var elems []string
if x.Name != "" {
elems = append(elems, fmt.Sprintf("Name:%q", x.Name))
}
if x.Format != "" {
elems = append(elems, fmt.Sprintf("Format:%q", x.Format))
}
if x.IgnoreStatus {
elems = append(elems, fmt.Sprintf("IgnoreStatus:%v", x.IgnoreStatus))
}
return strings.Join(elems, " ")
}
type InvokeConfig struct {
Entrypoint []string
Cmd []string
NoCmd bool
Env []string
User string
NoUser bool
Cwd string
NoCwd bool
Tty bool
Rollback bool
Initial bool
}

View File

@@ -1,52 +1,15 @@
package build
package pb
import (
"path/filepath"
"strings"
"github.com/docker/buildx/controller/pb"
sourcepolicy "github.com/moby/buildkit/sourcepolicy/pb"
"github.com/moby/buildkit/util/gitutil"
)
type Options struct {
ContextPath string
DockerfileName string
CallFunc *pb.CallFunc
NamedContexts map[string]string
Allow []string
Attests []*pb.Attest
BuildArgs map[string]string
CacheFrom []*pb.CacheOptionsEntry
CacheTo []*pb.CacheOptionsEntry
CgroupParent string
Exports []*pb.ExportEntry
ExtraHosts []string
Labels map[string]string
NetworkMode string
NoCacheFilter []string
Platforms []string
Secrets []*pb.Secret
ShmSize int64
SSH []*pb.SSH
Tags []string
Target string
Ulimits *pb.UlimitOpt
Builder string
NoCache bool
Pull bool
ExportPush bool
ExportLoad bool
SourcePolicy *sourcepolicy.Policy
Ref string
GroupRef string
Annotations []string
ProvenanceResponseMode string
}
// ResolveOptionPaths resolves all paths contained in BuildOptions
// and replaces them to absolute paths.
func ResolveOptionPaths(options *Options) (_ *Options, err error) {
func ResolveOptionPaths(options *BuildOptions) (_ *BuildOptions, err error) {
localContext := false
if options.ContextPath != "" && options.ContextPath != "-" {
if !isRemoteURL(options.ContextPath) {
@@ -93,7 +56,7 @@ func ResolveOptionPaths(options *Options) (_ *Options, err error) {
}
options.NamedContexts = contexts
var cacheFrom []*pb.CacheOptionsEntry
var cacheFrom []*CacheOptionsEntry
for _, co := range options.CacheFrom {
switch co.Type {
case "local":
@@ -124,7 +87,7 @@ func ResolveOptionPaths(options *Options) (_ *Options, err error) {
}
options.CacheFrom = cacheFrom
var cacheTo []*pb.CacheOptionsEntry
var cacheTo []*CacheOptionsEntry
for _, co := range options.CacheTo {
switch co.Type {
case "local":
@@ -154,7 +117,7 @@ func ResolveOptionPaths(options *Options) (_ *Options, err error) {
}
}
options.CacheTo = cacheTo
var exports []*pb.ExportEntry
var exports []*ExportEntry
for _, e := range options.Exports {
if e.Destination != "" && e.Destination != "-" {
e.Destination, err = filepath.Abs(e.Destination)
@@ -166,7 +129,7 @@ func ResolveOptionPaths(options *Options) (_ *Options, err error) {
}
options.Exports = exports
var secrets []*pb.Secret
var secrets []*Secret
for _, s := range options.Secrets {
if s.FilePath != "" {
s.FilePath, err = filepath.Abs(s.FilePath)
@@ -178,7 +141,7 @@ func ResolveOptionPaths(options *Options) (_ *Options, err error) {
}
options.Secrets = secrets
var ssh []*pb.SSH
var ssh []*SSH
for _, s := range options.SSH {
var ps []string
for _, pt := range s.Paths {

View File

@@ -1,12 +1,12 @@
package build
package pb
import (
"os"
"path/filepath"
"testing"
"github.com/docker/buildx/controller/pb"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"
)
func TestResolvePaths(t *testing.T) {
@@ -16,59 +16,59 @@ func TestResolvePaths(t *testing.T) {
require.NoError(t, os.Chdir(tmpwd))
tests := []struct {
name string
options *Options
want *Options
options *BuildOptions
want *BuildOptions
}{
{
name: "contextpath",
options: &Options{ContextPath: "test"},
want: &Options{ContextPath: filepath.Join(tmpwd, "test")},
options: &BuildOptions{ContextPath: "test"},
want: &BuildOptions{ContextPath: filepath.Join(tmpwd, "test")},
},
{
name: "contextpath-cwd",
options: &Options{ContextPath: "."},
want: &Options{ContextPath: tmpwd},
options: &BuildOptions{ContextPath: "."},
want: &BuildOptions{ContextPath: tmpwd},
},
{
name: "contextpath-dash",
options: &Options{ContextPath: "-"},
want: &Options{ContextPath: "-"},
options: &BuildOptions{ContextPath: "-"},
want: &BuildOptions{ContextPath: "-"},
},
{
name: "contextpath-ssh",
options: &Options{ContextPath: "git@github.com:docker/buildx.git"},
want: &Options{ContextPath: "git@github.com:docker/buildx.git"},
options: &BuildOptions{ContextPath: "git@github.com:docker/buildx.git"},
want: &BuildOptions{ContextPath: "git@github.com:docker/buildx.git"},
},
{
name: "dockerfilename",
options: &Options{DockerfileName: "test", ContextPath: "."},
want: &Options{DockerfileName: filepath.Join(tmpwd, "test"), ContextPath: tmpwd},
options: &BuildOptions{DockerfileName: "test", ContextPath: "."},
want: &BuildOptions{DockerfileName: filepath.Join(tmpwd, "test"), ContextPath: tmpwd},
},
{
name: "dockerfilename-dash",
options: &Options{DockerfileName: "-", ContextPath: "."},
want: &Options{DockerfileName: "-", ContextPath: tmpwd},
options: &BuildOptions{DockerfileName: "-", ContextPath: "."},
want: &BuildOptions{DockerfileName: "-", ContextPath: tmpwd},
},
{
name: "dockerfilename-remote",
options: &Options{DockerfileName: "test", ContextPath: "git@github.com:docker/buildx.git"},
want: &Options{DockerfileName: "test", ContextPath: "git@github.com:docker/buildx.git"},
options: &BuildOptions{DockerfileName: "test", ContextPath: "git@github.com:docker/buildx.git"},
want: &BuildOptions{DockerfileName: "test", ContextPath: "git@github.com:docker/buildx.git"},
},
{
name: "contexts",
options: &Options{NamedContexts: map[string]string{
options: &BuildOptions{NamedContexts: map[string]string{
"a": "test1", "b": "test2",
"alpine": "docker-image://alpine@sha256:0123456789", "project": "https://github.com/myuser/project.git",
}},
want: &Options{NamedContexts: map[string]string{
want: &BuildOptions{NamedContexts: map[string]string{
"a": filepath.Join(tmpwd, "test1"), "b": filepath.Join(tmpwd, "test2"),
"alpine": "docker-image://alpine@sha256:0123456789", "project": "https://github.com/myuser/project.git",
}},
},
{
name: "cache-from",
options: &Options{
CacheFrom: []*pb.CacheOptionsEntry{
options: &BuildOptions{
CacheFrom: []*CacheOptionsEntry{
{
Type: "local",
Attrs: map[string]string{"src": "test"},
@@ -79,8 +79,8 @@ func TestResolvePaths(t *testing.T) {
},
},
},
want: &Options{
CacheFrom: []*pb.CacheOptionsEntry{
want: &BuildOptions{
CacheFrom: []*CacheOptionsEntry{
{
Type: "local",
Attrs: map[string]string{"src": filepath.Join(tmpwd, "test")},
@@ -94,8 +94,8 @@ func TestResolvePaths(t *testing.T) {
},
{
name: "cache-to",
options: &Options{
CacheTo: []*pb.CacheOptionsEntry{
options: &BuildOptions{
CacheTo: []*CacheOptionsEntry{
{
Type: "local",
Attrs: map[string]string{"dest": "test"},
@@ -106,8 +106,8 @@ func TestResolvePaths(t *testing.T) {
},
},
},
want: &Options{
CacheTo: []*pb.CacheOptionsEntry{
want: &BuildOptions{
CacheTo: []*CacheOptionsEntry{
{
Type: "local",
Attrs: map[string]string{"dest": filepath.Join(tmpwd, "test")},
@@ -121,8 +121,8 @@ func TestResolvePaths(t *testing.T) {
},
{
name: "exports",
options: &Options{
Exports: []*pb.ExportEntry{
options: &BuildOptions{
Exports: []*ExportEntry{
{
Type: "local",
Destination: "-",
@@ -149,8 +149,8 @@ func TestResolvePaths(t *testing.T) {
},
},
},
want: &Options{
Exports: []*pb.ExportEntry{
want: &BuildOptions{
Exports: []*ExportEntry{
{
Type: "local",
Destination: "-",
@@ -180,8 +180,8 @@ func TestResolvePaths(t *testing.T) {
},
{
name: "secrets",
options: &Options{
Secrets: []*pb.Secret{
options: &BuildOptions{
Secrets: []*Secret{
{
FilePath: "test1",
},
@@ -195,8 +195,8 @@ func TestResolvePaths(t *testing.T) {
},
},
},
want: &Options{
Secrets: []*pb.Secret{
want: &BuildOptions{
Secrets: []*Secret{
{
FilePath: filepath.Join(tmpwd, "test1"),
},
@@ -213,8 +213,8 @@ func TestResolvePaths(t *testing.T) {
},
{
name: "ssh",
options: &Options{
SSH: []*pb.SSH{
options: &BuildOptions{
SSH: []*SSH{
{
ID: "default",
Paths: []string{"test1", "test2"},
@@ -225,8 +225,8 @@ func TestResolvePaths(t *testing.T) {
},
},
},
want: &Options{
SSH: []*pb.SSH{
want: &BuildOptions{
SSH: []*SSH{
{
ID: "default",
Paths: []string{filepath.Join(tmpwd, "test1"), filepath.Join(tmpwd, "test2")},
@@ -244,7 +244,9 @@ func TestResolvePaths(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
got, err := ResolveOptionPaths(tt.options)
require.NoError(t, err)
require.Equal(t, tt.want, got)
if !proto.Equal(tt.want, got) {
t.Fatalf("expected %#v, got %#v", tt.want, got)
}
})
}
}
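
The switch from require.Equal to proto.Equal in the final assertion matters because BuildOptions is again a generated protobuf message: it carries unexported bookkeeping (state, size cache, unknown fields) that reflection-based equality may compare, while proto.Equal compares only semantic field values. A small sketch of the distinction, assuming the pb package above:

```go
package main

import (
	"fmt"

	"github.com/docker/buildx/controller/pb"
	"google.golang.org/protobuf/proto"
)

func main() {
	a := &pb.BuildOptions{Target: "app"}
	b := proto.Clone(a).(*pb.BuildOptions)

	// Marshaling can touch internal size caches, but proto.Equal ignores
	// such bookkeeping and compares field values only.
	_, _ = proto.Marshal(a)
	fmt.Println(proto.Equal(a, b)) // true
}
```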

controller/pb/progress.go (new file, 162 lines)
View File

@@ -0,0 +1,162 @@
package pb
import (
"time"
"github.com/docker/buildx/util/progress"
control "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/client"
"github.com/opencontainers/go-digest"
"google.golang.org/protobuf/types/known/timestamppb"
)
type writer struct {
ch chan<- *StatusResponse
}
func NewProgressWriter(ch chan<- *StatusResponse) progress.Writer {
return &writer{ch: ch}
}
func (w *writer) Write(status *client.SolveStatus) {
w.ch <- ToControlStatus(status)
}
func (w *writer) WriteBuildRef(target string, ref string) {}
func (w *writer) ValidateLogSource(digest.Digest, interface{}) bool {
return true
}
func (w *writer) ClearLogSource(interface{}) {}
func ToControlStatus(s *client.SolveStatus) *StatusResponse {
resp := StatusResponse{}
for _, v := range s.Vertexes {
resp.Vertexes = append(resp.Vertexes, &control.Vertex{
Digest: string(v.Digest),
Inputs: digestSliceToPB(v.Inputs),
Name: v.Name,
Started: timestampToPB(v.Started),
Completed: timestampToPB(v.Completed),
Error: v.Error,
Cached: v.Cached,
ProgressGroup: v.ProgressGroup,
})
}
for _, v := range s.Statuses {
resp.Statuses = append(resp.Statuses, &control.VertexStatus{
ID: v.ID,
Vertex: string(v.Vertex),
Name: v.Name,
Total: v.Total,
Current: v.Current,
Timestamp: timestamppb.New(v.Timestamp),
Started: timestampToPB(v.Started),
Completed: timestampToPB(v.Completed),
})
}
for _, v := range s.Logs {
resp.Logs = append(resp.Logs, &control.VertexLog{
Vertex: string(v.Vertex),
Stream: int64(v.Stream),
Msg: v.Data,
Timestamp: timestamppb.New(v.Timestamp),
})
}
for _, v := range s.Warnings {
resp.Warnings = append(resp.Warnings, &control.VertexWarning{
Vertex: string(v.Vertex),
Level: int64(v.Level),
Short: v.Short,
Detail: v.Detail,
Url: v.URL,
Info: v.SourceInfo,
Ranges: v.Range,
})
}
return &resp
}
func FromControlStatus(resp *StatusResponse) *client.SolveStatus {
s := client.SolveStatus{}
for _, v := range resp.Vertexes {
s.Vertexes = append(s.Vertexes, &client.Vertex{
Digest: digest.Digest(v.Digest),
Inputs: digestSliceFromPB(v.Inputs),
Name: v.Name,
Started: timestampFromPB(v.Started),
Completed: timestampFromPB(v.Completed),
Error: v.Error,
Cached: v.Cached,
ProgressGroup: v.ProgressGroup,
})
}
for _, v := range resp.Statuses {
s.Statuses = append(s.Statuses, &client.VertexStatus{
ID: v.ID,
Vertex: digest.Digest(v.Vertex),
Name: v.Name,
Total: v.Total,
Current: v.Current,
Timestamp: v.Timestamp.AsTime(),
Started: timestampFromPB(v.Started),
Completed: timestampFromPB(v.Completed),
})
}
for _, v := range resp.Logs {
s.Logs = append(s.Logs, &client.VertexLog{
Vertex: digest.Digest(v.Vertex),
Stream: int(v.Stream),
Data: v.Msg,
Timestamp: v.Timestamp.AsTime(),
})
}
for _, v := range resp.Warnings {
s.Warnings = append(s.Warnings, &client.VertexWarning{
Vertex: digest.Digest(v.Vertex),
Level: int(v.Level),
Short: v.Short,
Detail: v.Detail,
URL: v.Url,
SourceInfo: v.Info,
Range: v.Ranges,
})
}
return &s
}
func timestampFromPB(ts *timestamppb.Timestamp) *time.Time {
if ts == nil {
return nil
}
t := ts.AsTime()
if t.IsZero() {
return nil
}
return &t
}
func timestampToPB(ts *time.Time) *timestamppb.Timestamp {
if ts == nil {
return nil
}
return timestamppb.New(*ts)
}
func digestSliceFromPB(elems []string) []digest.Digest {
clone := make([]digest.Digest, len(elems))
for i, e := range elems {
clone[i] = digest.Digest(e)
}
return clone
}
func digestSliceToPB(elems []digest.Digest) []string {
clone := make([]string, len(elems))
for i, e := range elems {
clone[i] = string(e)
}
return clone
}
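
Since ToControlStatus and FromControlStatus are inverses over the fields they copy, a conversion round trip should preserve vertex data. A quick sketch (digest value illustrative):

```go
package main

import (
	"fmt"

	"github.com/docker/buildx/controller/pb"
	"github.com/moby/buildkit/client"
	"github.com/opencontainers/go-digest"
)

func main() {
	in := &client.SolveStatus{
		Vertexes: []*client.Vertex{{
			Digest: digest.Digest("sha256:0123"),
			Name:   "[internal] load build definition",
		}},
	}
	out := pb.FromControlStatus(pb.ToControlStatus(in))
	fmt.Println(out.Vertexes[0].Name == in.Vertexes[0].Name) // true
}
```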

View File

@@ -5,12 +5,6 @@ import (
"github.com/moby/buildkit/session/secrets/secretsprovider"
)
type Secret struct {
ID string
FilePath string
Env string
}
func CreateSecrets(secrets []*Secret) (session.Attachable, error) {
fs := make([]secretsprovider.Source, 0, len(secrets))
for _, secret := range secrets {
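
For reference, a hedged sketch of feeding CreateSecrets both a file-backed and an env-backed secret (IDs, path, and env name are illustrative):

```go
package main

import (
	"log"

	"github.com/docker/buildx/controller/pb"
)

func main() {
	attachable, err := pb.CreateSecrets([]*pb.Secret{
		{ID: "npmrc", FilePath: "/run/secrets/npmrc"}, // file-backed secret
		{ID: "token", Env: "GITHUB_TOKEN"},            // env-backed secret
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = attachable // passed to the build session as an attachable
}
```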

View File

@@ -1,23 +1,16 @@
package pb
import (
"slices"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/sshforward/sshprovider"
)
type SSH struct {
ID string
Paths []string
}
func CreateSSH(ssh []*SSH) (session.Attachable, error) {
configs := make([]sshprovider.AgentConfig, 0, len(ssh))
for _, ssh := range ssh {
cfg := sshprovider.AgentConfig{
ID: ssh.ID,
Paths: slices.Clone(ssh.Paths),
Paths: append([]string{}, ssh.Paths...),
}
configs = append(configs, cfg)
}

View File

@@ -1,11 +0,0 @@
package pb
type UlimitOpt struct {
Values map[string]*Ulimit
}
type Ulimit struct {
Name string
Hard int64
Soft int64
}

View File

@@ -39,7 +39,7 @@ func (p *Process) Done() <-chan error {
return p.errCh
}
// Manager manages a set of processes.
// Manager manages a set of processes.
type Manager struct {
container atomic.Value
processes sync.Map
@@ -73,9 +73,9 @@ func (m *Manager) CancelRunningProcesses() {
}
// ListProcesses lists all running processes.
func (m *Manager) ListProcesses() (res []*ProcessInfo) {
func (m *Manager) ListProcesses() (res []*pb.ProcessInfo) {
m.processes.Range(func(key, value any) bool {
res = append(res, &ProcessInfo{
res = append(res, &pb.ProcessInfo{
ProcessID: key.(string),
InvokeConfig: value.(*Process).invokeConfig,
})
@@ -154,8 +154,3 @@ func (m *Manager) StartProcess(pid string, resultCtx *build.ResultHandle, cfg *p
return p, nil
}
type ProcessInfo struct {
ProcessID string
InvokeConfig *pb.InvokeConfig
}
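
A minimal sketch of the manager lifecycle as the controllers above use it; nothing here starts a real process, it only shows the empty-state behavior:

```go
package main

import (
	"fmt"

	"github.com/docker/buildx/controller/processes"
)

func main() {
	m := processes.NewManager()
	fmt.Println(len(m.ListProcesses())) // 0: nothing started yet
	m.CancelRunningProcesses()          // safe no-op on an empty manager
}
```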

controller/remote/client.go (new file, 243 lines)
View File

@@ -0,0 +1,243 @@
package remote
import (
"context"
"io"
"sync"
"time"
"github.com/containerd/containerd/defaults"
"github.com/containerd/containerd/pkg/dialer"
"github.com/docker/buildx/build"
"github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/util/progress"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/util/grpcerrors"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
"google.golang.org/grpc/backoff"
"google.golang.org/grpc/credentials/insecure"
)
func NewClient(ctx context.Context, addr string) (*Client, error) {
backoffConfig := backoff.DefaultConfig
backoffConfig.MaxDelay = 3 * time.Second
connParams := grpc.ConnectParams{
Backoff: backoffConfig,
}
gopts := []grpc.DialOption{
//nolint:staticcheck // ignore SA1019: WithBlock is deprecated and does not work with NewClient.
grpc.WithBlock(),
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithConnectParams(connParams),
grpc.WithContextDialer(dialer.ContextDialer),
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)),
grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)),
grpc.WithUnaryInterceptor(grpcerrors.UnaryClientInterceptor),
grpc.WithStreamInterceptor(grpcerrors.StreamClientInterceptor),
}
//nolint:staticcheck // ignore SA1019: Recommended NewClient has different behavior from DialContext.
conn, err := grpc.DialContext(ctx, dialer.DialAddress(addr), gopts...)
if err != nil {
return nil, err
}
return &Client{conn: conn}, nil
}
type Client struct {
conn *grpc.ClientConn
closeOnce sync.Once
}
func (c *Client) Close() (err error) {
c.closeOnce.Do(func() {
err = c.conn.Close()
})
return
}
func (c *Client) Version(ctx context.Context) (string, string, string, error) {
res, err := c.client().Info(ctx, &pb.InfoRequest{})
if err != nil {
return "", "", "", err
}
v := res.BuildxVersion
return v.Package, v.Version, v.Revision, nil
}
func (c *Client) List(ctx context.Context) (keys []string, retErr error) {
res, err := c.client().List(ctx, &pb.ListRequest{})
if err != nil {
return nil, err
}
return res.Keys, nil
}
func (c *Client) Disconnect(ctx context.Context, sessionID string) error {
if sessionID == "" {
return nil
}
_, err := c.client().Disconnect(ctx, &pb.DisconnectRequest{SessionID: sessionID})
return err
}
func (c *Client) ListProcesses(ctx context.Context, sessionID string) (infos []*pb.ProcessInfo, retErr error) {
res, err := c.client().ListProcesses(ctx, &pb.ListProcessesRequest{SessionID: sessionID})
if err != nil {
return nil, err
}
return res.Infos, nil
}
func (c *Client) DisconnectProcess(ctx context.Context, sessionID, pid string) error {
_, err := c.client().DisconnectProcess(ctx, &pb.DisconnectProcessRequest{SessionID: sessionID, ProcessID: pid})
return err
}
func (c *Client) Invoke(ctx context.Context, sessionID string, pid string, invokeConfig *pb.InvokeConfig, in io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) error {
if sessionID == "" || pid == "" {
return errors.New("build session ID must be specified")
}
stream, err := c.client().Invoke(ctx)
if err != nil {
return err
}
return attachIO(ctx, stream, &pb.InitMessage{SessionID: sessionID, ProcessID: pid, InvokeConfig: invokeConfig}, ioAttachConfig{
stdin: in,
stdout: stdout,
stderr: stderr,
// TODO: Signal, Resize
})
}
func (c *Client) Inspect(ctx context.Context, sessionID string) (*pb.InspectResponse, error) {
return c.client().Inspect(ctx, &pb.InspectRequest{SessionID: sessionID})
}
func (c *Client) Build(ctx context.Context, options *pb.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, *build.Inputs, error) {
ref := identity.NewID()
statusChan := make(chan *client.SolveStatus)
eg, egCtx := errgroup.WithContext(ctx)
var resp *client.SolveResponse
eg.Go(func() error {
defer close(statusChan)
var err error
resp, err = c.build(egCtx, ref, options, in, statusChan)
return err
})
eg.Go(func() error {
for s := range statusChan {
st := s
progress.Write(st)
}
return nil
})
return ref, resp, nil, eg.Wait()
}
func (c *Client) build(ctx context.Context, sessionID string, options *pb.BuildOptions, in io.ReadCloser, statusChan chan *client.SolveStatus) (*client.SolveResponse, error) {
eg, egCtx := errgroup.WithContext(ctx)
done := make(chan struct{})
var resp *client.SolveResponse
eg.Go(func() error {
defer close(done)
pbResp, err := c.client().Build(egCtx, &pb.BuildRequest{
SessionID: sessionID,
Options: options,
})
if err != nil {
return err
}
resp = &client.SolveResponse{
ExporterResponse: pbResp.ExporterResponse,
}
return nil
})
eg.Go(func() error {
stream, err := c.client().Status(egCtx, &pb.StatusRequest{
SessionID: sessionID,
})
if err != nil {
return err
}
for {
resp, err := stream.Recv()
if err != nil {
if err == io.EOF {
return nil
}
return errors.Wrap(err, "failed to receive status")
}
statusChan <- pb.FromControlStatus(resp)
}
})
if in != nil {
eg.Go(func() error {
stream, err := c.client().Input(egCtx)
if err != nil {
return err
}
if err := stream.Send(&pb.InputMessage{
Input: &pb.InputMessage_Init{
Init: &pb.InputInitMessage{
SessionID: sessionID,
},
},
}); err != nil {
return errors.Wrap(err, "failed to init input")
}
inReader, inWriter := io.Pipe()
eg2, _ := errgroup.WithContext(ctx)
eg2.Go(func() error {
<-done
return inWriter.Close()
})
go func() {
// do not wait for read completion but return here and let the caller send EOF
// this allows us to return on ctx.Done() without being blocked by this reader.
io.Copy(inWriter, in)
inWriter.Close()
}()
eg2.Go(func() error {
for {
buf := make([]byte, 32*1024)
n, err := inReader.Read(buf)
if err != nil {
if err == io.EOF {
break // break loop and send EOF
}
return err
} else if n > 0 {
if err := stream.Send(&pb.InputMessage{
Input: &pb.InputMessage_Data{
Data: &pb.DataMessage{
Data: buf[:n],
},
},
}); err != nil {
return err
}
}
}
return stream.Send(&pb.InputMessage{
Input: &pb.InputMessage_Data{
Data: &pb.DataMessage{
EOF: true,
},
},
})
})
return eg2.Wait()
})
}
return resp, eg.Wait()
}
func (c *Client) client() pb.ControllerClient {
return pb.NewControllerClient(c.conn)
}
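
Putting the client together: Build runs the unary Build RPC, the Status stream, and optionally the Input stream concurrently, and reports progress through the channel-backed writer from controller/pb/progress.go. A hedged end-to-end sketch; the socket path and build options are assumptions for illustration:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/buildx/controller/pb"
	"github.com/docker/buildx/controller/remote"
)

func main() {
	ctx := context.Background()

	c, err := remote.NewClient(ctx, "/tmp/buildx.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Drain status updates via the channel-backed progress writer.
	ch := make(chan *pb.StatusResponse)
	go func() {
		for range ch {
		}
	}()

	ref, resp, _, err := c.Build(ctx, &pb.BuildOptions{ContextPath: "."}, nil, pb.NewProgressWriter(ch))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("session:", ref, "exporter response:", resp.ExporterResponse)
}
```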

View File

@@ -0,0 +1,335 @@
//go:build linux
package remote
import (
"context"
"fmt"
"io"
"net"
"os"
"os/exec"
"os/signal"
"path/filepath"
"strconv"
"syscall"
"time"
"github.com/containerd/log"
"github.com/docker/buildx/build"
cbuild "github.com/docker/buildx/controller/build"
"github.com/docker/buildx/controller/control"
controllerapi "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/progress"
"github.com/docker/buildx/version"
"github.com/docker/cli/cli/command"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/util/grpcerrors"
"github.com/pelletier/go-toml"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"google.golang.org/grpc"
)
const (
serveCommandName = "_INTERNAL_SERVE"
)
var (
defaultLogFilename = fmt.Sprintf("buildx.%s.log", version.Revision)
defaultSocketFilename = fmt.Sprintf("buildx.%s.sock", version.Revision)
defaultPIDFilename = fmt.Sprintf("buildx.%s.pid", version.Revision)
)
type serverConfig struct {
// Specify buildx server root
Root string `toml:"root"`
// LogLevel sets the logging level [trace, debug, info, warn, error, fatal, panic]
LogLevel string `toml:"log_level"`
// Specify file to output buildx server log
LogFile string `toml:"log_file"`
}
func NewRemoteBuildxController(ctx context.Context, dockerCli command.Cli, opts control.ControlOptions, logger progress.SubLogger) (control.BuildxController, error) {
rootDir := opts.Root
if rootDir == "" {
rootDir = rootDataDir(dockerCli)
}
serverRoot := filepath.Join(rootDir, "shared")
// connect to buildx server if it is already running
ctx2, cancel := context.WithCancelCause(ctx)
ctx2, _ = context.WithTimeoutCause(ctx2, 1*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
c, err := newBuildxClientAndCheck(ctx2, filepath.Join(serverRoot, defaultSocketFilename))
cancel(errors.WithStack(context.Canceled))
if err != nil {
if !errors.Is(err, context.DeadlineExceeded) {
return nil, errors.Wrap(err, "cannot connect to the buildx server")
}
} else {
return &buildxController{c, serverRoot}, nil
}
// start buildx server via subcommand
err = logger.Wrap("no buildx server found; launching...", func() error {
launchFlags := []string{}
if opts.ServerConfig != "" {
launchFlags = append(launchFlags, "--config", opts.ServerConfig)
}
logFile, err := getLogFilePath(dockerCli, opts.ServerConfig)
if err != nil {
return err
}
wait, err := launch(ctx, logFile, append([]string{serveCommandName}, launchFlags...)...)
if err != nil {
return err
}
go wait()
// wait for buildx server to be ready
ctx2, cancel = context.WithCancelCause(ctx)
ctx2, _ = context.WithTimeoutCause(ctx2, 10*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
c, err = newBuildxClientAndCheck(ctx2, filepath.Join(serverRoot, defaultSocketFilename))
cancel(errors.WithStack(context.Canceled))
if err != nil {
return errors.Wrap(err, "cannot connect to the buildx server")
}
return nil
})
if err != nil {
return nil, err
}
return &buildxController{c, serverRoot}, nil
}
func AddControllerCommands(cmd *cobra.Command, dockerCli command.Cli) {
cmd.AddCommand(
serveCmd(dockerCli),
)
}
func serveCmd(dockerCli command.Cli) *cobra.Command {
var serverConfigPath string
cmd := &cobra.Command{
Use: fmt.Sprintf("%s [OPTIONS]", serveCommandName),
Hidden: true,
RunE: func(cmd *cobra.Command, args []string) error {
// Parse config
config, err := getConfig(dockerCli, serverConfigPath)
if err != nil {
return err
}
if config.LogLevel == "" {
logrus.SetLevel(logrus.InfoLevel)
} else {
lvl, err := logrus.ParseLevel(config.LogLevel)
if err != nil {
return errors.Wrap(err, "failed to prepare logger")
}
logrus.SetLevel(lvl)
}
logrus.SetFormatter(&logrus.JSONFormatter{
TimestampFormat: log.RFC3339NanoFixed,
})
root, err := prepareRootDir(dockerCli, config)
if err != nil {
return err
}
pidF := filepath.Join(root, defaultPIDFilename)
if err := os.WriteFile(pidF, []byte(fmt.Sprintf("%d", os.Getpid())), 0600); err != nil {
return err
}
defer func() {
if err := os.Remove(pidF); err != nil {
logrus.Errorf("failed to clean up info file %q: %v", pidF, err)
}
}()
// prepare server
b := NewServer(func(ctx context.Context, options *controllerapi.BuildOptions, stdin io.Reader, progress progress.Writer) (*client.SolveResponse, *build.ResultHandle, *build.Inputs, error) {
return cbuild.RunBuild(ctx, dockerCli, options, stdin, progress, true)
})
defer b.Close()
// serve server
addr := filepath.Join(root, defaultSocketFilename)
if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { // avoid EADDRINUSE
return err
}
defer func() {
if err := os.Remove(addr); err != nil {
logrus.Errorf("failed to clean up socket %q: %v", addr, err)
}
}()
logrus.Infof("starting server at %q", addr)
l, err := net.Listen("unix", addr)
if err != nil {
return err
}
rpc := grpc.NewServer(
grpc.UnaryInterceptor(grpcerrors.UnaryServerInterceptor),
grpc.StreamInterceptor(grpcerrors.StreamServerInterceptor),
)
controllerapi.RegisterControllerServer(rpc, b)
doneCh := make(chan struct{})
errCh := make(chan error, 1)
go func() {
defer close(doneCh)
if err := rpc.Serve(l); err != nil {
errCh <- errors.Wrapf(err, "error on serving via socket %q", addr)
}
}()
var s os.Signal
sigCh := make(chan os.Signal, 1)
signal.Notify(sigCh, syscall.SIGINT)
signal.Notify(sigCh, syscall.SIGTERM)
select {
case err := <-errCh:
logrus.Errorf("got error %s, exiting", err)
return err
case s = <-sigCh:
logrus.Infof("got signal %s, exiting", s)
return nil
case <-doneCh:
logrus.Infof("rpc server done, exiting")
return nil
}
},
}
flags := cmd.Flags()
flags.StringVar(&serverConfigPath, "config", "", "Specify buildx server config file")
return cmd
}
func getLogFilePath(dockerCli command.Cli, configPath string) (string, error) {
config, err := getConfig(dockerCli, configPath)
if err != nil {
return "", err
}
if config.LogFile == "" {
root, err := prepareRootDir(dockerCli, config)
if err != nil {
return "", err
}
return filepath.Join(root, defaultLogFilename), nil
}
return config.LogFile, nil
}
func getConfig(dockerCli command.Cli, configPath string) (*serverConfig, error) {
var defaultConfigPath bool
if configPath == "" {
defaultRoot := rootDataDir(dockerCli)
configPath = filepath.Join(defaultRoot, "config.toml")
defaultConfigPath = true
}
var config serverConfig
tree, err := toml.LoadFile(configPath)
if err != nil && !(os.IsNotExist(err) && defaultConfigPath) {
return nil, errors.Wrapf(err, "failed to read config %q", configPath)
} else if err == nil {
if err := tree.Unmarshal(&config); err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal config %q", configPath)
}
}
return &config, nil
}
func prepareRootDir(dockerCli command.Cli, config *serverConfig) (string, error) {
rootDir := config.Root
if rootDir == "" {
rootDir = rootDataDir(dockerCli)
}
if rootDir == "" {
return "", errors.New("buildx root dir must be determined")
}
if err := os.MkdirAll(rootDir, 0700); err != nil {
return "", err
}
serverRoot := filepath.Join(rootDir, "shared")
if err := os.MkdirAll(serverRoot, 0700); err != nil {
return "", err
}
return serverRoot, nil
}
func rootDataDir(dockerCli command.Cli) string {
return filepath.Join(confutil.NewConfig(dockerCli).Dir(), "controller")
}
func newBuildxClientAndCheck(ctx context.Context, addr string) (*Client, error) {
c, err := NewClient(ctx, addr)
if err != nil {
return nil, err
}
p, v, r, err := c.Version(ctx)
if err != nil {
return nil, err
}
logrus.Debugf("connected to server (\"%v %v %v\")", p, v, r)
if !(p == version.Package && v == version.Version && r == version.Revision) {
return nil, errors.Errorf("version mismatch (client: \"%v %v %v\", server: \"%v %v %v\")", version.Package, version.Version, version.Revision, p, v, r)
}
return c, nil
}
type buildxController struct {
*Client
serverRoot string
}
func (c *buildxController) Kill(ctx context.Context) error {
pidB, err := os.ReadFile(filepath.Join(c.serverRoot, defaultPIDFilename))
if err != nil {
return err
}
pid, err := strconv.ParseInt(string(pidB), 10, 64)
if err != nil {
return err
}
if pid <= 0 {
return errors.New("no PID is recorded for buildx server")
}
p, err := os.FindProcess(int(pid))
if err != nil {
return err
}
if err := p.Signal(syscall.SIGINT); err != nil {
return err
}
// TODO: Should we send SIGKILL if process doesn't finish?
return nil
}
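One possible shape for the TODO above, purely as a sketch of an escalation strategy (not current behavior): poll the process after SIGINT and fall back to SIGKILL once a grace period expires.

```go
// Hypothetical escalation after SIGINT: if the server is still alive after a
// grace period, send SIGKILL. Signal 0 is used only as a liveness probe.
deadline := time.After(10 * time.Second)
for {
	select {
	case <-deadline:
		return p.Signal(syscall.SIGKILL)
	case <-time.After(100 * time.Millisecond):
		if err := p.Signal(syscall.Signal(0)); err != nil {
			return nil // process has already exited
		}
	}
}
```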
func launch(ctx context.Context, logFile string, args ...string) (func() error, error) {
// set absolute path of binary, since we set the working directory to the root
pathname, err := os.Executable()
if err != nil {
return nil, err
}
bCmd := exec.CommandContext(ctx, pathname, args...)
if logFile != "" {
f, err := os.OpenFile(logFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return nil, err
}
defer f.Close()
bCmd.Stdout = f
bCmd.Stderr = f
}
bCmd.Stdin = nil
bCmd.Dir = "/"
bCmd.SysProcAttr = &syscall.SysProcAttr{
Setsid: true,
}
if err := bCmd.Start(); err != nil {
return nil, err
}
return bCmd.Wait, nil
}
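For reference, a minimal sketch of the TOML file that `getConfig` parses (by default `config.toml` under the controller root). Only the `log_file` tag is visible in the struct above; the `root` and `log_level` keys are assumed from the corresponding field names, and all values here are illustrative.

```toml
# Hypothetical buildx server config.
root = "/home/user/.docker/buildx/controller" # server root; defaults to the Docker config dir
log_level = "debug"                           # parsed with logrus.ParseLevel; defaults to info
log_file = "/tmp/buildx-server.log"           # server log; defaults to a file under the root
```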


@@ -0,0 +1,19 @@
//go:build !linux
package remote
import (
"context"
"github.com/docker/buildx/controller/control"
"github.com/docker/buildx/util/progress"
"github.com/docker/cli/cli/command"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
func NewRemoteBuildxController(ctx context.Context, dockerCli command.Cli, opts control.ControlOptions, logger progress.SubLogger) (control.BuildxController, error) {
return nil, errors.New("remote buildx unsupported")
}
func AddControllerCommands(cmd *cobra.Command, dockerCli command.Cli) {}

controller/remote/io.go Normal file (430 lines)

@@ -0,0 +1,430 @@
package remote
import (
"context"
"io"
"syscall"
"time"
"github.com/docker/buildx/controller/pb"
"github.com/moby/sys/signal"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
type msgStream interface {
Send(*pb.Message) error
Recv() (*pb.Message, error)
}
type ioServerConfig struct {
stdin io.WriteCloser
stdout, stderr io.ReadCloser
// signalFn is a callback invoked when a signal arrives from the client.
signalFn func(context.Context, syscall.Signal) error
// resizeFn is a callback invoked when a resize event arrives from the client.
resizeFn func(context.Context, winSize) error
}
func serveIO(attachCtx context.Context, srv msgStream, initFn func(*pb.InitMessage) error, ioConfig *ioServerConfig) (err error) {
stdin, stdout, stderr := ioConfig.stdin, ioConfig.stdout, ioConfig.stderr
stream := &debugStream{srv, "server=" + time.Now().String()}
eg, ctx := errgroup.WithContext(attachCtx)
done := make(chan struct{})
msg, err := receive(ctx, stream)
if err != nil {
return err
}
init := msg.GetInit()
if init == nil {
return errors.Errorf("unexpected message: %T; wanted init", msg.GetInput())
}
sessionID := init.SessionID
if sessionID == "" {
return errors.New("no session ID is provided")
}
if err := initFn(init); err != nil {
return errors.Wrap(err, "failed to initialize IO server")
}
if stdout != nil {
stdoutReader, stdoutWriter := io.Pipe()
eg.Go(func() error {
<-done
return stdoutWriter.Close()
})
go func() {
// do not wait for read completion but return here and let the caller send EOF
// this allows us to return on ctx.Done() without being blocked by this reader.
io.Copy(stdoutWriter, stdout)
stdoutWriter.Close()
}()
eg.Go(func() error {
defer stdoutReader.Close()
return copyToStream(1, stream, stdoutReader)
})
}
if stderr != nil {
stderrReader, stderrWriter := io.Pipe()
eg.Go(func() error {
<-done
return stderrWriter.Close()
})
go func() {
// do not wait for read completion but return here and let the caller send EOF
// this allows us to return on ctx.Done() without being blocked by this reader.
io.Copy(stderrWriter, stderr)
stderrWriter.Close()
}()
eg.Go(func() error {
defer stderrReader.Close()
return copyToStream(2, stream, stderrReader)
})
}
msgCh := make(chan *pb.Message)
eg.Go(func() error {
defer close(msgCh)
for {
msg, err := receive(ctx, stream)
if err != nil {
return err
}
select {
case msgCh <- msg:
case <-done:
return nil
case <-ctx.Done():
return nil
}
}
})
eg.Go(func() error {
defer close(done)
for {
var msg *pb.Message
select {
case msg = <-msgCh:
case <-ctx.Done():
return nil
}
if msg == nil {
return nil
}
if file := msg.GetFile(); file != nil {
if file.Fd != 0 {
return errors.Errorf("unexpected fd: %v", file.Fd)
}
if stdin == nil {
continue // no stdin destination is specified so ignore the data
}
if len(file.Data) > 0 {
_, err := stdin.Write(file.Data)
if err != nil {
return err
}
}
if file.EOF {
stdin.Close()
}
} else if resize := msg.GetResize(); resize != nil {
if ioConfig.resizeFn != nil {
ioConfig.resizeFn(ctx, winSize{
cols: resize.Cols,
rows: resize.Rows,
})
}
} else if sig := msg.GetSignal(); sig != nil {
if ioConfig.signalFn != nil {
syscallSignal, ok := signal.SignalMap[sig.Name]
if !ok {
continue
}
ioConfig.signalFn(ctx, syscallSignal)
}
} else {
return errors.Errorf("unexpected message: %T", msg.GetInput())
}
}
})
return eg.Wait()
}
type ioAttachConfig struct {
stdin io.ReadCloser
stdout, stderr io.WriteCloser
signal <-chan syscall.Signal
resize <-chan winSize
}
type winSize struct {
rows uint32
cols uint32
}
func attachIO(ctx context.Context, stream msgStream, initMessage *pb.InitMessage, cfg ioAttachConfig) (retErr error) {
eg, ctx := errgroup.WithContext(ctx)
done := make(chan struct{})
if err := stream.Send(&pb.Message{
Input: &pb.Message_Init{
Init: initMessage,
},
}); err != nil {
return errors.Wrap(err, "failed to init")
}
if cfg.stdin != nil {
stdinReader, stdinWriter := io.Pipe()
eg.Go(func() error {
<-done
return stdinWriter.Close()
})
go func() {
// do not wait for read completion but return here and let the caller send EOF
// this allows us to return on ctx.Done() without being blocked by this reader.
io.Copy(stdinWriter, cfg.stdin)
stdinWriter.Close()
}()
eg.Go(func() error {
defer stdinReader.Close()
return copyToStream(0, stream, stdinReader)
})
}
if cfg.signal != nil {
eg.Go(func() error {
names := signalNames()
for {
var sig syscall.Signal
select {
case sig = <-cfg.signal:
case <-done:
return nil
case <-ctx.Done():
return nil
}
name := names[sig]
if name == "" {
continue
}
if err := stream.Send(&pb.Message{
Input: &pb.Message_Signal{
Signal: &pb.SignalMessage{
Name: name,
},
},
}); err != nil {
return errors.Wrap(err, "failed to send signal")
}
}
})
}
if cfg.resize != nil {
eg.Go(func() error {
for {
var win winSize
select {
case win = <-cfg.resize:
case <-done:
return nil
case <-ctx.Done():
return nil
}
if err := stream.Send(&pb.Message{
Input: &pb.Message_Resize{
Resize: &pb.ResizeMessage{
Rows: win.rows,
Cols: win.cols,
},
},
}); err != nil {
return errors.Wrap(err, "failed to send resize")
}
}
})
}
msgCh := make(chan *pb.Message)
eg.Go(func() error {
defer close(msgCh)
for {
msg, err := receive(ctx, stream)
if err != nil {
return err
}
select {
case msgCh <- msg:
case <-done:
return nil
case <-ctx.Done():
return nil
}
}
})
eg.Go(func() error {
eofs := make(map[uint32]struct{})
defer close(done)
for {
var msg *pb.Message
select {
case msg = <-msgCh:
case <-ctx.Done():
return nil
}
if msg == nil {
return nil
}
if file := msg.GetFile(); file != nil {
if _, ok := eofs[file.Fd]; ok {
continue
}
var out io.WriteCloser
switch file.Fd {
case 1:
out = cfg.stdout
case 2:
out = cfg.stderr
default:
return errors.Errorf("unsupported fd %d", file.Fd)
}
if out == nil {
logrus.Warnf("attachIO: no writer for fd %d", file.Fd)
continue
}
if len(file.Data) > 0 {
if _, err := out.Write(file.Data); err != nil {
return err
}
}
if file.EOF {
eofs[file.Fd] = struct{}{}
}
} else {
return errors.Errorf("unexpected message: %T", msg.GetInput())
}
}
})
return eg.Wait()
}
func receive(ctx context.Context, stream msgStream) (*pb.Message, error) {
msgCh := make(chan *pb.Message)
errCh := make(chan error)
go func() {
msg, err := stream.Recv()
if err != nil {
if errors.Is(err, io.EOF) {
return
}
errCh <- err
return
}
msgCh <- msg
}()
select {
case msg := <-msgCh:
return msg, nil
case err := <-errCh:
return nil, err
case <-ctx.Done():
return nil, context.Cause(ctx)
}
}
func copyToStream(fd uint32, snd msgStream, r io.Reader) error {
for {
buf := make([]byte, 32*1024)
n, err := r.Read(buf)
if err != nil {
if err == io.EOF {
break // break loop and send EOF
}
return err
} else if n > 0 {
if err := snd.Send(&pb.Message{
Input: &pb.Message_File{
File: &pb.FdMessage{
Fd: fd,
Data: buf[:n],
},
},
}); err != nil {
return err
}
}
}
return snd.Send(&pb.Message{
Input: &pb.Message_File{
File: &pb.FdMessage{
Fd: fd,
EOF: true,
},
},
})
}
func signalNames() map[syscall.Signal]string {
m := make(map[syscall.Signal]string, len(signal.SignalMap))
for name, value := range signal.SignalMap {
m[value] = name
}
return m
}
type debugStream struct {
msgStream
prefix string
}
func (s *debugStream) Send(msg *pb.Message) error {
switch m := msg.GetInput().(type) {
case *pb.Message_File:
if m.File.EOF {
logrus.Debugf("|---> File Message (sender:%v) fd=%d, EOF", s.prefix, m.File.Fd)
} else {
logrus.Debugf("|---> File Message (sender:%v) fd=%d, %d bytes", s.prefix, m.File.Fd, len(m.File.Data))
}
case *pb.Message_Resize:
logrus.Debugf("|---> Resize Message (sender:%v): %+v", s.prefix, m.Resize)
case *pb.Message_Signal:
logrus.Debugf("|---> Signal Message (sender:%v): %s", s.prefix, m.Signal.Name)
}
return s.msgStream.Send(msg)
}
func (s *debugStream) Recv() (*pb.Message, error) {
msg, err := s.msgStream.Recv()
if err != nil {
return nil, err
}
switch m := msg.GetInput().(type) {
case *pb.Message_File:
if m.File.EOF {
logrus.Debugf("|<--- File Message (receiver:%v) fd=%d, EOF", s.prefix, m.File.Fd)
} else {
logrus.Debugf("|<--- File Message (receiver:%v) fd=%d, %d bytes", s.prefix, m.File.Fd, len(m.File.Data))
}
case *pb.Message_Resize:
logrus.Debugf("|<--- Resize Message (receiver:%v): %+v", s.prefix, m.Resize)
case *pb.Message_Signal:
logrus.Debugf("|<--- Signal Message (receiver:%v): %s", s.prefix, m.Signal.Name)
}
return msg, nil
}
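To make the `msgStream` contract above concrete, here is a hedged sketch of a channel-backed implementation — the kind of test double that could drive `serveIO` and `attachIO` without a real gRPC stream (`chanStream` is hypothetical, not part of this package):

```go
// chanStream is an in-memory msgStream for tests: Send publishes to one
// channel, Recv consumes from another, and a closed receive channel maps to EOF.
type chanStream struct {
	sendCh chan<- *pb.Message
	recvCh <-chan *pb.Message
}

func (s *chanStream) Send(m *pb.Message) error {
	s.sendCh <- m
	return nil
}

func (s *chanStream) Recv() (*pb.Message, error) {
	m, ok := <-s.recvCh
	if !ok {
		return nil, io.EOF
	}
	return m, nil
}
```

Two of these wired back-to-back (one side's `sendCh` as the other side's `recvCh`) would let a test attach `attachIO` to `serveIO` in-process.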

controller/remote/server.go Normal file (445 lines)

@@ -0,0 +1,445 @@
package remote
import (
"context"
"io"
"sync"
"sync/atomic"
"time"
"github.com/docker/buildx/build"
controllererrors "github.com/docker/buildx/controller/errdefs"
"github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/controller/processes"
"github.com/docker/buildx/util/desktop"
"github.com/docker/buildx/util/ioset"
"github.com/docker/buildx/util/progress"
"github.com/docker/buildx/version"
"github.com/moby/buildkit/client"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
type BuildFunc func(ctx context.Context, options *pb.BuildOptions, stdin io.Reader, progress progress.Writer) (resp *client.SolveResponse, res *build.ResultHandle, inp *build.Inputs, err error)
func NewServer(buildFunc BuildFunc) *Server {
return &Server{
buildFunc: buildFunc,
}
}
type Server struct {
buildFunc BuildFunc
session map[string]*session
sessionMu sync.Mutex
}
type session struct {
buildOnGoing atomic.Bool
statusChan chan *pb.StatusResponse
cancelBuild func(error)
buildOptions *pb.BuildOptions
inputPipe *io.PipeWriter
result *build.ResultHandle
processes *processes.Manager
}
func (s *session) cancelRunningProcesses() {
s.processes.CancelRunningProcesses()
}
func (m *Server) ListProcesses(ctx context.Context, req *pb.ListProcessesRequest) (res *pb.ListProcessesResponse, err error) {
m.sessionMu.Lock()
defer m.sessionMu.Unlock()
s, ok := m.session[req.SessionID]
if !ok {
return nil, errors.Errorf("unknown session ID %q", req.SessionID)
}
res = new(pb.ListProcessesResponse)
res.Infos = append(res.Infos, s.processes.ListProcesses()...)
return res, nil
}
func (m *Server) DisconnectProcess(ctx context.Context, req *pb.DisconnectProcessRequest) (res *pb.DisconnectProcessResponse, err error) {
m.sessionMu.Lock()
defer m.sessionMu.Unlock()
s, ok := m.session[req.SessionID]
if !ok {
return nil, errors.Errorf("unknown session ID %q", req.SessionID)
}
return res, s.processes.DeleteProcess(req.ProcessID)
}
func (m *Server) Info(ctx context.Context, req *pb.InfoRequest) (res *pb.InfoResponse, err error) {
return &pb.InfoResponse{
BuildxVersion: &pb.BuildxVersion{
Package: version.Package,
Version: version.Version,
Revision: version.Revision,
},
}, nil
}
func (m *Server) List(ctx context.Context, req *pb.ListRequest) (res *pb.ListResponse, err error) {
keys := make(map[string]struct{})
m.sessionMu.Lock()
for k := range m.session {
keys[k] = struct{}{}
}
m.sessionMu.Unlock()
var keysL []string
for k := range keys {
keysL = append(keysL, k)
}
return &pb.ListResponse{
Keys: keysL,
}, nil
}
func (m *Server) Disconnect(ctx context.Context, req *pb.DisconnectRequest) (res *pb.DisconnectResponse, err error) {
sessionID := req.SessionID
if sessionID == "" {
return nil, errors.New("disconnect: empty session ID")
}
m.sessionMu.Lock()
if s, ok := m.session[sessionID]; ok {
if s.cancelBuild != nil {
s.cancelBuild(errors.WithStack(context.Canceled))
}
s.cancelRunningProcesses()
if s.result != nil {
s.result.Done()
}
}
delete(m.session, sessionID)
m.sessionMu.Unlock()
return &pb.DisconnectResponse{}, nil
}
func (m *Server) Close() error {
m.sessionMu.Lock()
for k := range m.session {
if s, ok := m.session[k]; ok {
if s.cancelBuild != nil {
s.cancelBuild(errors.WithStack(context.Canceled))
}
s.cancelRunningProcesses()
}
}
m.sessionMu.Unlock()
return nil
}
func (m *Server) Inspect(ctx context.Context, req *pb.InspectRequest) (*pb.InspectResponse, error) {
sessionID := req.SessionID
if sessionID == "" {
return nil, errors.New("inspect: empty session ID")
}
var bo *pb.BuildOptions
m.sessionMu.Lock()
if s, ok := m.session[sessionID]; ok {
bo = s.buildOptions
} else {
m.sessionMu.Unlock()
return nil, errors.Errorf("inspect: unknown key %v", sessionID)
}
m.sessionMu.Unlock()
return &pb.InspectResponse{Options: bo}, nil
}
func (m *Server) Build(ctx context.Context, req *pb.BuildRequest) (*pb.BuildResponse, error) {
sessionID := req.SessionID
if sessionID == "" {
return nil, errors.New("build: empty session ID")
}
// Prepare status channel and session
m.sessionMu.Lock()
if m.session == nil {
m.session = make(map[string]*session)
}
s, ok := m.session[sessionID]
if ok {
if !s.buildOnGoing.CompareAndSwap(false, true) {
m.sessionMu.Unlock()
return &pb.BuildResponse{}, errors.New("build ongoing")
}
s.cancelRunningProcesses()
s.result = nil
} else {
s = &session{}
s.buildOnGoing.Store(true)
}
s.processes = processes.NewManager()
statusChan := make(chan *pb.StatusResponse)
s.statusChan = statusChan
inR, inW := io.Pipe()
defer inR.Close()
s.inputPipe = inW
m.session[sessionID] = s
m.sessionMu.Unlock()
defer func() {
close(statusChan)
m.sessionMu.Lock()
s, ok := m.session[sessionID]
if ok {
s.statusChan = nil
s.buildOnGoing.Store(false)
}
m.sessionMu.Unlock()
}()
pw := pb.NewProgressWriter(statusChan)
// Build the specified request
ctx, cancel := context.WithCancelCause(ctx)
defer func() { cancel(errors.WithStack(context.Canceled)) }()
resp, res, _, buildErr := m.buildFunc(ctx, req.Options, inR, pw)
m.sessionMu.Lock()
if s, ok := m.session[sessionID]; ok {
// NOTE: buildFunc can return *build.ResultHandle even on error (e.g. when it's implemented using (github.com/docker/buildx/controller/build).RunBuild).
if res != nil {
s.result = res
s.cancelBuild = cancel
s.buildOptions = req.Options
m.session[sessionID] = s
if buildErr != nil {
var ref string
var ebr *desktop.ErrorWithBuildRef
if errors.As(buildErr, &ebr) {
ref = ebr.Ref
}
buildErr = controllererrors.WrapBuild(buildErr, sessionID, ref)
}
}
} else {
m.sessionMu.Unlock()
return nil, errors.Errorf("build: unknown session ID %v", sessionID)
}
m.sessionMu.Unlock()
if buildErr != nil {
return nil, buildErr
}
if resp == nil {
resp = &client.SolveResponse{}
}
return &pb.BuildResponse{
ExporterResponse: resp.ExporterResponse,
}, nil
}
func (m *Server) Status(req *pb.StatusRequest, stream pb.Controller_StatusServer) error {
sessionID := req.SessionID
if sessionID == "" {
return errors.New("status: empty session ID")
}
// Wait and get status channel prepared by Build()
var statusChan <-chan *pb.StatusResponse
for {
// TODO: timeout?
m.sessionMu.Lock()
if _, ok := m.session[sessionID]; !ok || m.session[sessionID].statusChan == nil {
m.sessionMu.Unlock()
time.Sleep(time.Millisecond) // TODO: wait Build without busy loop and make it cancellable
continue
}
statusChan = m.session[sessionID].statusChan
m.sessionMu.Unlock()
break
}
// forward status
for ss := range statusChan {
if ss == nil {
break
}
if err := stream.Send(ss); err != nil {
return err
}
}
return nil
}
func (m *Server) Input(stream pb.Controller_InputServer) (err error) {
// Get the target ref from init message
msg, err := stream.Recv()
if err != nil {
if !errors.Is(err, io.EOF) {
return err
}
return nil
}
init := msg.GetInit()
if init == nil {
return errors.Errorf("unexpected message: %T; wanted init", msg.GetInit())
}
sessionID := init.SessionID
if sessionID == "" {
return errors.New("input: no session ID is provided")
}
// Wait and get input stream pipe prepared by Build()
var inputPipeW *io.PipeWriter
for {
// TODO: timeout?
m.sessionMu.Lock()
if _, ok := m.session[sessionID]; !ok || m.session[sessionID].inputPipe == nil {
m.sessionMu.Unlock()
time.Sleep(time.Millisecond) // TODO: wait Build without busy loop and make it cancellable
continue
}
inputPipeW = m.session[sessionID].inputPipe
m.sessionMu.Unlock()
break
}
// Forward input stream
eg, ctx := errgroup.WithContext(context.TODO())
done := make(chan struct{})
msgCh := make(chan *pb.InputMessage)
eg.Go(func() error {
defer close(msgCh)
for {
msg, err := stream.Recv()
if err != nil {
if !errors.Is(err, io.EOF) {
return err
}
return nil
}
select {
case msgCh <- msg:
case <-done:
return nil
case <-ctx.Done():
return nil
}
}
})
eg.Go(func() (retErr error) {
defer close(done)
defer func() {
if retErr != nil {
inputPipeW.CloseWithError(retErr)
return
}
inputPipeW.Close()
}()
for {
var msg *pb.InputMessage
select {
case msg = <-msgCh:
case <-ctx.Done():
return context.Cause(ctx)
}
if msg == nil {
return nil
}
if data := msg.GetData(); data != nil {
if len(data.Data) > 0 {
_, err := inputPipeW.Write(data.Data)
if err != nil {
return err
}
}
if data.EOF {
return nil
}
}
}
})
return eg.Wait()
}
func (m *Server) Invoke(srv pb.Controller_InvokeServer) error {
containerIn, containerOut := ioset.Pipe()
defer func() { containerOut.Close(); containerIn.Close() }()
initDoneCh := make(chan *processes.Process)
initErrCh := make(chan error)
eg, egCtx := errgroup.WithContext(context.TODO())
srvIOCtx, srvIOCancel := context.WithCancelCause(egCtx)
eg.Go(func() error {
defer srvIOCancel(errors.WithStack(context.Canceled))
return serveIO(srvIOCtx, srv, func(initMessage *pb.InitMessage) (retErr error) {
defer func() {
if retErr != nil {
initErrCh <- retErr
}
}()
sessionID := initMessage.SessionID
cfg := initMessage.InvokeConfig
m.sessionMu.Lock()
s, ok := m.session[sessionID]
if !ok {
m.sessionMu.Unlock()
return errors.Errorf("invoke: unknown session ID %v", sessionID)
}
m.sessionMu.Unlock()
pid := initMessage.ProcessID
if pid == "" {
return errors.Errorf("invoke: specify process ID")
}
proc, ok := s.processes.Get(pid)
if !ok {
// Start a new process.
if cfg == nil {
return errors.New("no container config is provided")
}
var err error
proc, err = s.processes.StartProcess(pid, s.result, cfg)
if err != nil {
return err
}
}
// Attach containerIn to this process
proc.ForwardIO(&containerIn, srvIOCancel)
initDoneCh <- proc
return nil
}, &ioServerConfig{
stdin: containerOut.Stdin,
stdout: containerOut.Stdout,
stderr: containerOut.Stderr,
// TODO: signal, resize
})
})
eg.Go(func() (rErr error) {
defer srvIOCancel(errors.WithStack(context.Canceled))
// Wait for init done
var proc *processes.Process
select {
case p := <-initDoneCh:
proc = p
case err := <-initErrCh:
return err
case <-egCtx.Done():
return egCtx.Err()
}
// Wait for IO done
select {
case <-srvIOCtx.Done():
return srvIOCtx.Err()
case err := <-proc.Done():
return err
case <-egCtx.Done():
return egCtx.Err()
}
})
return eg.Wait()
}
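Putting the server methods together, a client is expected to drive `Build`, `Status`, and `Input` concurrently against the same session ID. A rough sketch, assuming the generated gRPC client for the Controller service (method names and request shapes mirror the server handlers above):

```go
// Hypothetical client-side flow: Build creates the session, while Status
// streams progress for it until the status channel is closed by Build's defer.
eg, ctx := errgroup.WithContext(ctx)
eg.Go(func() error {
	_, err := c.Build(ctx, &pb.BuildRequest{SessionID: sessionID, Options: opts})
	return err
})
eg.Go(func() error {
	stream, err := c.Status(ctx, &pb.StatusRequest{SessionID: sessionID})
	if err != nil {
		return err
	}
	for {
		if _, err := stream.Recv(); err != nil {
			if errors.Is(err, io.EOF) {
				return nil
			}
			return err
		}
	}
})
if err := eg.Wait(); err != nil {
	return err
}
```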


@@ -48,8 +48,6 @@ target "lint" {
"linux/s390x",
"linux/ppc64le",
"linux/riscv64",
"netbsd/amd64",
"netbsd/arm64",
"openbsd/amd64",
"openbsd/arm64",
"windows/amd64",
@@ -169,8 +167,6 @@ target "binaries-cross" {
"linux/ppc64le",
"linux/riscv64",
"linux/s390x",
"netbsd/amd64",
"netbsd/arm64",
"openbsd/amd64",
"openbsd/arm64",
"windows/amd64",


@@ -19,8 +19,8 @@ By default, Bake uses the following lookup order to find the configuration file:
3. `docker-compose.yml`
4. `docker-compose.yaml`
5. `docker-bake.json`
6. `docker-bake.hcl`
7. `docker-bake.override.json`
6. `docker-bake.override.json`
7. `docker-bake.hcl`
8. `docker-bake.override.hcl`
You can specify the file location explicitly using the `--file` flag:
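For example (the file name and target here are illustrative):

```console
$ docker buildx bake --file ./ci/docker-bake.hcl app
```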
@@ -221,10 +221,8 @@ The following table shows the complete list of attributes that you can assign to
| [`attest`](#targetattest) | List | Build attestations |
| [`cache-from`](#targetcache-from) | List | External cache sources |
| [`cache-to`](#targetcache-to) | List | External cache destinations |
| [`call`](#targetcall) | String | Specify the frontend method to call for the target. |
| [`context`](#targetcontext) | String | Set of files located in the specified path or URL |
| [`contexts`](#targetcontexts) | Map | Additional build contexts |
| [`description`](#targetdescription) | String | Description of a target |
| [`dockerfile-inline`](#targetdockerfile-inline) | String | Inline Dockerfile string |
| [`dockerfile`](#targetdockerfile) | String | Dockerfile location |
| [`inherits`](#targetinherits) | List | Inherit attributes from other targets |
@@ -285,11 +283,19 @@ The key takes a list of annotations, in the format of `KEY=VALUE`.
```hcl
target "default" {
output = [{ type = "image", name = "foo" }]
output = ["type=image,name=foo"]
annotations = ["org.opencontainers.image.authors=dvdksn"]
}
```
is the same as
```hcl
target "default" {
output = ["type=image,name=foo,annotation.org.opencontainers.image.authors=dvdksn"]
}
```
By default, the annotation is added to image manifests. You can configure the
level of the annotations by adding a prefix to the annotation, containing a
comma-separated list of all the levels that you want to annotate. The following
@@ -297,7 +303,7 @@ example adds annotations to both the image index and manifests.
```hcl
target "default" {
output = [{ type = "image", name = "foo" }]
output = ["type=image,name=foo"]
annotations = ["index,manifest:org.opencontainers.image.authors=dvdksn"]
}
```
@@ -313,13 +319,8 @@ This attribute accepts the long-form CSV version of attestation parameters.
```hcl
target "default" {
attest = [
{
type = "provenance",
mode = "max",
},
{
type = "sbom",
}
"type=provenance,mode=min",
"type=sbom"
]
}
```
@@ -335,15 +336,8 @@ This takes a list value, so you can specify multiple cache sources.
```hcl
target "app" {
cache-from = [
{
type = "s3",
region = "eu-west-1",
bucket = "mybucket"
},
{
type = "registry",
ref = "user/repo:cache"
}
"type=s3,region=eu-west-1,bucket=mybucket",
"user/repo:cache",
]
}
```
@@ -359,14 +353,8 @@ This takes a list value, so you can specify multiple cache export targets.
```hcl
target "app" {
cache-to = [
{
type = "s3",
region = "eu-west-1",
bucket = "mybucket"
},
{
type = "inline",
}
"type=s3,region=eu-west-1,bucket=mybucket",
"type=inline"
]
}
```
@@ -383,13 +371,6 @@ target "app" {
}
```
Supported values are:
- `build`: builds the target (default)
- `check`: evaluates [build checks](https://docs.docker.com/build/checks/) for the target
- `outline`: displays the target's build arguments and their default values if available
- `targets`: lists all Bake targets in the loaded definition, along with their [description](#targetdescription).
For more information about frontend methods, refer to the CLI reference for
[`docker buildx build --call`](https://docs.docker.com/reference/cli/docker/buildx/build/#call).
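For example, a target can opt into running build checks instead of producing an image (the target name is illustrative):

```hcl
target "app" {
  call = "check"
}
```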
@@ -500,25 +481,6 @@ FROM baseapp
RUN echo "Hello world"
```
### `target.description`
Defines a human-readable description for the target, clarifying its purpose or
functionality.
```hcl
target "lint" {
description = "Runs golangci-lint to detect style errors"
args = {
GOLANGCI_LINT_VERSION = null
}
dockerfile = "lint.Dockerfile"
}
```
This attribute is useful when combined with the `docker buildx bake --list=targets`
option, providing a more informative output when listing the available build
targets in a Bake file.
### `target.dockerfile-inline`
Uses the string value as an inline Dockerfile for the build target.
@@ -873,7 +835,7 @@ The following example configures the target to use a cache-only output,
```hcl
target "default" {
output = [{ type = "cacheonly" }]
output = ["type=cacheonly"]
}
```
@@ -913,8 +875,8 @@ variable "HOME" {
target "default" {
secret = [
{ type = "env", id = "KUBECONFIG" },
{ type = "file", id = "aws", src = "${HOME}/.aws/credentials" },
"type=env,id=KUBECONFIG",
"type=file,id=aws,src=${HOME}/.aws/credentials"
]
}
```
@@ -958,7 +920,7 @@ This can be useful if you need to access private repositories during a build.
```hcl
target "default" {
ssh = [{ id = "default" }]
ssh = ["default"]
}
```


@@ -17,7 +17,6 @@ Extended build capabilities with BuildKit
| [`debug`](buildx_debug.md) | Start debugger (EXPERIMENTAL) |
| [`dial-stdio`](buildx_dial-stdio.md) | Proxy current stdio streams to builder instance |
| [`du`](buildx_du.md) | Disk usage |
| [`history`](buildx_history.md) | Commands to work on build records |
| [`imagetools`](buildx_imagetools.md) | Commands to work on images in registry |
| [`inspect`](buildx_inspect.md) | Inspect current builder instance |
| [`ls`](buildx_ls.md) | List builder instances |


@@ -14,19 +14,18 @@ Build from a file
### Options
| Name | Type | Default | Description |
|:------------------------------------|:--------------|:--------|:-------------------------------------------------------------------------------------------------------------|
| [`--allow`](#allow) | `stringArray` | | Allow build to access specified resources |
|:------------------------------------|:--------------|:--------|:----------------------------------------------------------------------------------------------------|
| `--allow` | `stringArray` | | Allow build to access specified resources |
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
| [`--call`](#call) | `string` | `build` | Set method for evaluating build (`check`, `outline`, `targets`) |
| [`--check`](#check) | `bool` | | Shorthand for `--call=check` |
| `-D`, `--debug` | `bool` | | Enable debug logging |
| [`-f`](#file), [`--file`](#file) | `stringArray` | | Build definition file |
| [`--list`](#list) | `string` | | List targets or variables |
| `--load` | `bool` | | Shorthand for `--set=*.output=type=docker` |
| [`--metadata-file`](#metadata-file) | `string` | | Write build result metadata to a file |
| [`--no-cache`](#no-cache) | `bool` | | Do not use cache when building the image |
| [`--print`](#print) | `bool` | | Print the options without building |
| [`--progress`](#progress) | `string` | `auto` | Set type of progress output (`auto`, `quiet`, `plain`, `tty`, `rawjson`). Use plain to show container output |
| [`--progress`](#progress) | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use plain to show container output |
| [`--provenance`](#provenance) | `string` | | Shorthand for `--set=*.attest=type=provenance` |
| [`--pull`](#pull) | `bool` | | Always attempt to pull all referenced images |
| `--push` | `bool` | | Shorthand for `--set=*.output=type=registry` |
@@ -51,80 +50,6 @@ guide for introduction to writing bake files.
## Examples
### <a name="allow"></a> Allow extra privileged entitlement (--allow)
```text
--allow=ENTITLEMENT[=VALUE]
```
Entitlements are designed to provide controlled access to privileged
operations. By default, Buildx and BuildKit operate with restricted
permissions to protect users and their systems from unintended side effects or
security risks. The `--allow` flag explicitly grants access to additional
entitlements, making it clear when a build or bake operation requires elevated
privileges.
In addition to BuildKit's `network.host` and `security.insecure` entitlements
(see [`docker buildx build --allow`](https://docs.docker.com/reference/cli/docker/buildx/build/#allow)),
Bake supports file system entitlements that grant granular control over file
system access. These are particularly useful when working with builds that need
access to files outside the default working directory.
Bake supports the following filesystem entitlements:
- `--allow fs=<path|*>` - Grant read and write access to files outside of the
working directory.
- `--allow fs.read=<path|*>` - Grant read access to files outside of the
working directory.
- `--allow fs.write=<path|*>` - Grant write access to files outside of the
working directory.
The `fs` entitlements take a path value (relative or absolute) to a directory
on the filesystem. Alternatively, you can pass a wildcard (`*`) to allow Bake
to access the entire filesystem.
### Example: fs.read
Given the following Bake configuration, Bake would need to access the parent
directory, relative to the Bake file.
```hcl
target "app" {
context = "../src"
}
```
Assuming `docker buildx bake app` is executed in the same directory as the
`docker-bake.hcl` file, you would need to explicitly allow Bake to read from
the `../src` directory. In this case, the following invocations all work:
```console
$ docker buildx bake --allow fs.read=* app
$ docker buildx bake --allow fs.read=../src app
$ docker buildx bake --allow fs=* app
```
### Example: fs.write
The following `docker-bake.hcl` file requires write access to the `/tmp`
directory.
```hcl
target "app" {
output = "/tmp"
}
```
Assuming `docker buildx bake app` is executed outside of the `/tmp` directory,
you would need to allow the `fs.write` entitlement, either by specifying the
path or using a wildcard:
```console
$ docker buildx bake --allow fs=/tmp app
$ docker buildx bake --allow fs.write=/tmp app
$ docker buildx bake --allow fs.write=* app
```
### <a name="builder"></a> Override the configured builder instance (--builder)
Same as [`buildx --builder`](buildx.md#builder).
@@ -176,42 +101,6 @@ $ docker buildx bake -f docker-bake.dev.hcl db webapp-release
See the [Bake file reference](https://docs.docker.com/build/bake/reference/)
for more details.
### <a name="list"></a> List targets and variables (--list)
The `--list` flag displays all available targets or variables in the Bake
configuration, along with a description (if set using the `description`
property in the Bake file).
To list all targets:
```console {title="List targets"}
$ docker buildx bake --list=targets
TARGET DESCRIPTION
binaries
default binaries
update-docs
validate
validate-golangci Validate .golangci.yml schema (does not run Go linter)
```
To list variables:
```console
$ docker buildx bake --list=variables
VARIABLE VALUE DESCRIPTION
REGISTRY docker.io/username Registry and namespace
IMAGE_NAME my-app Image name
GO_VERSION <null>
```
By default, the output of `docker buildx bake --list` is presented in a table
format. Alternatively, you can use a long-form CSV syntax and specify a
`format` attribute to output the list in JSON.
```console
$ docker buildx bake --list=type=targets,format=json
```
### <a name="metadata-file"></a> Write build results metadata to a file (--metadata-file)
Similar to [`buildx build --metadata-file`](buildx_build.md#metadata-file) but
@@ -350,19 +239,15 @@ $ docker buildx bake --set target.platform=linux/arm64
$ docker buildx bake --set foo*.args.mybuildarg=value # overrides build arg for all targets starting with 'foo'
$ docker buildx bake --set *.platform=linux/arm64 # overrides platform for all targets
$ docker buildx bake --set foo*.no-cache # bypass caching only for targets starting with 'foo'
$ docker buildx bake --set target.platform+=linux/arm64 # appends 'linux/arm64' to the platform list
```
You can override the following fields:
* `annotations`
* `attest`
* `args`
* `cache-from`
* `cache-to`
* `context`
* `dockerfile`
* `entitlements`
* `labels`
* `load`
* `no-cache`
@@ -375,20 +260,3 @@ You can override the following fields:
* `ssh`
* `tags`
* `target`
You can append using the `+=` operator for the following fields:
* `annotations`
* `attest`
* `cache-from`
* `cache-to`
* `entitlements`
* `no-cache-filter`
* `output`
* `platform`
* `secrets`
* `ssh`
* `tags`
> [!NOTE]
> ¹ These fields already append by default.


@@ -14,9 +14,9 @@ Start a build
### Options
| Name | Type | Default | Description |
|:----------------------------------------|:--------------|:----------|:-------------------------------------------------------------------------------------------------------------|
|:----------------------------------------|:--------------|:----------|:----------------------------------------------------------------------------------------------------|
| [`--add-host`](#add-host) | `stringSlice` | | Add a custom host-to-IP mapping (format: `host:ip`) |
| [`--allow`](#allow) | `stringArray` | | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
| [`--allow`](#allow) | `stringSlice` | | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
| [`--annotation`](#annotation) | `stringArray` | | Add annotation to the image |
| [`--attest`](#attest) | `stringArray` | | Attestation parameters (format: `type=sbom,generator=image`) |
| [`--build-arg`](#build-arg) | `stringArray` | | Set build-time variables |
@@ -28,6 +28,7 @@ Start a build
| [`--cgroup-parent`](#cgroup-parent) | `string` | | Set the parent cgroup for the `RUN` instructions during build |
| [`--check`](#check) | `bool` | | Shorthand for `--call=check` |
| `-D`, `--debug` | `bool` | | Enable debug logging |
| `--detach` | `bool` | | Detach buildx server (supported only on linux) (EXPERIMENTAL) |
| [`-f`](#file), [`--file`](#file) | `string` | | Name of the Dockerfile (default: `PATH/Dockerfile`) |
| `--iidfile` | `string` | | Write the image ID to a file |
| `--label` | `stringArray` | | Set metadata for an image |
@@ -38,13 +39,15 @@ Start a build
| [`--no-cache-filter`](#no-cache-filter) | `stringArray` | | Do not cache specified stages |
| [`-o`](#output), [`--output`](#output) | `stringArray` | | Output destination (format: `type=local,dest=path`) |
| [`--platform`](#platform) | `stringArray` | | Set target platform for build |
| [`--progress`](#progress) | `string` | `auto` | Set type of progress output (`auto`, `quiet`, `plain`, `tty`, `rawjson`). Use plain to show container output |
| [`--progress`](#progress) | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use plain to show container output |
| [`--provenance`](#provenance) | `string` | | Shorthand for `--attest=type=provenance` |
| `--pull` | `bool` | | Always attempt to pull all referenced images |
| [`--push`](#push) | `bool` | | Shorthand for `--output=type=registry` |
| `-q`, `--quiet` | `bool` | | Suppress the build output and print image ID on success |
| `--root` | `string` | | Specify root directory of server to connect (EXPERIMENTAL) |
| [`--sbom`](#sbom) | `string` | | Shorthand for `--attest=type=sbom` |
| [`--secret`](#secret) | `stringArray` | | Secret to expose to the build (format: `id=mysecret[,src=/local/secret]`) |
| `--server-config` | `string` | | Specify buildx server config file (used only when launching new server) (EXPERIMENTAL) |
| [`--shm-size`](#shm-size) | `bytes` | `0` | Shared memory size for build containers |
| [`--ssh`](#ssh) | `stringArray` | | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
| [`-t`](#tag), [`--tag`](#tag) | `stringArray` | | Name and optionally a tag (format: `name:tag`) |
@@ -825,12 +828,8 @@ $ docker buildx build --platform=darwin .
--progress=VALUE
```
Set type of progress output. Supported values are:
- `auto` (default): Uses the `tty` mode if the client is a TTY, or `plain` otherwise
- `tty`: An interactive stream of the output with color and redrawing
- `plain`: Prints the raw build progress in a plaintext format
- `quiet`: Suppress the build output and print image ID on success (same as `--quiet`)
- `rawjson`: Prints the raw build progress as JSON lines
Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use `plain` to show container
output (default `auto`).
> [!NOTE]
> You can also use the `BUILDKIT_PROGRESS` environment variable to set its value.
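For example, to force plain, non-interactive output (useful in CI logs):

```console
$ docker buildx build --progress=plain .
```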
@@ -941,7 +940,7 @@ $ docker buildx build --secret [type=file,]id=<ID>[,src=<FILEPATH>] .
###### `type=file` usage
In the following example, `type=file` is automatically detected because no
environment variable matching `aws` (the ID) is set.
environment variable mathing `aws` (the ID) is set.
```console
$ docker buildx build --secret id=aws,src=$HOME/.aws/credentials .


@@ -13,12 +13,15 @@ Start debugger (EXPERIMENTAL)
### Options
| Name | Type | Default | Description |
|:----------------|:---------|:--------|:--------------------------------------------------------------------------------------------------------------------|
|:------------------|:---------|:--------|:--------------------------------------------------------------------------------------------------------------------|
| `--builder` | `string` | | Override the configured builder instance |
| `-D`, `--debug` | `bool` | | Enable debug logging |
| `--detach` | `bool` | `true` | Detach buildx server for the monitor (supported only on linux) (EXPERIMENTAL) |
| `--invoke` | `string` | | Launch a monitor with executing specified command (EXPERIMENTAL) |
| `--on` | `string` | `error` | When to launch the monitor ([always, error]) (EXPERIMENTAL) |
| `--progress` | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`) for the monitor. Use plain to show container output |
| `--root` | `string` | | Specify root directory of server to connect for the monitor (EXPERIMENTAL) |
| `--server-config` | `string` | | Specify buildx server config file for the monitor (used only when launching new server) (EXPERIMENTAL) |
<!---MARKER_GEN_END-->


@@ -10,9 +10,9 @@ Start a build
### Options
| Name | Type | Default | Description |
|:--------------------|:--------------|:----------|:-------------------------------------------------------------------------------------------------------------|
|:--------------------|:--------------|:----------|:----------------------------------------------------------------------------------------------------|
| `--add-host` | `stringSlice` | | Add a custom host-to-IP mapping (format: `host:ip`) |
| `--allow` | `stringArray` | | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
| `--allow` | `stringSlice` | | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
| `--annotation` | `stringArray` | | Add annotation to the image |
| `--attest` | `stringArray` | | Attestation parameters (format: `type=sbom,generator=image`) |
| `--build-arg` | `stringArray` | | Set build-time variables |
@@ -24,6 +24,7 @@ Start a build
| `--cgroup-parent` | `string` | | Set the parent cgroup for the `RUN` instructions during build |
| `--check` | `bool` | | Shorthand for `--call=check` |
| `-D`, `--debug` | `bool` | | Enable debug logging |
| `--detach` | `bool` | | Detach buildx server (supported only on linux) (EXPERIMENTAL) |
| `-f`, `--file` | `string` | | Name of the Dockerfile (default: `PATH/Dockerfile`) |
| `--iidfile` | `string` | | Write the image ID to a file |
| `--label` | `stringArray` | | Set metadata for an image |
@@ -34,13 +35,15 @@ Start a build
| `--no-cache-filter` | `stringArray` | | Do not cache specified stages |
| `-o`, `--output` | `stringArray` | | Output destination (format: `type=local,dest=path`) |
| `--platform` | `stringArray` | | Set target platform for build |
| `--progress` | `string` | `auto` | Set type of progress output (`auto`, `quiet`, `plain`, `tty`, `rawjson`). Use plain to show container output |
| `--progress` | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use plain to show container output |
| `--provenance` | `string` | | Shorthand for `--attest=type=provenance` |
| `--pull` | `bool` | | Always attempt to pull all referenced images |
| `--push` | `bool` | | Shorthand for `--output=type=registry` |
| `-q`, `--quiet` | `bool` | | Suppress the build output and print image ID on success |
| `--root` | `string` | | Specify root directory of server to connect (EXPERIMENTAL) |
| `--sbom` | `string` | | Shorthand for `--attest=type=sbom` |
| `--secret` | `stringArray` | | Secret to expose to the build (format: `id=mysecret[,src=/local/secret]`) |
| `--server-config` | `string` | | Specify buildx server config file (used only when launching new server) (EXPERIMENTAL) |
| `--shm-size` | `bytes` | `0` | Shared memory size for build containers |
| `--ssh` | `stringArray` | | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
| `-t`, `--tag` | `stringArray` | | Name and optionally a tag (format: `name:tag`) |


@@ -1,58 +0,0 @@
# docker buildx history
<!---MARKER_GEN_START-->
Commands to work on build records
### Subcommands
| Name | Description |
|:---------------------------------------|:-----------------------------------------------|
| [`export`](buildx_history_export.md) | Export a build into Docker Desktop bundle |
| [`import`](buildx_history_import.md) | Import a build into Docker Desktop |
| [`inspect`](buildx_history_inspect.md) | Inspect a build |
| [`logs`](buildx_history_logs.md) | Print the logs of a build |
| [`ls`](buildx_history_ls.md) | List build records |
| [`open`](buildx_history_open.md) | Open a build in Docker Desktop |
| [`rm`](buildx_history_rm.md) | Remove build records |
| [`trace`](buildx_history_trace.md) | Show the OpenTelemetry trace of a build record |
### Options
| Name | Type | Default | Description |
|:----------------|:---------|:--------|:-----------------------------------------|
| `--builder` | `string` | | Override the configured builder instance |
| `-D`, `--debug` | `bool` | | Enable debug logging |
<!---MARKER_GEN_END-->
### Build references
Most `buildx history` subcommands accept a build reference to identify which
build to act on. You can specify the build in two ways:
- By build ID, fetched by `docker buildx history ls`:
```console
docker buildx history export qu2gsuo8ejqrwdfii23xkkckt --output build.dockerbuild
```
- By relative offset, to refer to recent builds:
```console
docker buildx history export ^1 --output build.dockerbuild
```
- `^0` or no reference targets the most recent build
- `^1` refers to the build before the most recent
- `^2` refers to two builds back, and so on
Offset references are supported in the following `buildx history` commands:
- `logs`
- `inspect`
- `open`
- `trace`
- `export`
- `rm`


@@ -1,81 +0,0 @@
# docker buildx history export
<!---MARKER_GEN_START-->
Export a build into Docker Desktop bundle
### Options
| Name | Type | Default | Description |
|:---------------------------------------|:---------|:--------|:-----------------------------------------|
| [`--all`](#all) | `bool` | | Export all records for the builder |
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
| [`-D`](#debug), [`--debug`](#debug) | `bool` | | Enable debug logging |
| [`-o`](#output), [`--output`](#output) | `string` | | Output file path |
<!---MARKER_GEN_END-->
## Description
Export one or more build records to `.dockerbuild` archive files. These archives
contain metadata, logs, and build outputs, and can be imported into Docker
Desktop or shared across environments.
## Examples
### <a name="output"></a> Export a single build to a custom file (--output)
```console
docker buildx history export qu2gsuo8ejqrwdfii23xkkckt --output mybuild.dockerbuild
```
You can find build IDs by running:
```console
docker buildx history ls
```
### <a name="o"></a> Export multiple builds to individual `.dockerbuild` files (-o)
To export two builds to separate files:
```console
# Using build IDs
docker buildx history export qu2gsuo8ejqrwdfii23xkkckt qsiifiuf1ad9pa9qvppc0z1l3 -o multi.dockerbuild
# Or using relative offsets
docker buildx history export ^1 ^2 -o multi.dockerbuild
```
Or use shell redirection:
```console
docker buildx history export ^1 > mybuild.dockerbuild
docker buildx history export ^2 > backend-build.dockerbuild
```
### <a name="all"></a> Export all build records to a file (--all)
Use the `--all` flag and redirect the output:
```console
docker buildx history export --all > all-builds.dockerbuild
```
Or use the `--output` flag:
```console
docker buildx history export --all -o all-builds.dockerbuild
```
### <a name="builder"></a> Use a specific builder instance (--builder)
```console
docker buildx history export --builder builder0 ^1 -o builder0-build.dockerbuild
```
### <a name="debug"></a> Enable debug logging (--debug)
```console
docker buildx history export --debug qu2gsuo8ejqrwdfii23xkkckt -o debug-build.dockerbuild
```


@@ -1,47 +0,0 @@
# docker buildx history import
<!---MARKER_GEN_START-->
Import a build into Docker Desktop
### Options
| Name | Type | Default | Description |
|:---------------------------------|:--------------|:--------|:-----------------------------------------|
| `--builder` | `string` | | Override the configured builder instance |
| `-D`, `--debug` | `bool` | | Enable debug logging |
| [`-f`](#file), [`--file`](#file) | `stringArray` | | Import from a file path |
<!---MARKER_GEN_END-->
## Description
Import a build record from a `.dockerbuild` archive into Docker Desktop. This
lets you view, inspect, and analyze builds created in other environments or CI
pipelines.
## Examples
### Import a `.dockerbuild` archive from standard input
```console
docker buildx history import < mybuild.dockerbuild
```
### <a name="file"></a> Import a build archive from a file (--file)
```console
docker buildx history import --file ./artifacts/backend-build.dockerbuild
```
### Open a build manually
By default, the `import` command automatically opens the imported build in Docker
Desktop. You don't need to run `open` unless you're opening a specific build
or re-opening it later.
If you've imported multiple builds, you can open one manually:
```console
docker buildx history open ci-build
```


@@ -1,168 +0,0 @@
# docker buildx history inspect
<!---MARKER_GEN_START-->
Inspect a build
### Subcommands
| Name | Description |
|:-----------------------------------------------------|:---------------------------|
| [`attachment`](buildx_history_inspect_attachment.md) | Inspect a build attachment |
### Options
| Name | Type | Default | Description |
|:----------------------|:---------|:---------|:-----------------------------------------|
| `--builder` | `string` | | Override the configured builder instance |
| `-D`, `--debug` | `bool` | | Enable debug logging |
| [`--format`](#format) | `string` | `pretty` | Format the output |
<!---MARKER_GEN_END-->
## Description
Inspect a build record to view metadata such as duration, status, build inputs,
platforms, outputs, and attached artifacts. You can also use flags to extract
provenance, SBOMs, or other detailed information.
## Examples
### Inspect the most recent build
```console
$ docker buildx history inspect
Name: buildx (binaries)
Context: .
Dockerfile: Dockerfile
VCS Repository: https://github.com/crazy-max/buildx.git
VCS Revision: f15eaa1ee324ffbbab29605600d27a84cab86361
Target: binaries
Platforms: linux/amd64
Keep Git Dir: true
Started: 2025-02-07 11:56:24
Duration: 1m 1s
Build Steps: 16/16 (25% cached)
Image Resolve Mode: local
Materials:
URI DIGEST
pkg:docker/docker/dockerfile@1 sha256:93bfd3b68c109427185cd78b4779fc82b484b0b7618e36d0f104d4d801e66d25
pkg:docker/golang@1.23-alpine3.21?platform=linux%2Famd64 sha256:2c49857f2295e89b23b28386e57e018a86620a8fede5003900f2d138ba9c4037
pkg:docker/tonistiigi/xx@1.6.1?platform=linux%2Famd64 sha256:923441d7c25f1e2eb5789f82d987693c47b8ed987c4ab3b075d6ed2b5d6779a3
Attachments:
DIGEST PLATFORM TYPE
sha256:217329d2af959d4f02e3a96dcbe62bf100cab1feb8006a047ddfe51a5397f7e3 https://slsa.dev/provenance/v0.2
```
### Inspect a specific build
```console
# Using a build ID
docker buildx history inspect qu2gsuo8ejqrwdfii23xkkckt
# Or using a relative offset
docker buildx history inspect ^1
```
### <a name="format"></a> Format the output (--format)
The `--format` option sets the output format: `pretty` (default), `json`, or a Go template.
**Pretty output**
```console
$ docker buildx history inspect
Name: buildx (binaries)
Context: .
Dockerfile: Dockerfile
VCS Repository: https://github.com/crazy-max/buildx.git
VCS Revision: f15eaa1ee324ffbbab29605600d27a84cab86361
Target: binaries
Platforms: linux/amd64
Keep Git Dir: true
Started: 2025-02-07 11:56:24
Duration: 1m 1s
Build Steps: 16/16 (25% cached)
Image Resolve Mode: local
Materials:
URI DIGEST
pkg:docker/docker/dockerfile@1 sha256:93bfd3b68c109427185cd78b4779fc82b484b0b7618e36d0f104d4d801e66d25
pkg:docker/golang@1.23-alpine3.21?platform=linux%2Famd64 sha256:2c49857f2295e89b23b28386e57e018a86620a8fede5003900f2d138ba9c4037
pkg:docker/tonistiigi/xx@1.6.1?platform=linux%2Famd64 sha256:923441d7c25f1e2eb5789f82d987693c47b8ed987c4ab3b075d6ed2b5d6779a3
Attachments:
DIGEST PLATFORM TYPE
sha256:217329d2af959d4f02e3a96dcbe62bf100cab1feb8006a047ddfe51a5397f7e3 https://slsa.dev/provenance/v0.2
Print build logs: docker buildx history logs g9808bwrjrlkbhdamxklx660b
```
**JSON output**
```console
$ docker buildx history inspect --format json
{
"Name": "buildx (binaries)",
"Ref": "5w7vkqfi0rf59hw4hnmn627r9",
"Context": ".",
"Dockerfile": "Dockerfile",
"VCSRepository": "https://github.com/crazy-max/buildx.git",
"VCSRevision": "f15eaa1ee324ffbbab29605600d27a84cab86361",
"Target": "binaries",
"Platform": [
"linux/amd64"
],
"KeepGitDir": true,
"StartedAt": "2025-02-07T12:01:05.75807272+01:00",
"CompletedAt": "2025-02-07T12:02:07.991778875+01:00",
"Duration": 62233706155,
"Status": "completed",
"NumCompletedSteps": 16,
"NumTotalSteps": 16,
"NumCachedSteps": 4,
"Config": {
"ImageResolveMode": "local"
},
"Materials": [
{
"URI": "pkg:docker/docker/dockerfile@1",
"Digests": [
"sha256:93bfd3b68c109427185cd78b4779fc82b484b0b7618e36d0f104d4d801e66d25"
]
},
{
"URI": "pkg:docker/golang@1.23-alpine3.21?platform=linux%2Famd64",
"Digests": [
"sha256:2c49857f2295e89b23b28386e57e018a86620a8fede5003900f2d138ba9c4037"
]
},
{
"URI": "pkg:docker/tonistiigi/xx@1.6.1?platform=linux%2Famd64",
"Digests": [
"sha256:923441d7c25f1e2eb5789f82d987693c47b8ed987c4ab3b075d6ed2b5d6779a3"
]
}
],
"Attachments": [
{
"Digest": "sha256:450fdd2e6b868fecd69e9891c2c404ba461aa38a47663b4805edeb8d2baf80b1",
"Type": "https://slsa.dev/provenance/v0.2"
}
]
}
```
**Go template output**
```console
$ docker buildx history inspect --format "{{.Name}}: {{.VCSRepository}} ({{.VCSRevision}})"
buildx (binaries): https://github.com/crazy-max/buildx.git (f15eaa1ee324ffbbab29605600d27a84cab86361)
```


@@ -1,82 +0,0 @@
# docker buildx history inspect attachment
<!---MARKER_GEN_START-->
Inspect a build attachment
### Options
| Name | Type | Default | Description |
|:------------------|:---------|:--------|:-----------------------------------------|
| `--builder` | `string` | | Override the configured builder instance |
| `-D`, `--debug` | `bool` | | Enable debug logging |
| `--platform` | `string` | | Platform of attachment |
| [`--type`](#type) | `string` | | Type of attachment |
<!---MARKER_GEN_END-->
## Description
Inspect a specific attachment from a build record, such as a provenance file or
SBOM. Attachments are optional artifacts stored with the build and may be
platform-specific.
## Examples
### <a name="type"></a> Inspect a provenance attachment from a build (--type)
Supported types include `provenance` and `sbom`.
```console
$ docker buildx history inspect attachment qu2gsuo8ejqrwdfii23xkkckt --type provenance
{
"_type": "https://slsa.dev/provenance/v0.2",
"buildDefinition": {
"buildType": "https://build.docker.com/BuildKit@v1",
"externalParameters": {
"target": "app",
"platforms": ["linux/amd64"]
}
},
"runDetails": {
"builder": "docker",
"by": "ci@docker.com"
}
}
```
### Inspect a SBOM for linux/amd64
```console
$ docker buildx history inspect attachment ^0 \
--type sbom \
--platform linux/amd64
{
"bomFormat": "CycloneDX",
"specVersion": "1.5",
"version": 1,
"components": [
{
"type": "library",
"name": "alpine",
"version": "3.18.2"
}
]
}
```
### Inspect an attachment by digest
You can inspect an attachment directly using its digest, which you can get from
the `inspect` output:
```console
# Using a build ID
docker buildx history inspect attachment qu2gsuo8ejqrwdfii23xkkckt sha256:abcdef123456...
# Or using a relative offset
docker buildx history inspect attachment ^0 sha256:abcdef123456...
```
Use `--type sbom` or `--type provenance` to filter attachments by type. To
inspect a specific attachment by digest, omit the `--type` flag.
