Compare commits


6 Commits

Author SHA1 Message Date
Tõnis Tiigi
45a3a79246 Merge pull request #890 from AkihiroSuda/cherrypick-887
[v0.7 backport] docker-container: set UsernsMode only when needed
2021-12-16 11:40:40 -08:00
Akihiro Suda
72af779e8a docker-container: set UsernsMode only when needed
Set `UsernsMode="host"` only when the daemon is running in userns-remapping mode.

Fix issue 561

The issue will also be fixed in moby/moby PR 43084 (Docker 20.10.13).
This buildx PR helps users of old releases of Docker.

Signed-off-by: Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
(cherry picked from commit 5f8600f098)
Signed-off-by: Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
2021-12-16 15:45:40 +09:00
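For reference, a minimal sketch of the guard this backport describes, against the Docker Engine API (the helper name and wiring here are illustrative, not the literal patch): the daemon lists `name=userns` among its security options only when userns-remapping is enabled, and only then does the BuildKit container need `UsernsMode` set to `host`.

```go
package driver

import (
	"context"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

// applyUsernsMode sketches the backported check: set UsernsMode only when
// the daemon is actually running with userns-remapping enabled.
func applyUsernsMode(ctx context.Context, api client.APIClient, hc *container.HostConfig) error {
	info, err := api.Info(ctx)
	if err != nil {
		return err
	}
	for _, opt := range info.SecurityOptions {
		// "name=userns" is advertised only under userns-remapping.
		if opt == "name=userns" {
			hc.UsernsMode = "host" // opt the build container out of remapping
			break
		}
	}
	return nil
}
```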
Tõnis Tiigi
05846896d1 Merge pull request #858 from tonistiigi/v0.7-fix-dockerignore-star
[v0.7] vendor: patch docker and fsutil pattern matching
2021-11-25 12:11:11 -08:00
Tõnis Tiigi
906948782e Merge pull request #862 from crazy-max/0.7_bake-json
[v0.7 backport] bake: fix print output
2021-11-25 12:10:59 -08:00
CrazyMax
1e1cc940df bake: fix print output
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit 316ca972b6)
2021-11-25 09:29:58 +01:00
Tonis Tiigi
e89ed1bcb6 vendor: patch docker and fsutil pattern matching
Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
2021-11-22 13:23:06 -08:00
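The backport branch name above (`v0.7-fix-dockerignore-star`) points at `.dockerignore` pattern matching. For context, a sketch of the vendored pattern-matching API that the patch touches; this only illustrates the API surface, not the specific `*`/exclusion edge case being fixed:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/fileutils"
)

func main() {
	// Evaluate .dockerignore-style patterns the way the vendored helper
	// does: ignore everything, then re-include README.md via "!".
	pm, err := fileutils.NewPatternMatcher([]string{"*", "!README.md"})
	if err != nil {
		panic(err)
	}
	for _, p := range []string{"README.md", "bin/buildx"} {
		ignored, err := pm.Matches(p)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s ignored=%v\n", p, ignored)
	}
}
```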
2228 changed files with 56527 additions and 194109 deletions

.dockerignore

@@ -1 +1,3 @@
bin/ bin/
cross-out/
release-out/

.github/dependabot.yml

@@ -1,10 +0,0 @@
version: 2
updates:
- package-ecosystem: "github-actions"
open-pull-requests-limit: 10
directory: "/"
schedule:
interval: "daily"
labels:
- "dependencies"
- "bot"

.github/workflows/build.yml

@@ -1,9 +1,5 @@
name: build name: build
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on: on:
workflow_dispatch: workflow_dispatch:
push: push:
@@ -16,119 +12,51 @@ on:
branches: branches:
- 'master' - 'master'
- 'v[0-9]*' - 'v[0-9]*'
paths-ignore:
- 'README.md'
- 'docs/**'
env: env:
REPO_SLUG: "docker/buildx-bin" REPO_SLUG: "docker/buildx-bin"
DESTDIR: "./bin" REPO_SLUG_ORIGIN: "moby/buildkit:master"
RELEASE_OUT: "./release-out"
jobs: jobs:
test: build:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- -
name: Checkout name: Checkout
uses: actions/checkout@v3 uses: actions/checkout@v2
-
name: Set up QEMU
uses: docker/setup-qemu-action@v1
- -
name: Set up Docker Buildx name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2 uses: docker/setup-buildx-action@v1
with: with:
version: latest driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
- -
name: Test name: Test
uses: docker/bake-action@v2
with:
targets: test
set: |
*.cache-from=type=gha,scope=test
*.cache-to=type=gha,scope=test
-
name: Upload coverage
uses: codecov/codecov-action@v3
with:
directory: ${{ env.DESTDIR }}/coverage
prepare:
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.platforms.outputs.matrix }}
steps:
-
name: Checkout
uses: actions/checkout@v3
-
name: Create matrix
id: platforms
run: | run: |
echo ::set-output name=matrix::$(docker buildx bake binaries-cross --print | jq -cr '.target."binaries-cross".platforms') make test
- -
name: Show matrix name: Send to Codecov
run: | uses: codecov/codecov-action@v2
echo ${{ steps.platforms.outputs.matrix }}
binaries:
runs-on: ubuntu-latest
needs:
- prepare
strategy:
fail-fast: false
matrix:
platform: ${{ fromJson(needs.prepare.outputs.matrix) }}
steps:
-
name: Prepare
run: |
platform=${{ matrix.platform }}
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
-
name: Checkout
uses: actions/checkout@v3
-
name: Set up QEMU
uses: docker/setup-qemu-action@v2
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with: with:
version: latest file: ./coverage/coverage.txt
- -
name: Build name: Build binaries
uses: docker/bake-action@v2 run: |
with: make release
targets: release
set: |
*.platform=${{ matrix.platform }}
*.cache-from=type=gha,scope=binaries-${{ env.PLATFORM_PAIR }}
*.cache-to=type=gha,scope=binaries-${{ env.PLATFORM_PAIR }},mode=max
- -
name: Upload artifacts name: Upload artifacts
uses: actions/upload-artifact@v3 uses: actions/upload-artifact@v2
with: with:
name: buildx name: buildx
path: ${{ env.DESTDIR }}/release/* path: ${{ env.RELEASE_OUT }}/*
if-no-files-found: error if-no-files-found: error
bin-image:
runs-on: ubuntu-latest
if: ${{ github.event_name != 'pull_request' }}
steps:
-
name: Checkout
uses: actions/checkout@v3
-
name: Set up QEMU
uses: docker/setup-qemu-action@v2
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
version: latest
- -
name: Docker meta name: Docker meta
id: meta id: meta
uses: docker/metadata-action@v4 uses: docker/metadata-action@v3
with: with:
images: | images: |
${{ env.REPO_SLUG }} ${{ env.REPO_SLUG }}
@@ -140,78 +68,25 @@ jobs:
- -
name: Login to DockerHub name: Login to DockerHub
if: github.event_name != 'pull_request' if: github.event_name != 'pull_request'
uses: docker/login-action@v2 uses: docker/login-action@v1
with: with:
username: ${{ secrets.DOCKERHUB_USERNAME }} username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }} password: ${{ secrets.DOCKERHUB_TOKEN }}
- -
name: Build and push image name: Build and push image
uses: docker/bake-action@v2 uses: docker/bake-action@v1
with: with:
files: | files: |
./docker-bake.hcl ./docker-bake.hcl
${{ steps.meta.outputs.bake-file }} ${{ steps.meta.outputs.bake-file }}
targets: image-cross targets: image-cross
push: ${{ github.event_name != 'pull_request' }} push: ${{ github.event_name != 'pull_request' }}
set: |
*.cache-from=type=gha,scope=bin-image
*.cache-to=type=gha,scope=bin-image,mode=max
release:
runs-on: ubuntu-latest
needs:
- binaries
steps:
-
name: Checkout
uses: actions/checkout@v3
-
name: Download binaries
uses: actions/download-artifact@v3
with:
name: buildx
path: ${{ env.DESTDIR }}
-
name: Create checksums
run: ./hack/hash-files
-
name: List artifacts
run: |
tree -nh ${{ env.DESTDIR }}
-
name: Check artifacts
run: |
find ${{ env.DESTDIR }} -type f -exec file -e ascii -- {} +
- -
name: GitHub Release name: GitHub Release
if: startsWith(github.ref, 'refs/tags/v') if: startsWith(github.ref, 'refs/tags/v')
uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v0.1.15 uses: softprops/action-gh-release@v1
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with: with:
draft: true draft: true
files: ${{ env.DESTDIR }}/* files: ${{ env.RELEASE_OUT }}/*
buildkit-edge:
runs-on: ubuntu-latest
continue-on-error: true
steps:
-
name: Checkout
uses: actions/checkout@v3
-
name: Set up QEMU
uses: docker/setup-qemu-action@v2
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
version: latest
driver-opts: image=moby/buildkit:master
buildkitd-flags: --debug
-
# Just run a bake target to check everything runs fine
name: Build
uses: docker/bake-action@v2
with:
targets: binaries

.github/workflows/docs-release.yml

@@ -1,56 +0,0 @@
name: docs-release
on:
release:
types: [ released ]
jobs:
open-pr:
runs-on: ubuntu-latest
steps:
-
name: Checkout docs repo
uses: actions/checkout@v3
with:
token: ${{ secrets.GHPAT_DOCS_DISPATCH }}
repository: docker/docs
ref: main
-
name: Prepare
run: |
rm -rf ./_data/buildx/*
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
-
name: Build docs
uses: docker/bake-action@v2
with:
source: ${{ github.server_url }}/${{ github.repository }}.git#${{ github.event.release.name }}
targets: update-docs
set: |
*.output=/tmp/buildx-docs
env:
DOCS_FORMATS: yaml
-
name: Copy files
run: |
cp /tmp/buildx-docs/out/reference/*.yaml ./_data/buildx/
-
name: Commit changes
run: |
git add -A .
-
name: Create PR on docs repo
uses: peter-evans/create-pull-request@2b011faafdcbc9ceb11414d64d0573f37c774b04
with:
token: ${{ secrets.GHPAT_DOCS_DISPATCH }}
push-to-fork: docker-tools-robot/docker.github.io
commit-message: "build: update buildx reference to ${{ github.event.release.name }}"
signoff: true
branch: dispatch/buildx-ref-${{ github.event.release.name }}
delete-branch: true
title: Update buildx reference to ${{ github.event.release.name }}
body: |
Update the buildx reference documentation to keep in sync with the latest release `${{ github.event.release.name }}`
draft: false

.github/workflows/docs-upstream.yml

@@ -1,61 +0,0 @@
# this workflow runs the remote validate bake target from docker/docker.github.io
# to check if yaml reference docs and markdown files used in this repo are still valid
# https://github.com/docker/docker.github.io/blob/98c7c9535063ae4cd2cd0a31478a21d16d2f07a3/docker-bake.hcl#L34-L36
name: docs-upstream
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on:
push:
branches:
- 'master'
- 'v[0-9]*'
paths:
- '.github/workflows/docs-upstream.yml'
- 'docs/**'
pull_request:
paths:
- '.github/workflows/docs-upstream.yml'
- 'docs/**'
jobs:
docs-yaml:
runs-on: ubuntu-latest
steps:
-
name: Checkout
uses: actions/checkout@v3
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
version: latest
-
name: Build reference YAML docs
uses: docker/bake-action@v2
with:
targets: update-docs
set: |
*.output=/tmp/buildx-docs
*.cache-from=type=gha,scope=docs-yaml
*.cache-to=type=gha,scope=docs-yaml,mode=max
env:
DOCS_FORMATS: yaml
-
name: Upload reference YAML docs
uses: actions/upload-artifact@v3
with:
name: docs-yaml
path: /tmp/buildx-docs/out/reference
retention-days: 1
validate:
uses: docker/docs/.github/workflows/validate-upstream.yml@main
needs:
- docs-yaml
with:
repo: https://github.com/${{ github.repository }}
data-files-id: docs-yaml
data-files-folder: buildx

.github/workflows/e2e.yml

@@ -1,9 +1,5 @@
name: e2e name: e2e
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on: on:
workflow_dispatch: workflow_dispatch:
push: push:
@@ -14,50 +10,10 @@ on:
branches: branches:
- 'master' - 'master'
- 'v[0-9]*' - 'v[0-9]*'
paths-ignore:
- 'README.md'
- 'docs/**'
env:
DESTDIR: "./bin"
jobs: jobs:
build:
runs-on: ubuntu-20.04
steps:
- name: Checkout
uses: actions/checkout@v3
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
version: latest
-
name: Build
uses: docker/bake-action@v2
with:
targets: binaries
set: |
*.cache-from=type=gha,scope=release
*.cache-from=type=gha,scope=binaries
*.cache-to=type=gha,scope=binaries
-
name: Rename binary
run: |
mv ${{ env.DESTDIR }}/build/buildx ${{ env.DESTDIR }}/build/docker-buildx
-
name: Upload artifacts
uses: actions/upload-artifact@v3
with:
name: binary
path: ${{ env.DESTDIR }}/build
if-no-files-found: error
retention-days: 7
driver: driver:
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
needs:
- build
strategy: strategy:
fail-fast: false fail-fast: false
matrix: matrix:
@@ -65,7 +21,6 @@ jobs:
- docker - docker
- docker-container - docker-container
- kubernetes - kubernetes
- remote
buildkit: buildkit:
- moby/buildkit:buildx-stable-1 - moby/buildkit:buildx-stable-1
- moby/buildkit:master - moby/buildkit:master
@@ -76,13 +31,10 @@ jobs:
- mnode-false - mnode-false
- mnode-true - mnode-true
platforms: platforms:
- linux/amd64
- linux/amd64,linux/arm64 - linux/amd64,linux/arm64
include: include:
- driver: kubernetes - driver: kubernetes
driver-opt: qemu.install=true driver-opt: qemu.install=true
- driver: remote
endpoint: tcp://localhost:1234
exclude: exclude:
- driver: docker - driver: docker
multi-node: mnode-true multi-node: mnode-true
@@ -90,28 +42,18 @@ jobs:
buildkit-cfg: bkcfg-true buildkit-cfg: bkcfg-true
- driver: docker-container - driver: docker-container
multi-node: mnode-true multi-node: mnode-true
- driver: remote
multi-node: mnode-true
- driver: remote
buildkit-cfg: bkcfg-true
steps: steps:
- -
name: Checkout name: Checkout
uses: actions/checkout@v3 uses: actions/checkout@v2
- -
name: Set up QEMU name: Set up QEMU
uses: docker/setup-qemu-action@v2 uses: docker/setup-qemu-action@v1
if: matrix.driver == 'docker' || matrix.driver == 'docker-container' if: matrix.driver == 'docker' || matrix.driver == 'docker-container'
- -
name: Install buildx name: Install buildx
uses: actions/download-artifact@v3
with:
name: binary
path: /home/runner/.docker/cli-plugins
-
name: Fix perms and check
run: | run: |
chmod +x /home/runner/.docker/cli-plugins/docker-buildx make install
docker buildx version docker buildx version
- -
name: Init env vars name: Init env vars
@@ -133,7 +75,7 @@ jobs:
- -
name: Install k3s name: Install k3s
if: matrix.driver == 'kubernetes' if: matrix.driver == 'kubernetes'
uses: debianmaster/actions-k3s@b9cf3f599fd118699a3c8a0d18a2f2bda6cf4ce4 uses: debianmaster/actions-k3s@v1.0.3
id: k3s id: k3s
with: with:
version: v1.21.2-k3s1 version: v1.21.2-k3s1
@@ -147,17 +89,6 @@ jobs:
if: matrix.driver == 'kubernetes' if: matrix.driver == 'kubernetes'
run: | run: |
kubectl get nodes kubectl get nodes
-
name: Launch remote buildkitd
if: matrix.driver == 'remote'
run: |
docker run -d \
--privileged \
--name=remote-buildkit \
-p 1234:1234 \
${{ matrix.buildkit }} \
--addr unix:///run/buildkit/buildkitd.sock \
--addr tcp://0.0.0.0:1234
- -
name: Test name: Test
run: | run: |
@@ -166,5 +97,4 @@ jobs:
BUILDKIT_IMAGE: ${{ matrix.buildkit }} BUILDKIT_IMAGE: ${{ matrix.buildkit }}
DRIVER: ${{ matrix.driver }} DRIVER: ${{ matrix.driver }}
DRIVER_OPT: ${{ matrix.driver-opt }} DRIVER_OPT: ${{ matrix.driver-opt }}
ENDPOINT: ${{ matrix.endpoint }}
PLATFORMS: ${{ matrix.platforms }} PLATFORMS: ${{ matrix.platforms }}

.github/workflows/godev.yml

@@ -0,0 +1,25 @@
# Workflow used to make a request to proxy.golang.org to refresh cache on https://pkg.go.dev/github.com/docker/buildx
# when a release of buildx is produced
name: godev
on:
push:
tags:
- 'v*'
jobs:
update:
runs-on: ubuntu-latest
steps:
-
name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.13
-
name: Call pkg.go.dev
run: |
go get github.com/${GITHUB_REPOSITORY}@${GITHUB_REF#refs/tags/}
env:
GO111MODULE: on
GOPROXY: https://proxy.golang.org
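The `go get` above refreshes the cache because proxy.golang.org fetches and stores a module version the first time it is requested; any request that follows the GOPROXY protocol has the same effect. A sketch of the equivalent direct request (the version tag is a placeholder):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// GOPROXY protocol: GET $proxy/<module>/@v/<version>.info returns the
	// version metadata and, as a side effect, makes proxy.golang.org fetch
	// and cache the module, which in turn refreshes pkg.go.dev.
	resp, err := http.Get("https://proxy.golang.org/github.com/docker/buildx/@v/v0.7.1.info")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
```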

.github/workflows/validate.yml

@@ -1,9 +1,5 @@
name: validate name: validate
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on: on:
workflow_dispatch: workflow_dispatch:
push: push:
@@ -30,13 +26,23 @@ jobs:
steps: steps:
- -
name: Checkout name: Checkout
uses: actions/checkout@v3 uses: actions/checkout@v2
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
version: latest
- -
name: Run name: Run
run: | run: |
make ${{ matrix.target }} make ${{ matrix.target }}
validate-docs-yaml:
runs-on: ubuntu-latest
needs:
- validate
steps:
-
name: Checkout
uses: actions/checkout@v2
-
name: Run
run: |
make docs
env:
FORMATS: yaml

.gitignore

@@ -1 +1,4 @@
/bin bin
coverage
cross-out
release-out

.golangci.yml

@@ -12,29 +12,19 @@ linters:
- gofmt - gofmt
- govet - govet
- deadcode - deadcode
- depguard
- goimports - goimports
- ineffassign - ineffassign
- misspell - misspell
- unused - unused
- varcheck - varcheck
- revive - golint
- staticcheck - staticcheck
- typecheck - typecheck
- nolintlint - structcheck
disable-all: true disable-all: true
linters-settings:
depguard:
list-type: blacklist
include-go-root: true
packages:
# The io/ioutil package has been deprecated.
# https://go.dev/doc/go1.16#ioutil
- io/ioutil
issues: issues:
exclude-rules: exclude-rules:
- linters: - linters:
- revive - golint
text: "stutters" text: "stutters"

.yamllint.yml

@@ -0,0 +1,13 @@
ignore: |
/vendor
extends: default
yaml-files:
- '*.yaml'
- '*.yml'
rules:
truthy: disable
line-length: disable
document-start: disable

Dockerfile

@@ -1,13 +1,12 @@
# syntax=docker/dockerfile:1.4 # syntax=docker/dockerfile:1.3
ARG GO_VERSION=1.19 ARG GO_VERSION=1.17
ARG XX_VERSION=1.1.2 ARG DOCKERD_VERSION=20.10.8
ARG DOCKERD_VERSION=20.10.14
FROM docker:$DOCKERD_VERSION AS dockerd-release FROM docker:$DOCKERD_VERSION AS dockerd-release
# xx is a helper for cross-compilation # xx is a helper for cross-compilation
FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx FROM --platform=$BUILDPLATFORM tonistiigi/xx@sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04 AS xx
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS golatest FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS golatest
@@ -15,30 +14,26 @@ FROM golatest AS gobase
COPY --from=xx / / COPY --from=xx / /
RUN apk add --no-cache file git RUN apk add --no-cache file git
ENV GOFLAGS=-mod=vendor ENV GOFLAGS=-mod=vendor
ENV CGO_ENABLED=0
WORKDIR /src WORKDIR /src
FROM gobase AS buildx-version FROM gobase AS buildx-version
RUN --mount=type=bind,target=. <<EOT RUN --mount=target=. \
set -e PKG=github.com/docker/buildx VERSION=$(git describe --match 'v[0-9]*' --dirty='.m' --always --tags) REVISION=$(git rev-parse HEAD)$(if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi); \
mkdir /buildx-version echo "-X ${PKG}/version.Version=${VERSION} -X ${PKG}/version.Revision=${REVISION} -X ${PKG}/version.Package=${PKG}" | tee /tmp/.ldflags; \
echo -n "$(./hack/git-meta version)" | tee /buildx-version/version echo -n "${VERSION}" | tee /tmp/.version;
echo -n "$(./hack/git-meta revision)" | tee /buildx-version/revision
EOT
FROM gobase AS buildx-build FROM gobase AS buildx-build
ENV CGO_ENABLED=0
ARG LDFLAGS="-w -s"
ARG TARGETPLATFORM ARG TARGETPLATFORM
RUN --mount=type=bind,target=. \ RUN --mount=type=bind,target=. \
--mount=type=cache,target=/root/.cache \ --mount=type=cache,target=/root/.cache \
--mount=type=cache,target=/go/pkg/mod \ --mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,from=buildx-version,source=/buildx-version,target=/buildx-version <<EOT --mount=type=bind,source=/tmp/.ldflags,target=/tmp/.ldflags,from=buildx-version \
set -e set -x; xx-go build -ldflags "$(cat /tmp/.ldflags) ${LDFLAGS}" -o /usr/bin/buildx ./cmd/buildx && \
xx-go --wrap xx-verify --static /usr/bin/buildx
DESTDIR=/usr/bin VERSION=$(cat /buildx-version/version) REVISION=$(cat /buildx-version/revision) GO_EXTRA_LDFLAGS="-s -w" ./hack/build
xx-verify --static /usr/bin/docker-buildx
EOT
FROM gobase AS test FROM buildx-build AS test
RUN --mount=type=bind,target=. \ RUN --mount=type=bind,target=. \
--mount=type=cache,target=/root/.cache \ --mount=type=cache,target=/root/.cache \
--mount=type=cache,target=/go/pkg/mod \ --mount=type=cache,target=/go/pkg/mod \
@@ -49,13 +44,13 @@ FROM scratch AS test-coverage
COPY --from=test /tmp/coverage.txt /coverage.txt COPY --from=test /tmp/coverage.txt /coverage.txt
FROM scratch AS binaries-unix FROM scratch AS binaries-unix
COPY --link --from=buildx-build /usr/bin/docker-buildx /buildx COPY --from=buildx-build /usr/bin/buildx /
FROM binaries-unix AS binaries-darwin FROM binaries-unix AS binaries-darwin
FROM binaries-unix AS binaries-linux FROM binaries-unix AS binaries-linux
FROM scratch AS binaries-windows FROM scratch AS binaries-windows
COPY --link --from=buildx-build /usr/bin/docker-buildx /buildx.exe COPY --from=buildx-build /usr/bin/buildx /buildx.exe
FROM binaries-$TARGETOS AS binaries FROM binaries-$TARGETOS AS binaries
@@ -64,11 +59,8 @@ FROM --platform=$BUILDPLATFORM alpine AS releaser
WORKDIR /work WORKDIR /work
ARG TARGETPLATFORM ARG TARGETPLATFORM
RUN --mount=from=binaries \ RUN --mount=from=binaries \
--mount=type=bind,from=buildx-version,source=/buildx-version,target=/buildx-version <<EOT --mount=type=bind,source=/tmp/.version,target=/tmp/.version,from=buildx-version \
set -e mkdir -p /out && cp buildx* "/out/buildx-$(cat /tmp/.version).$(echo $TARGETPLATFORM | sed 's/\//-/g')$(ls buildx* | sed -e 's/^buildx//')"
mkdir -p /out
cp buildx* "/out/buildx-$(cat /buildx-version/version).$(echo $TARGETPLATFORM | sed 's/\//-/g')$(ls buildx* | sed -e 's/^buildx//')"
EOT
FROM scratch AS release FROM scratch AS release
COPY --from=releaser /out/ / COPY --from=releaser /out/ /

MAINTAINERS

@@ -152,7 +152,6 @@ made through a pull request.
people = [ people = [
"akihirosuda", "akihirosuda",
"crazy-max", "crazy-max",
"jedevc",
"tiborvass", "tiborvass",
"tonistiigi", "tonistiigi",
] ]
@@ -189,11 +188,6 @@ made through a pull request.
Email = "contact@crazymax.dev" Email = "contact@crazymax.dev"
GitHub = "crazy-max" GitHub = "crazy-max"
[people.jedevc]
Name = "Justin Chadwell"
Email = "me@jedevc.com"
GitHub = "jedevc"
[people.thajeztah] [people.thajeztah]
Name = "Sebastiaan van Stijn" Name = "Sebastiaan van Stijn"
Email = "github@gone.nl" Email = "github@gone.nl"

Makefile

@@ -4,77 +4,59 @@ else ifneq (, $(shell docker buildx version))
export BUILDX_CMD = docker buildx export BUILDX_CMD = docker buildx
else ifneq (, $(shell which buildx)) else ifneq (, $(shell which buildx))
export BUILDX_CMD = $(which buildx) export BUILDX_CMD = $(which buildx)
else
$(error "Buildx is required: https://github.com/docker/buildx#installing")
endif endif
export BUILDX_CMD ?= docker buildx export BIN_OUT = ./bin
export RELEASE_OUT = ./release-out
.PHONY: all
all: binaries
.PHONY: build
build:
./hack/build
.PHONY: shell
shell: shell:
./hack/shell ./hack/shell
.PHONY: binaries
binaries: binaries:
$(BUILDX_CMD) bake binaries $(BUILDX_CMD) bake binaries
.PHONY: binaries-cross
binaries-cross: binaries-cross:
$(BUILDX_CMD) bake binaries-cross $(BUILDX_CMD) bake binaries-cross
.PHONY: install
install: binaries install: binaries
mkdir -p ~/.docker/cli-plugins mkdir -p ~/.docker/cli-plugins
install bin/build/buildx ~/.docker/cli-plugins/docker-buildx install bin/buildx ~/.docker/cli-plugins/docker-buildx
.PHONY: release
release: release:
./hack/release ./hack/release
.PHONY: validate-all
validate-all: lint test validate-vendor validate-docs validate-all: lint test validate-vendor validate-docs
.PHONY: lint
lint: lint:
$(BUILDX_CMD) bake lint $(BUILDX_CMD) bake lint
.PHONY: test
test: test:
$(BUILDX_CMD) bake test $(BUILDX_CMD) bake test
.PHONY: validate-vendor
validate-vendor: validate-vendor:
$(BUILDX_CMD) bake validate-vendor $(BUILDX_CMD) bake validate-vendor
.PHONY: validate-docs
validate-docs: validate-docs:
$(BUILDX_CMD) bake validate-docs $(BUILDX_CMD) bake validate-docs
.PHONY: validate-authors
validate-authors: validate-authors:
$(BUILDX_CMD) bake validate-authors $(BUILDX_CMD) bake validate-authors
.PHONY: test-driver
test-driver: test-driver:
./hack/test-driver ./hack/test-driver
.PHONY: vendor
vendor: vendor:
./hack/update-vendor ./hack/update-vendor
.PHONY: docs
docs: docs:
./hack/update-docs ./hack/update-docs
.PHONY: authors
authors: authors:
$(BUILDX_CMD) bake update-authors $(BUILDX_CMD) bake update-authors
.PHONY: mod-outdated
mod-outdated: mod-outdated:
$(BUILDX_CMD) bake mod-outdated $(BUILDX_CMD) bake mod-outdated
.PHONY: shell binaries binaries-cross install release validate-all lint validate-vendor validate-docs validate-authors vendor docs authors

README.md

@@ -1,10 +1,9 @@
# buildx # buildx
[![GitHub release](https://img.shields.io/github/release/docker/buildx.svg?style=flat-square)](https://github.com/docker/buildx/releases/latest) [![PkgGoDev](https://img.shields.io/badge/go.dev-docs-007d9c?logo=go&logoColor=white)](https://pkg.go.dev/github.com/docker/buildx)
[![PkgGoDev](https://img.shields.io/badge/go.dev-docs-007d9c?style=flat-square&logo=go&logoColor=white)](https://pkg.go.dev/github.com/docker/buildx) [![Build Status](https://github.com/docker/buildx/workflows/build/badge.svg)](https://github.com/docker/buildx/actions?query=workflow%3Abuild)
[![Build Status](https://img.shields.io/github/workflow/status/docker/buildx/build?label=build&logo=github&style=flat-square)](https://github.com/docker/buildx/actions?query=workflow%3Abuild) [![Go Report Card](https://goreportcard.com/badge/github.com/docker/buildx)](https://goreportcard.com/report/github.com/docker/buildx)
[![Go Report Card](https://goreportcard.com/badge/github.com/docker/buildx?style=flat-square)](https://goreportcard.com/report/github.com/docker/buildx) [![codecov](https://codecov.io/gh/docker/buildx/branch/master/graph/badge.svg)](https://codecov.io/gh/docker/buildx)
[![codecov](https://img.shields.io/codecov/c/github/docker/buildx?logo=codecov&style=flat-square)](https://codecov.io/gh/docker/buildx)
`buildx` is a Docker CLI plugin for extended build capabilities with `buildx` is a Docker CLI plugin for extended build capabilities with
[BuildKit](https://github.com/moby/buildkit). [BuildKit](https://github.com/moby/buildkit).
@@ -22,30 +21,17 @@ Key features:
# Table of Contents # Table of Contents
- [Installing](#installing) - [Installing](#installing)
- [Windows and macOS](#windows-and-macos) - [Docker](#docker)
- [Linux packages](#linux-packages) - [Binary release](#binary-release)
- [Manual download](#manual-download) - [From `Dockerfile`](#from-dockerfile)
- [Dockerfile](#dockerfile)
- [Set buildx as the default builder](#set-buildx-as-the-default-builder) - [Set buildx as the default builder](#set-buildx-as-the-default-builder)
- [Building](#building) - [Building](#building)
- [Getting started](#getting-started) - [Getting started](#getting-started)
- [Building with buildx](#building-with-buildx) - [Building with buildx](#building-with-buildx)
- [Working with builder instances](#working-with-builder-instances) - [Working with builder instances](#working-with-builder-instances)
- [Building multi-platform images](#building-multi-platform-images) - [Building multi-platform images](#building-multi-platform-images)
- [Manuals](docs/manuals) - [High-level build options](#high-level-build-options)
- [High-level build options with Bake](docs/manuals/bake/index.md) - [Documentation](docs/reference/buildx.md)
- [Drivers](docs/manuals/drivers/index.md)
- [Exporters](docs/manuals/exporters/index.md)
- [Cache backends](docs/manuals/cache/backends/index.md)
- [Guides](docs/guides)
- [CI/CD](docs/guides/cicd.md)
- [CNI networking](docs/guides/cni-networking.md)
- [Using a custom network](docs/guides/custom-network.md)
- [Using a custom registry configuration](docs/guides/custom-registry-config.md)
- [OpenTelemetry support](docs/guides/opentelemetry.md)
- [Registry mirror](docs/guides/registry-mirror.md)
- [Resource limiting](docs/guides/resource-limiting.md)
- [Reference](docs/reference/buildx.md)
- [`buildx bake`](docs/reference/buildx_bake.md) - [`buildx bake`](docs/reference/buildx_bake.md)
- [`buildx build`](docs/reference/buildx_build.md) - [`buildx build`](docs/reference/buildx_build.md)
- [`buildx create`](docs/reference/buildx_create.md) - [`buildx create`](docs/reference/buildx_create.md)
@@ -70,64 +56,30 @@ Using `buildx` as a docker CLI plugin requires using Docker 19.03 or newer.
A limited set of functionality works with older versions of Docker when A limited set of functionality works with older versions of Docker when
invoking the binary directly. invoking the binary directly.
## Windows and macOS ## Docker
Docker Buildx is included in [Docker Desktop](https://docs.docker.com/desktop/) `buildx` comes bundled with Docker Desktop and in latest
for Windows and macOS. [Docker CE packages](https://docs.docker.com/engine/install/), but may not be
included in third-party software components (in which case follow the
[binary release](#binary-release) instructions).
## Linux packages ## Binary release
Docker Linux packages also include Docker Buildx when installed using the You can also download the latest `buildx` binary from the
[DEB or RPM packages](https://docs.docker.com/engine/install/). [GitHub releases](https://github.com/docker/buildx/releases/latest) page, copy
it to `~/.docker/cli-plugins` folder with name `docker-buildx` and change the
permission to execute:
## Manual download ```console
$ chmod a+x ~/.docker/cli-plugins/docker-buildx
```
> **Important** ## From `Dockerfile`
>
> This section is for unattended installation of the buildx component. These
> instructions are mostly suitable for testing purposes. We do not recommend
> installing buildx using manual download in production environments as they
> will not be updated automatically with security updates.
>
> On Windows and macOS, we recommend that you install [Docker Desktop](https://docs.docker.com/desktop/)
> instead. For Linux, we recommend that you follow the [instructions specific for your distribution](#linux-packages).
You can also download the latest binary from the [GitHub releases page](https://github.com/docker/buildx/releases/latest). Here is how to use buildx inside a Dockerfile through the
Rename the relevant binary and copy it to the destination matching your OS:
| OS | Binary name | Destination folder |
| -------- | -------------------- | -----------------------------------------|
| Linux | `docker-buildx` | `$HOME/.docker/cli-plugins` |
| macOS | `docker-buildx` | `$HOME/.docker/cli-plugins` |
| Windows | `docker-buildx.exe` | `%USERPROFILE%\.docker\cli-plugins` |
Or copy it into one of these folders for installing it system-wide.
On Unix environments:
* `/usr/local/lib/docker/cli-plugins` OR `/usr/local/libexec/docker/cli-plugins`
* `/usr/lib/docker/cli-plugins` OR `/usr/libexec/docker/cli-plugins`
On Windows:
* `C:\ProgramData\Docker\cli-plugins`
* `C:\Program Files\Docker\cli-plugins`
> **Note**
>
> On Unix environments, it may also be necessary to make it executable with `chmod +x`:
> ```shell
> $ chmod +x ~/.docker/cli-plugins/docker-buildx
> ```
## Dockerfile
Here is how to install and use Buildx inside a Dockerfile through the
[`docker/buildx-bin`](https://hub.docker.com/r/docker/buildx-bin) image: [`docker/buildx-bin`](https://hub.docker.com/r/docker/buildx-bin) image:
```dockerfile ```Dockerfile
# syntax=docker/dockerfile:1
FROM docker FROM docker
COPY --from=docker/buildx-bin /buildx /usr/libexec/docker/cli-plugins/docker-buildx COPY --from=docker/buildx-bin /buildx /usr/libexec/docker/cli-plugins/docker-buildx
RUN docker buildx version RUN docker buildx version
@@ -145,17 +97,17 @@ To remove this alias, run [`docker buildx uninstall`](docs/reference/buildx_unin
```console ```console
# Buildx 0.6+ # Buildx 0.6+
$ docker buildx bake "https://github.com/docker/buildx.git" $ docker buildx bake "git://github.com/docker/buildx"
$ mkdir -p ~/.docker/cli-plugins $ mkdir -p ~/.docker/cli-plugins
$ mv ./bin/buildx ~/.docker/cli-plugins/docker-buildx $ mv ./bin/buildx ~/.docker/cli-plugins/docker-buildx
# Docker 19.03+ # Docker 19.03+
$ DOCKER_BUILDKIT=1 docker build --platform=local -o . "https://github.com/docker/buildx.git" $ DOCKER_BUILDKIT=1 docker build --platform=local -o . "git://github.com/docker/buildx"
$ mkdir -p ~/.docker/cli-plugins $ mkdir -p ~/.docker/cli-plugins
$ mv buildx ~/.docker/cli-plugins/docker-buildx $ mv buildx ~/.docker/cli-plugins/docker-buildx
# Local # Local
$ git clone https://github.com/docker/buildx.git && cd buildx $ git clone git://github.com/docker/buildx && cd buildx
$ make install $ make install
``` ```
@@ -189,22 +141,28 @@ specifying target platform. In addition, Buildx also supports new features that
are not yet available for regular `docker build` like building manifest lists, are not yet available for regular `docker build` like building manifest lists,
distributed caching, and exporting build results to OCI image tarballs. distributed caching, and exporting build results to OCI image tarballs.
Buildx is flexible and can be run in different configurations that are exposed Buildx is supposed to be flexible and can be run in different configurations
through various "drivers". Each driver defines how and where a build should that are exposed through a driver concept. Currently, we support a
run, and have different feature sets. [`docker` driver](docs/reference/buildx_create.md#docker-driver) that uses
the BuildKit library bundled into the Docker daemon binary, a
[`docker-container` driver](docs/reference/buildx_create.md#docker-container-driver)
that automatically launches BuildKit inside a Docker container and a
[`kubernetes` driver](docs/reference/buildx_create.md#kubernetes-driver) to
spin up pods with defined BuildKit container image to build your images. We
plan to add more drivers in the future.
We currently support the following drivers: The user experience of using buildx is very similar across drivers, but there
- The `docker` driver ([guide](docs/manuals/drivers/docker.md), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver)) are some features that are not currently supported by the `docker` driver,
- The `docker-container` driver ([guide](docs/manuals/drivers/docker-container.md), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver)) because the BuildKit library bundled into docker daemon currently uses a
- The `kubernetes` driver ([guide](docs/manuals/drivers/kubernetes.md), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver)) different storage component. In contrast, all images built with `docker` driver
- The `remote` driver ([guide](docs/manuals/drivers/remote.md)) are automatically added to the `docker images` view by default, whereas when
using other drivers the method for outputting an image needs to be selected
For more information on drivers, see the [drivers guide](docs/manuals/drivers/index.md). with `--output`.
## Working with builder instances ## Working with builder instances
By default, buildx will initially use the `docker` driver if it is supported, By default, buildx will initially use the `docker` driver if it is supported,
providing a very similar user experience to the native `docker build`. Note that providing a very similar user experience to the native `docker build`. Note tha
you must use a local shared daemon to build your applications. you must use a local shared daemon to build your applications.
Buildx allows you to create new instances of isolated builders. This can be Buildx allows you to create new instances of isolated builders. This can be
@@ -302,7 +260,6 @@ inside your Dockerfile and can be leveraged by the processes running as part
of your build. of your build.
```dockerfile ```dockerfile
# syntax=docker/dockerfile:1
FROM --platform=$BUILDPLATFORM golang:alpine AS build FROM --platform=$BUILDPLATFORM golang:alpine AS build
ARG TARGETPLATFORM ARG TARGETPLATFORM
ARG BUILDPLATFORM ARG BUILDPLATFORM
@@ -316,7 +273,26 @@ cross-compilation helpers for more advanced use-cases.
## High-level build options ## High-level build options
See [`docs/manuals/bake/index.md`](docs/manuals/bake/index.md) for more details. Buildx also aims to provide support for high-level build concepts that go beyond
invoking a single build command. We want to support building all the images in
your application together and let the users define project specific reusable
build flows that can then be easily invoked by anyone.
BuildKit efficiently handles multiple concurrent build requests and
de-duplicating work. The build commands can be combined with general-purpose
command runners (for example, `make`). However, these tools generally invoke
builds in sequence and therefore cannot leverage the full potential of BuildKit
parallelization, or combine BuildKits output for the user. For this use case,
we have added a command called [`docker buildx bake`](docs/reference/buildx_bake.md).
The `bake` command supports building images from compose files, similar to
[`docker-compose build`](https://docs.docker.com/compose/reference/build/),
but allowing all the services to be built concurrently as part of a single
request.
There is also support for custom build rules from HCL/JSON files allowing
better code reuse and different target groups. The design of bake is in very
early stages and we are looking for feedback from users.
# Contributing # Contributing

bake/bake.go

@@ -4,12 +4,10 @@ import (
"context" "context"
"encoding/csv" "encoding/csv"
"fmt" "fmt"
"io" "io/ioutil"
"os" "os"
"path" "path"
"path/filepath"
"regexp" "regexp"
"sort"
"strconv" "strconv"
"strings" "strings"
@@ -17,21 +15,15 @@ import (
"github.com/docker/buildx/build" "github.com/docker/buildx/build"
"github.com/docker/buildx/util/buildflags" "github.com/docker/buildx/util/buildflags"
"github.com/docker/buildx/util/platformutil" "github.com/docker/buildx/util/platformutil"
"github.com/docker/cli/cli/config" "github.com/docker/docker/pkg/urlutil"
"github.com/docker/docker/builder/remotecontext/urlutil"
hcl "github.com/hashicorp/hcl/v2" hcl "github.com/hashicorp/hcl/v2"
"github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/session/auth/authprovider" "github.com/moby/buildkit/session/auth/authprovider"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
var ( var httpPrefix = regexp.MustCompile(`^https?://`)
httpPrefix = regexp.MustCompile(`^https?://`) var gitURLPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`)
gitURLPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`)
validTargetNameChars = `[a-zA-Z0-9_-]+`
targetNamePattern = regexp.MustCompile(`^` + validTargetNameChars + `$`)
)
type File struct { type File struct {
Name string Name string
@@ -63,132 +55,54 @@ func ReadLocalFiles(names []string) ([]File, error) {
out := make([]File, 0, len(names)) out := make([]File, 0, len(names))
for _, n := range names { for _, n := range names {
var dt []byte dt, err := ioutil.ReadFile(n)
var err error
if n == "-" {
dt, err = io.ReadAll(os.Stdin)
if err != nil {
return nil, err
}
} else {
dt, err = os.ReadFile(n)
if err != nil { if err != nil {
if isDefault && errors.Is(err, os.ErrNotExist) { if isDefault && errors.Is(err, os.ErrNotExist) {
continue continue
} }
return nil, err return nil, err
} }
}
out = append(out, File{Name: n, Data: dt}) out = append(out, File{Name: n, Data: dt})
} }
return out, nil return out, nil
} }
func ReadTargets(ctx context.Context, files []File, targets, overrides []string, defaults map[string]string) (map[string]*Target, map[string]*Group, error) { func ReadTargets(ctx context.Context, files []File, targets, overrides []string, defaults map[string]string) (map[string]*Target, []*Group, error) {
c, err := ParseFiles(files, defaults) c, err := ParseFiles(files, defaults)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
for i, t := range targets {
targets[i] = sanitizeTargetName(t)
}
o, err := c.newOverrides(overrides) o, err := c.newOverrides(overrides)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
m := map[string]*Target{} m := map[string]*Target{}
n := map[string]*Group{} for _, n := range targets {
for _, target := range targets { for _, n := range c.ResolveGroup(n) {
ts, gs := c.ResolveGroup(target) t, err := c.ResolveTarget(n, o)
for _, tname := range ts {
t, err := c.ResolveTarget(tname, o)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
if t != nil { if t != nil {
m[tname] = t m[n] = t
} }
} }
for _, gname := range gs { }
var g []*Group
if len(targets) == 0 || (len(targets) == 1 && targets[0] == "default") {
for _, group := range c.Groups { for _, group := range c.Groups {
if group.Name == gname { if group.Name != "default" {
n[gname] = group
break
}
}
}
}
for _, target := range targets {
if target == "default" {
continue continue
} }
if _, ok := n["default"]; !ok { g = []*Group{{Targets: group.Targets}}
n["default"] = &Group{Name: "default"}
} }
n["default"].Targets = append(n["default"].Targets, target) } else {
} g = []*Group{{Targets: targets}}
if g, ok := n["default"]; ok {
g.Targets = dedupSlice(g.Targets)
} }
for name, t := range m { return m, g, nil
if err := c.loadLinks(name, t, m, o, nil); err != nil {
return nil, nil, err
}
}
return m, n, nil
}
func dedupSlice(s []string) []string {
if len(s) == 0 {
return s
}
var res []string
seen := make(map[string]struct{})
for _, val := range s {
if _, ok := seen[val]; !ok {
res = append(res, val)
seen[val] = struct{}{}
}
}
return res
}
func dedupMap(ms ...map[string]string) map[string]string {
if len(ms) == 0 {
return nil
}
res := map[string]string{}
for _, m := range ms {
if len(m) == 0 {
continue
}
for k, v := range m {
if _, ok := res[k]; !ok {
res[k] = v
}
}
}
return res
}
func sliceToMap(env []string) (res map[string]string) {
res = make(map[string]string)
for _, s := range env {
kv := strings.SplitN(s, "=", 2)
key := kv[0]
switch {
case len(kv) == 1:
res[key] = ""
default:
res[key] = kv[1]
}
}
return
} }
func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error) { func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error) {
@@ -197,15 +111,15 @@ func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error)
}() }()
var c Config var c Config
var composeFiles []File var fs []*hcl.File
var hclFiles []*hcl.File
for _, f := range files { for _, f := range files {
isCompose, composeErr := validateComposeFile(f.Data, f.Name) cfg, isCompose, composeErr := ParseComposeFile(f.Data, f.Name)
if isCompose { if isCompose {
if composeErr != nil { if composeErr != nil {
return nil, composeErr return nil, composeErr
} }
composeFiles = append(composeFiles, f) c = mergeConfig(c, *cfg)
c = dedupeConfig(c)
} }
if !isCompose { if !isCompose {
hf, isHCL, err := ParseHCLFile(f.Data, f.Name) hf, isHCL, err := ParseHCLFile(f.Data, f.Name)
@@ -213,7 +127,7 @@ func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error)
if err != nil { if err != nil {
return nil, err return nil, err
} }
hclFiles = append(hclFiles, hf) fs = append(fs, hf)
} else if composeErr != nil { } else if composeErr != nil {
return nil, fmt.Errorf("failed to parse %s: parsing yaml: %v, parsing hcl: %w", f.Name, composeErr, err) return nil, fmt.Errorf("failed to parse %s: parsing yaml: %v, parsing hcl: %w", f.Name, composeErr, err)
} else { } else {
@@ -222,43 +136,26 @@ func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error)
} }
} }
if len(composeFiles) > 0 { if len(fs) > 0 {
cfg, cmperr := ParseComposeFiles(composeFiles) if err := hclparser.Parse(hcl.MergeFiles(fs), hclparser.Opt{
if cmperr != nil {
return nil, errors.Wrap(cmperr, "failed to parse compose file")
}
c = mergeConfig(c, *cfg)
c = dedupeConfig(c)
}
if len(hclFiles) > 0 {
if err := hclparser.Parse(hcl.MergeFiles(hclFiles), hclparser.Opt{
LookupVar: os.LookupEnv, LookupVar: os.LookupEnv,
Vars: defaults, Vars: defaults,
ValidateLabel: validateTargetName,
}, &c); err.HasErrors() { }, &c); err.HasErrors() {
return nil, err return nil, err
} }
} }
return &c, nil return &c, nil
} }
func dedupeConfig(c Config) Config { func dedupeConfig(c Config) Config {
c2 := c c2 := c
c2.Groups = make([]*Group, 0, len(c2.Groups))
for _, g := range c.Groups {
g1 := *g
g1.Targets = dedupSlice(g1.Targets)
c2.Groups = append(c2.Groups, &g1)
}
c2.Targets = make([]*Target, 0, len(c2.Targets)) c2.Targets = make([]*Target, 0, len(c2.Targets))
mt := map[string]*Target{} m := map[string]*Target{}
for _, t := range c.Targets { for _, t := range c.Targets {
if t2, ok := mt[t.Name]; ok { if t2, ok := m[t.Name]; ok {
t2.Merge(t) t2.Merge(t)
} else { } else {
mt[t.Name] = t m[t.Name] = t
c2.Targets = append(c2.Targets, t) c2.Targets = append(c2.Targets, t)
} }
} }
@@ -269,9 +166,22 @@ func ParseFile(dt []byte, fn string) (*Config, error) {
return ParseFiles([]File{{Data: dt, Name: fn}}, nil) return ParseFiles([]File{{Data: dt, Name: fn}}, nil)
} }
func ParseComposeFile(dt []byte, fn string) (*Config, bool, error) {
fnl := strings.ToLower(fn)
if strings.HasSuffix(fnl, ".yml") || strings.HasSuffix(fnl, ".yaml") {
cfg, err := ParseCompose(dt)
return cfg, true, err
}
if strings.HasSuffix(fnl, ".json") || strings.HasSuffix(fnl, ".hcl") {
return nil, false, nil
}
cfg, err := ParseCompose(dt)
return cfg, err == nil, err
}
type Config struct { type Config struct {
Groups []*Group `json:"group" hcl:"group,block" cty:"group"` Groups []*Group `json:"group" hcl:"group,block"`
Targets []*Target `json:"target" hcl:"target,block" cty:"target"` Targets []*Target `json:"target" hcl:"target,block"`
} }
func mergeConfig(c1, c2 Config) Config { func mergeConfig(c1, c2 Config) Config {
@@ -349,46 +259,10 @@ func (c Config) expandTargets(pattern string) ([]string, error) {
return names, nil return names, nil
} }
func (c Config) loadLinks(name string, t *Target, m map[string]*Target, o map[string]map[string]Override, visited []string) error {
visited = append(visited, name)
for _, v := range t.Contexts {
if strings.HasPrefix(v, "target:") {
target := strings.TrimPrefix(v, "target:")
if target == t.Name {
return errors.Errorf("target %s cannot link to itself", target)
}
for _, v := range visited {
if v == target {
return errors.Errorf("infinite loop from %s to %s", name, target)
}
}
t2, ok := m[target]
if !ok {
var err error
t2, err = c.ResolveTarget(target, o)
if err != nil {
return err
}
t2.Outputs = nil
t2.linked = true
m[target] = t2
}
if err := c.loadLinks(target, t2, m, o, visited); err != nil {
return err
}
if len(t.Platforms) > 1 && len(t2.Platforms) > 1 {
if !sliceEqual(t.Platforms, t2.Platforms) {
return errors.Errorf("target %s can't be used by %s because it is defined for different platforms %v and %v", target, name, t2.Platforms, t.Platforms)
}
}
}
}
return nil
}
func (c Config) newOverrides(v []string) (map[string]map[string]Override, error) { func (c Config) newOverrides(v []string) (map[string]map[string]Override, error) {
m := map[string]map[string]Override{} m := map[string]map[string]Override{}
for _, v := range v { for _, v := range v {
parts := strings.SplitN(v, "=", 2) parts := strings.SplitN(v, "=", 2)
keys := strings.SplitN(parts[0], ".", 3) keys := strings.SplitN(parts[0], ".", 3)
if len(keys) < 2 { if len(keys) < 2 {
@@ -417,7 +291,7 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
o := t[kk[1]] o := t[kk[1]]
switch keys[1] { switch keys[1] {
case "output", "cache-to", "cache-from", "tags", "platform", "secrets", "ssh", "attest": case "output", "cache-to", "cache-from", "tags", "platform", "secrets", "ssh":
if len(parts) == 2 { if len(parts) == 2 {
o.ArrValue = append(o.ArrValue, parts[1]) o.ArrValue = append(o.ArrValue, parts[1])
} }
@@ -433,11 +307,6 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
o.Value = v o.Value = v
} }
fallthrough fallthrough
case "contexts":
if len(keys) != 3 {
return nil, errors.Errorf("invalid key %s, contexts requires name", parts[0])
}
fallthrough
default: default:
if len(parts) == 2 { if len(parts) == 2 {
o.Value = parts[1] o.Value = parts[1]
@@ -450,19 +319,13 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
return m, nil return m, nil
} }
func (c Config) ResolveGroup(name string) ([]string, []string) { func (c Config) ResolveGroup(name string) []string {
targets, groups := c.group(name, map[string]visit{}) return c.group(name, map[string]struct{}{})
return dedupSlice(targets), dedupSlice(groups)
} }
type visit struct { func (c Config) group(name string, visited map[string]struct{}) []string {
target []string if _, ok := visited[name]; ok {
group []string return nil
}
func (c Config) group(name string, visited map[string]visit) ([]string, []string) {
if v, ok := visited[name]; ok {
return v.target, v.group
} }
var g *Group var g *Group
for _, group := range c.Groups { for _, group := range c.Groups {
@@ -472,32 +335,21 @@ func (c Config) group(name string, visited map[string]visit) ([]string, []string
} }
} }
if g == nil { if g == nil {
return []string{name}, nil return []string{name}
} }
visited[name] = visit{} visited[name] = struct{}{}
targets := make([]string, 0, len(g.Targets)) targets := make([]string, 0, len(g.Targets))
groups := []string{name}
for _, t := range g.Targets { for _, t := range g.Targets {
ttarget, tgroup := c.group(t, visited) targets = append(targets, c.group(t, visited)...)
if len(ttarget) > 0 {
targets = append(targets, ttarget...)
} else {
targets = append(targets, t)
} }
if len(tgroup) > 0 { return targets
groups = append(groups, tgroup...)
}
}
visited[name] = visit{target: targets, group: groups}
return targets, groups
} }
func (c Config) ResolveTarget(name string, overrides map[string]map[string]Override) (*Target, error) { func (c Config) ResolveTarget(name string, overrides map[string]map[string]Override) (*Target, error) {
t, err := c.target(name, map[string]*Target{}, overrides) t, err := c.target(name, map[string]struct{}{}, overrides)
if err != nil { if err != nil {
return nil, err return nil, err
} }
t.Inherits = nil
if t.Context == nil { if t.Context == nil {
s := "." s := "."
t.Context = &s t.Context = &s
@@ -509,11 +361,11 @@ func (c Config) ResolveTarget(name string, overrides map[string]map[string]Overr
return t, nil return t, nil
} }
func (c Config) target(name string, visited map[string]*Target, overrides map[string]map[string]Override) (*Target, error) { func (c Config) target(name string, visited map[string]struct{}, overrides map[string]map[string]Override) (*Target, error) {
if t, ok := visited[name]; ok { if _, ok := visited[name]; ok {
return t, nil return nil, nil
} }
visited[name] = nil visited[name] = struct{}{}
var t *Target var t *Target
for _, target := range c.Targets { for _, target := range c.Targets {
if target.Name == name { if target.Name == name {
@@ -534,6 +386,7 @@ func (c Config) target(name string, visited map[string]*Target, overrides map[st
tt.Merge(t) tt.Merge(t)
} }
} }
t.Inherits = nil
m := defaultTarget() m := defaultTarget()
m.Merge(tt) m.Merge(tt)
m.Merge(t) m.Merge(t)
@@ -541,50 +394,43 @@ func (c Config) target(name string, visited map[string]*Target, overrides map[st
if err := tt.AddOverrides(overrides[name]); err != nil { if err := tt.AddOverrides(overrides[name]); err != nil {
return nil, err return nil, err
} }
tt.normalize() tt.normalize()
visited[name] = tt
return tt, nil return tt, nil
} }
type Group struct { type Group struct {
Name string `json:"-" hcl:"name,label" cty:"name"` Name string `json:"-" hcl:"name,label"`
Targets []string `json:"targets" hcl:"targets" cty:"targets"` Targets []string `json:"targets" hcl:"targets"`
// Target // TODO? // Target // TODO?
} }
type Target struct { type Target struct {
Name string `json:"-" hcl:"name,label" cty:"name"` Name string `json:"-" hcl:"name,label"`
// Inherits is the only field that cannot be overridden with --set // Inherits is the only field that cannot be overridden with --set
Attest []string `json:"attest,omitempty" hcl:"attest,optional" cty:"attest"` Inherits []string `json:"inherits,omitempty" hcl:"inherits,optional"`
Inherits []string `json:"inherits,omitempty" hcl:"inherits,optional" cty:"inherits"`
Context *string `json:"context,omitempty" hcl:"context,optional" cty:"context"` Context *string `json:"context,omitempty" hcl:"context,optional"`
Contexts map[string]string `json:"contexts,omitempty" hcl:"contexts,optional" cty:"contexts"` Dockerfile *string `json:"dockerfile,omitempty" hcl:"dockerfile,optional"`
Dockerfile *string `json:"dockerfile,omitempty" hcl:"dockerfile,optional" cty:"dockerfile"` DockerfileInline *string `json:"dockerfile-inline,omitempty" hcl:"dockerfile-inline,optional"`
DockerfileInline *string `json:"dockerfile-inline,omitempty" hcl:"dockerfile-inline,optional" cty:"dockerfile-inline"` Args map[string]string `json:"args,omitempty" hcl:"args,optional"`
Args map[string]string `json:"args,omitempty" hcl:"args,optional" cty:"args"` Labels map[string]string `json:"labels,omitempty" hcl:"labels,optional"`
Labels map[string]string `json:"labels,omitempty" hcl:"labels,optional" cty:"labels"` Tags []string `json:"tags,omitempty" hcl:"tags,optional"`
Tags []string `json:"tags,omitempty" hcl:"tags,optional" cty:"tags"` CacheFrom []string `json:"cache-from,omitempty" hcl:"cache-from,optional"`
CacheFrom []string `json:"cache-from,omitempty" hcl:"cache-from,optional" cty:"cache-from"` CacheTo []string `json:"cache-to,omitempty" hcl:"cache-to,optional"`
CacheTo []string `json:"cache-to,omitempty" hcl:"cache-to,optional" cty:"cache-to"` Target *string `json:"target,omitempty" hcl:"target,optional"`
Target *string `json:"target,omitempty" hcl:"target,optional" cty:"target"` Secrets []string `json:"secret,omitempty" hcl:"secret,optional"`
Secrets []string `json:"secret,omitempty" hcl:"secret,optional" cty:"secret"` SSH []string `json:"ssh,omitempty" hcl:"ssh,optional"`
SSH []string `json:"ssh,omitempty" hcl:"ssh,optional" cty:"ssh"` Platforms []string `json:"platforms,omitempty" hcl:"platforms,optional"`
Platforms []string `json:"platforms,omitempty" hcl:"platforms,optional" cty:"platforms"` Outputs []string `json:"output,omitempty" hcl:"output,optional"`
Outputs []string `json:"output,omitempty" hcl:"output,optional" cty:"output"` Pull *bool `json:"pull,omitempty" hcl:"pull,optional"`
Pull *bool `json:"pull,omitempty" hcl:"pull,optional" cty:"pull"` NoCache *bool `json:"no-cache,omitempty" hcl:"no-cache,optional"`
NoCache *bool `json:"no-cache,omitempty" hcl:"no-cache,optional" cty:"no-cache"`
NetworkMode *string `json:"-" hcl:"-" cty:"-"`
NoCacheFilter []string `json:"no-cache-filter,omitempty" hcl:"no-cache-filter,optional" cty:"no-cache-filter"`
// IMPORTANT: if you add more fields here, do not forget to update newOverrides and docs/manuals/bake/file-definition.md.
// linked is a private field to mark a target used as a linked one // IMPORTANT: if you add more fields here, do not forget to update newOverrides and README.
linked bool
} }
func (t *Target) normalize() { func (t *Target) normalize() {
t.Attest = removeDupes(t.Attest)
t.Tags = removeDupes(t.Tags) t.Tags = removeDupes(t.Tags)
t.Secrets = removeDupes(t.Secrets) t.Secrets = removeDupes(t.Secrets)
t.SSH = removeDupes(t.SSH) t.SSH = removeDupes(t.SSH)
@@ -592,16 +438,6 @@ func (t *Target) normalize() {
t.CacheFrom = removeDupes(t.CacheFrom) t.CacheFrom = removeDupes(t.CacheFrom)
t.CacheTo = removeDupes(t.CacheTo) t.CacheTo = removeDupes(t.CacheTo)
t.Outputs = removeDupes(t.Outputs) t.Outputs = removeDupes(t.Outputs)
t.NoCacheFilter = removeDupes(t.NoCacheFilter)
for k, v := range t.Contexts {
if v == "" {
delete(t.Contexts, k)
}
}
if len(t.Contexts) == 0 {
t.Contexts = nil
}
} }
func (t *Target) Merge(t2 *Target) { func (t *Target) Merge(t2 *Target) {
@@ -620,12 +456,6 @@ func (t *Target) Merge(t2 *Target) {
} }
t.Args[k] = v t.Args[k] = v
} }
for k, v := range t2.Contexts {
if t.Contexts == nil {
t.Contexts = map[string]string{}
}
t.Contexts[k] = v
}
for k, v := range t2.Labels { for k, v := range t2.Labels {
if t.Labels == nil { if t.Labels == nil {
t.Labels = map[string]string{} t.Labels = map[string]string{}
@@ -638,9 +468,6 @@ func (t *Target) Merge(t2 *Target) {
if t2.Target != nil { if t2.Target != nil {
t.Target = t2.Target t.Target = t2.Target
} }
if t2.Attest != nil { // merge
t.Attest = append(t.Attest, t2.Attest...)
}
if t2.Secrets != nil { // merge if t2.Secrets != nil { // merge
t.Secrets = append(t.Secrets, t2.Secrets...) t.Secrets = append(t.Secrets, t2.Secrets...)
} }
@@ -665,12 +492,6 @@ func (t *Target) Merge(t2 *Target) {
if t2.NoCache != nil { if t2.NoCache != nil {
t.NoCache = t2.NoCache t.NoCache = t2.NoCache
} }
if t2.NetworkMode != nil {
t.NetworkMode = t2.NetworkMode
}
if t2.NoCacheFilter != nil { // merge
t.NoCacheFilter = append(t.NoCacheFilter, t2.NoCacheFilter...)
}
t.Inherits = append(t.Inherits, t2.Inherits...) t.Inherits = append(t.Inherits, t2.Inherits...)
} }
@@ -691,14 +512,7 @@ func (t *Target) AddOverrides(overrides map[string]Override) error {
t.Args = map[string]string{} t.Args = map[string]string{}
} }
t.Args[keys[1]] = value t.Args[keys[1]] = value
case "contexts":
if len(keys) != 2 {
return errors.Errorf("contexts require name")
}
if t.Contexts == nil {
t.Contexts = map[string]string{}
}
t.Contexts[keys[1]] = value
case "labels": case "labels":
if len(keys) != 2 { if len(keys) != 2 {
return errors.Errorf("labels require name") return errors.Errorf("labels require name")
@@ -723,16 +537,12 @@ func (t *Target) AddOverrides(overrides map[string]Override) error {
t.Platforms = o.ArrValue t.Platforms = o.ArrValue
case "output": case "output":
t.Outputs = o.ArrValue t.Outputs = o.ArrValue
case "attest":
t.Attest = append(t.Attest, o.ArrValue...)
case "no-cache": case "no-cache":
noCache, err := strconv.ParseBool(value) noCache, err := strconv.ParseBool(value)
if err != nil { if err != nil {
return errors.Errorf("invalid value %s for boolean key no-cache", value) return errors.Errorf("invalid value %s for boolean key no-cache", value)
} }
t.NoCache = &noCache t.NoCache = &noCache
case "no-cache-filter":
t.NoCacheFilter = o.ArrValue
case "pull": case "pull":
pull, err := strconv.ParseBool(value) pull, err := strconv.ParseBool(value)
if err != nil { if err != nil {
@@ -776,21 +586,6 @@ func updateContext(t *build.Inputs, inp *Input) {
if inp == nil || inp.State == nil { if inp == nil || inp.State == nil {
return return
} }
for k, v := range t.NamedContexts {
if v.Path == "." {
t.NamedContexts[k] = build.NamedContext{Path: inp.URL}
}
if strings.HasPrefix(v.Path, "cwd://") || strings.HasPrefix(v.Path, "target:") || strings.HasPrefix(v.Path, "docker-image:") {
continue
}
if IsRemoteURL(v.Path) {
continue
}
st := llb.Scratch().File(llb.Copy(*inp.State, v.Path, "/"), llb.WithCustomNamef("set context %s to %s", k, v.Path))
t.NamedContexts[k] = build.NamedContext{State: &st}
}
if t.ContextPath == "." { if t.ContextPath == "." {
t.ContextPath = inp.URL t.ContextPath = inp.URL
return return
@@ -805,59 +600,6 @@ func updateContext(t *build.Inputs, inp *Input) {
t.ContextState = &st t.ContextState = &st
} }
// validateContextsEntitlements is a basic check to ensure contexts do not
// escape local directories when loaded from remote sources. This is to be
// replaced with proper entitlements support in the future.
func validateContextsEntitlements(t build.Inputs, inp *Input) error {
if inp == nil || inp.State == nil {
return nil
}
if v, ok := os.LookupEnv("BAKE_ALLOW_REMOTE_FS_ACCESS"); ok {
if vv, _ := strconv.ParseBool(v); vv {
return nil
}
}
if t.ContextState == nil {
if err := checkPath(t.ContextPath); err != nil {
return err
}
}
for _, v := range t.NamedContexts {
if v.State != nil {
continue
}
if err := checkPath(v.Path); err != nil {
return err
}
}
return nil
}
func checkPath(p string) error {
if IsRemoteURL(p) || strings.HasPrefix(p, "target:") || strings.HasPrefix(p, "docker-image:") {
return nil
}
p, err := filepath.EvalSymlinks(p)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
wd, err := os.Getwd()
if err != nil {
return err
}
rel, err := filepath.Rel(wd, p)
if err != nil {
return err
}
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
return errors.Errorf("path %s is outside of the working directory, please set BAKE_ALLOW_REMOTE_FS_ACCESS=1", p)
}
return nil
}
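The escape check boils down to resolving symlinks and testing the result relative to the working directory; anything that climbs out through ".." is rejected unless BAKE_ALLOW_REMOTE_FS_ACCESS=1 is set. A minimal standalone sketch of the same idea — the helper name is hypothetical, the directory is passed explicitly instead of using os.Getwd, and a bare ".." is rejected too:

package main

import (
    "fmt"
    "os"
    "path/filepath"
    "strings"
)

// escapesDir mirrors checkPath: resolve symlinks, then reject any path
// that can only be reached from dir by climbing out through "..".
func escapesDir(p, dir string) (bool, error) {
    p, err := filepath.EvalSymlinks(p)
    if err != nil {
        return false, err
    }
    rel, err := filepath.Rel(dir, p)
    if err != nil {
        return false, err
    }
    return rel == ".." || strings.HasPrefix(rel, ".."+string(os.PathSeparator)), nil
}

func main() {
    base, err := os.MkdirTemp("", "bake-ctx")
    if err != nil {
        panic(err)
    }
    base, _ = filepath.EvalSymlinks(base) // normalize e.g. /var vs /private/var
    work := filepath.Join(base, "work")
    secret := filepath.Join(base, "secret")
    _ = os.Mkdir(work, 0o755)
    _ = os.Mkdir(secret, 0o755)

    fmt.Println(escapesDir(secret, work)) // true <nil>: a sibling dir escapes work
    fmt.Println(escapesDir(work, work))   // false <nil>
}
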
func toBuildOpt(t *Target, inp *Input) (*build.Options, error) { func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
if v := t.Context; v != nil && *v == "-" { if v := t.Context; v != nil && *v == "-" {
return nil, errors.Errorf("context from stdin not allowed in bake") return nil, errors.Errorf("context from stdin not allowed in bake")
@@ -890,15 +632,10 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
if t.Pull != nil { if t.Pull != nil {
pull = *t.Pull pull = *t.Pull
} }
networkMode := ""
if t.NetworkMode != nil {
networkMode = *t.NetworkMode
}
bi := build.Inputs{ bi := build.Inputs{
ContextPath: contextPath, ContextPath: contextPath,
DockerfilePath: dockerfilePath, DockerfilePath: dockerfilePath,
NamedContexts: toNamedContexts(t.Contexts),
} }
if t.DockerfileInline != nil { if t.DockerfileInline != nil {
bi.DockerfileInline = *t.DockerfileInline bi.DockerfileInline = *t.DockerfileInline
@@ -907,15 +644,6 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
if strings.HasPrefix(bi.ContextPath, "cwd://") { if strings.HasPrefix(bi.ContextPath, "cwd://") {
bi.ContextPath = path.Clean(strings.TrimPrefix(bi.ContextPath, "cwd://")) bi.ContextPath = path.Clean(strings.TrimPrefix(bi.ContextPath, "cwd://"))
} }
for k, v := range bi.NamedContexts {
if strings.HasPrefix(v.Path, "cwd://") {
bi.NamedContexts[k] = build.NamedContext{Path: path.Clean(strings.TrimPrefix(v.Path, "cwd://"))}
}
}
if err := validateContextsEntitlements(bi, inp); err != nil {
return nil, err
}
t.Context = &bi.ContextPath t.Context = &bi.ContextPath
@@ -925,10 +653,7 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
BuildArgs: t.Args, BuildArgs: t.Args,
Labels: t.Labels, Labels: t.Labels,
NoCache: noCache, NoCache: noCache,
NoCacheFilter: t.NoCacheFilter,
Pull: pull, Pull: pull,
NetworkMode: networkMode,
Linked: t.linked,
} }
platforms, err := platformutil.Parse(t.Platforms) platforms, err := platformutil.Parse(t.Platforms)
@@ -937,8 +662,7 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
} }
bo.Platforms = platforms bo.Platforms = platforms
dockerConfig := config.LoadDefaultConfigFile(os.Stderr) bo.Session = append(bo.Session, authprovider.NewDockerAuthProvider(os.Stderr))
bo.Session = append(bo.Session, authprovider.NewDockerAuthProvider(dockerConfig))
secrets, err := buildflags.ParseSecretSpecs(t.Secrets) secrets, err := buildflags.ParseSecretSpecs(t.Secrets)
if err != nil { if err != nil {
@@ -978,12 +702,6 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
} }
bo.Exports = outputs bo.Exports = outputs
attests, err := buildflags.ParseAttests(t.Attest)
if err != nil {
return nil, err
}
bo.Attests = attests
return bo, nil return bo, nil
} }
@@ -1028,39 +746,3 @@ func parseOutputType(str string) string {
} }
return "" return ""
} }
func validateTargetName(name string) error {
if !targetNamePattern.MatchString(name) {
return errors.Errorf("only %q are allowed", validTargetNameChars)
}
return nil
}
func sanitizeTargetName(target string) string {
// as stipulated in the compose spec, a service name can contain a dot, so as
// a best effort and to avoid any potential ambiguity, we replace the dot
// with an underscore.
return strings.ReplaceAll(target, ".", "_")
}
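In practice a compose service named `web.app` therefore surfaces as the bake target `web_app`, which must then match the allowed-name pattern. A toy reproduction — the real targetNamePattern and validTargetNameChars are defined elsewhere in this file, so the pattern below is only an assumption:

package main

import (
    "fmt"
    "regexp"
    "strings"
)

// assumed stand-in for bake's targetNamePattern
var namePattern = regexp.MustCompile(`^[a-zA-Z0-9_-]+$`)

func main() {
    name := strings.ReplaceAll("web.app", ".", "_") // mirrors sanitizeTargetName
    fmt.Println(name, namePattern.MatchString(name)) // web_app true
}
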
func sliceEqual(s1, s2 []string) bool {
if len(s1) != len(s2) {
return false
}
sort.Strings(s1)
sort.Strings(s2)
for i := range s1 {
if s1[i] != s2[i] {
return false
}
}
return true
}
func toNamedContexts(m map[string]string) map[string]build.NamedContext {
m2 := make(map[string]build.NamedContext, len(m))
for k, v := range m {
m2[k] = build.NamedContext{Path: v}
}
return m2
}

File diff suppressed because it is too large


@@ -1,44 +1,48 @@
package bake package bake
import ( import (
"fmt"
"os" "os"
"path/filepath" "reflect"
"strings" "strings"
"github.com/compose-spec/compose-go/dotenv"
"github.com/compose-spec/compose-go/loader" "github.com/compose-spec/compose-go/loader"
compose "github.com/compose-spec/compose-go/types" compose "github.com/compose-spec/compose-go/types"
"github.com/pkg/errors"
"gopkg.in/yaml.v3"
) )
func ParseComposeFiles(fs []File) (*Config, error) { func parseCompose(dt []byte) (*compose.Project, error) {
envs, err := composeEnv() return loader.Load(compose.ConfigDetails{
if err != nil { ConfigFiles: []compose.ConfigFile{
return nil, err {
} Content: dt,
var cfgs []compose.ConfigFile },
for _, f := range fs { },
cfgs = append(cfgs, compose.ConfigFile{ Environment: envMap(os.Environ()),
Filename: f.Name,
Content: f.Data,
})
}
return ParseCompose(cfgs, envs)
}
func ParseCompose(cfgs []compose.ConfigFile, envs map[string]string) (*Config, error) {
cfg, err := loader.Load(compose.ConfigDetails{
ConfigFiles: cfgs,
Environment: envs,
}, func(options *loader.Options) { }, func(options *loader.Options) {
options.SkipNormalization = true options.SkipNormalization = true
}) })
}
func envMap(env []string) map[string]string {
result := make(map[string]string, len(env))
for _, s := range env {
kv := strings.SplitN(s, "=", 2)
if len(kv) != 2 {
continue
}
result[kv[0]] = kv[1]
}
return result
}
func ParseCompose(dt []byte) (*Config, error) {
cfg, err := parseCompose(dt)
if err != nil { if err != nil {
return nil, err return nil, err
} }
var c Config var c Config
var zeroBuildConfig compose.BuildConfig
if len(cfg.Services) > 0 { if len(cfg.Services) > 0 {
c.Groups = []*Group{} c.Groups = []*Group{}
c.Targets = []*Target{} c.Targets = []*Target{}
@@ -46,13 +50,13 @@ func ParseCompose(cfgs []compose.ConfigFile, envs map[string]string) (*Config, e
g := &Group{Name: "default"} g := &Group{Name: "default"}
for _, s := range cfg.Services { for _, s := range cfg.Services {
if s.Build == nil {
continue
}
targetName := sanitizeTargetName(s.Name) if s.Build == nil || reflect.DeepEqual(s.Build, zeroBuildConfig) {
if err = validateTargetName(targetName); err != nil { // if not make sure they're setting an image or it's invalid d-c.yml
return nil, errors.Wrapf(err, "invalid service name %q", targetName) if s.Image == "" {
return nil, fmt.Errorf("compose file invalid: service %s has neither an image nor a build context specified. At least one must be provided", s.Name)
}
continue
} }
var contextPathP *string var contextPathP *string
@@ -65,34 +69,17 @@ func ParseCompose(cfgs []compose.ConfigFile, envs map[string]string) (*Config, e
dockerfilePath := s.Build.Dockerfile dockerfilePath := s.Build.Dockerfile
dockerfilePathP = &dockerfilePath dockerfilePathP = &dockerfilePath
} }
g.Targets = append(g.Targets, s.Name)
var secrets []string
for _, bs := range s.Build.Secrets {
secret, err := composeToBuildkitSecret(bs, cfg.Secrets[bs.Source])
if err != nil {
return nil, err
}
secrets = append(secrets, secret)
}
g.Targets = append(g.Targets, targetName)
t := &Target{ t := &Target{
Name: targetName, Name: s.Name,
Context: contextPathP, Context: contextPathP,
Dockerfile: dockerfilePathP, Dockerfile: dockerfilePathP,
Tags: s.Build.Tags,
Labels: s.Build.Labels, Labels: s.Build.Labels,
Args: flatten(s.Build.Args.Resolve(func(val string) (string, bool) { Args: flatten(s.Build.Args.Resolve(func(val string) (string, bool) {
if val, ok := s.Environment[val]; ok && val != nil {
return *val, true
}
val, ok := cfg.Environment[val] val, ok := cfg.Environment[val]
return val, ok return val, ok
})), })),
CacheFrom: s.Build.CacheFrom, CacheFrom: s.Build.CacheFrom,
CacheTo: s.Build.CacheTo,
NetworkMode: &s.Build.Network,
Secrets: secrets,
} }
if err = t.composeExtTarget(s.Build.Extensions); err != nil { if err = t.composeExtTarget(s.Build.Extensions); err != nil {
return nil, err return nil, err
@@ -113,86 +100,6 @@ func ParseCompose(cfgs []compose.ConfigFile, envs map[string]string) (*Config, e
return &c, nil return &c, nil
} }
func validateComposeFile(dt []byte, fn string) (bool, error) {
envs, err := composeEnv()
if err != nil {
return true, err
}
fnl := strings.ToLower(fn)
if strings.HasSuffix(fnl, ".yml") || strings.HasSuffix(fnl, ".yaml") {
return true, validateCompose(dt, envs)
}
if strings.HasSuffix(fnl, ".json") || strings.HasSuffix(fnl, ".hcl") {
return false, nil
}
err = validateCompose(dt, envs)
return err == nil, err
}
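The file name decides up front: `.yml` and `.yaml` must validate as compose, `.json` and `.hcl` never count as compose, and any other name falls back to a trial parse. A sketch of those expectations as an in-package test (hypothetical test name, using the same testify helpers as the existing tests):

func TestValidateComposeFileSketch(t *testing.T) {
    dt := []byte("services:\n  app:\n    build: .\n")

    isCompose, err := validateComposeFile(dt, "docker-compose.yml")
    require.True(t, isCompose) // .yml is always treated as compose...
    require.NoError(t, err)    // ...and must validate

    isCompose, err = validateComposeFile(dt, "docker-bake.hcl")
    require.False(t, isCompose) // .hcl is never compose
    require.NoError(t, err)

    isCompose, err = validateComposeFile(dt, "build.custom")
    require.True(t, isCompose) // unknown extension: the trial parse succeeded
    require.NoError(t, err)
}
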
func validateCompose(dt []byte, envs map[string]string) error {
_, err := loader.Load(compose.ConfigDetails{
ConfigFiles: []compose.ConfigFile{
{
Content: dt,
},
},
Environment: envs,
}, func(options *loader.Options) {
options.SkipNormalization = true
// consistency is checked later in ParseCompose to ensure multiple
// compose files can be merged together
options.SkipConsistencyCheck = true
})
return err
}
func composeEnv() (map[string]string, error) {
envs := sliceToMap(os.Environ())
if wd, err := os.Getwd(); err == nil {
envs, err = loadDotEnv(envs, wd)
if err != nil {
return nil, err
}
}
return envs, nil
}
func loadDotEnv(curenv map[string]string, workingDir string) (map[string]string, error) {
if curenv == nil {
curenv = make(map[string]string)
}
ef, err := filepath.Abs(filepath.Join(workingDir, ".env"))
if err != nil {
return nil, err
}
if _, err = os.Stat(ef); os.IsNotExist(err) {
return curenv, nil
} else if err != nil {
return nil, err
}
dt, err := os.ReadFile(ef)
if err != nil {
return nil, err
}
envs, err := dotenv.UnmarshalBytes(dt)
if err != nil {
return nil, err
}
for k, v := range envs {
if _, set := curenv[k]; set {
continue
}
curenv[k] = v
}
return curenv, nil
}
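Note the precedence: values already present in the process environment win over `.env` entries, and a missing `.env` file is not an error. A quick in-package sketch (hypothetical test name):

func TestLoadDotEnvSketch(t *testing.T) {
    dir := t.TempDir()
    require.NoError(t, os.WriteFile(filepath.Join(dir, ".env"), []byte("FOO=from-file\nBAR=file-only"), 0644))

    envs, err := loadDotEnv(map[string]string{"FOO": "from-process"}, dir)
    require.NoError(t, err)
    require.Equal(t, "from-process", envs["FOO"]) // the process environment wins
    require.Equal(t, "file-only", envs["BAR"])    // .env only fills gaps
}
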
func flatten(in compose.MappingWithEquals) compose.Mapping { func flatten(in compose.MappingWithEquals) compose.Mapping {
if len(in) == 0 { if len(in) == 0 {
return nil return nil
@@ -207,111 +114,81 @@ func flatten(in compose.MappingWithEquals) compose.Mapping {
return out return out
} }
// xbake is the Compose build extension that provides fields not (yet) available
// in the Compose build specification: https://github.com/compose-spec/compose-spec/blob/master/build.md
type xbake struct {
Tags stringArray `yaml:"tags,omitempty"`
CacheFrom stringArray `yaml:"cache-from,omitempty"`
CacheTo stringArray `yaml:"cache-to,omitempty"`
Secrets stringArray `yaml:"secret,omitempty"`
SSH stringArray `yaml:"ssh,omitempty"`
Platforms stringArray `yaml:"platforms,omitempty"`
Outputs stringArray `yaml:"output,omitempty"`
Pull *bool `yaml:"pull,omitempty"`
NoCache *bool `yaml:"no-cache,omitempty"`
NoCacheFilter stringArray `yaml:"no-cache-filter,omitempty"`
Contexts stringMap `yaml:"contexts,omitempty"`
// don't forget to update documentation if you add a new field:
// docs/manuals/bake/compose-file.md#extension-field-with-x-bake
}
type stringMap map[string]string
type stringArray []string
func (sa *stringArray) UnmarshalYAML(unmarshal func(interface{}) error) error {
var multi []string
err := unmarshal(&multi)
if err != nil {
var single string
if err := unmarshal(&single); err != nil {
return err
}
*sa = strings.Fields(single)
} else {
*sa = multi
}
return nil
}
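stringArray is what lets every list-valued x-bake field accept either a proper YAML sequence or a single whitespace-separated scalar. Roughly, assuming a hypothetical helper in the same package (yaml.v3 still honors this legacy unmarshal interface):

func decodeTagsSketch() ([]string, []string, error) {
    var seq, scalar struct {
        Tags stringArray `yaml:"tags"`
    }
    if err := yaml.Unmarshal([]byte(`tags: ["app:1.0", "app:latest"]`), &seq); err != nil {
        return nil, nil, err
    }
    if err := yaml.Unmarshal([]byte(`tags: "app:1.0 app:latest"`), &scalar); err != nil {
        return nil, nil, err
    }
    // both forms decode to ["app:1.0", "app:latest"]
    return []string(seq.Tags), []string(scalar.Tags), nil
}
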
// composeExtTarget converts Compose build extension x-bake to bake Target // composeExtTarget converts Compose build extension x-bake to bake Target
// https://github.com/compose-spec/compose-spec/blob/master/spec.md#extension // https://github.com/compose-spec/compose-spec/blob/master/spec.md#extension
func (t *Target) composeExtTarget(exts map[string]interface{}) error { func (t *Target) composeExtTarget(exts map[string]interface{}) error {
var xb xbake if ext, ok := exts["x-bake"]; ok {
for key, val := range ext.(map[string]interface{}) {
ext, ok := exts["x-bake"] switch key {
if !ok || ext == nil { case "tags":
return nil if res, k := val.(string); k {
t.Tags = append(t.Tags, res)
} else {
for _, res := range val.([]interface{}) {
t.Tags = append(t.Tags, res.(string))
} }
yb, _ := yaml.Marshal(ext)
if err := yaml.Unmarshal(yb, &xb); err != nil {
return err
} }
case "cache-from":
if len(xb.Tags) > 0 { t.CacheFrom = []string{} // Needed to override the main field
t.Tags = dedupSlice(append(t.Tags, xb.Tags...)) if res, k := val.(string); k {
t.CacheFrom = append(t.CacheFrom, res)
} else {
for _, res := range val.([]interface{}) {
t.CacheFrom = append(t.CacheFrom, res.(string))
} }
if len(xb.CacheFrom) > 0 {
t.CacheFrom = dedupSlice(append(t.CacheFrom, xb.CacheFrom...))
} }
if len(xb.CacheTo) > 0 { case "cache-to":
t.CacheTo = dedupSlice(append(t.CacheTo, xb.CacheTo...)) if res, k := val.(string); k {
t.CacheTo = append(t.CacheTo, res)
} else {
for _, res := range val.([]interface{}) {
t.CacheTo = append(t.CacheTo, res.(string))
} }
if len(xb.Secrets) > 0 {
t.Secrets = dedupSlice(append(t.Secrets, xb.Secrets...))
} }
if len(xb.SSH) > 0 { case "secret":
t.SSH = dedupSlice(append(t.SSH, xb.SSH...)) if res, k := val.(string); k {
t.Secrets = append(t.Secrets, res)
} else {
for _, res := range val.([]interface{}) {
t.Secrets = append(t.Secrets, res.(string))
} }
if len(xb.Platforms) > 0 {
t.Platforms = dedupSlice(append(t.Platforms, xb.Platforms...))
} }
if len(xb.Outputs) > 0 { case "ssh":
t.Outputs = dedupSlice(append(t.Outputs, xb.Outputs...)) if res, k := val.(string); k {
t.SSH = append(t.SSH, res)
} else {
for _, res := range val.([]interface{}) {
t.SSH = append(t.SSH, res.(string))
} }
if xb.Pull != nil {
t.Pull = xb.Pull
} }
if xb.NoCache != nil { case "platforms":
t.NoCache = xb.NoCache if res, k := val.(string); k {
t.Platforms = append(t.Platforms, res)
} else {
for _, res := range val.([]interface{}) {
t.Platforms = append(t.Platforms, res.(string))
}
}
case "output":
if res, k := val.(string); k {
t.Outputs = append(t.Outputs, res)
} else {
for _, res := range val.([]interface{}) {
t.Outputs = append(t.Outputs, res.(string))
}
}
case "pull":
if res, ok := val.(bool); ok {
t.Pull = &res
}
case "no-cache":
if res, ok := val.(bool); ok {
t.NoCache = &res
}
default:
return fmt.Errorf("compose file invalid: unkwown %s field for x-bake", key)
} }
if len(xb.NoCacheFilter) > 0 {
t.NoCacheFilter = dedupSlice(append(t.NoCacheFilter, xb.NoCacheFilter...))
} }
if len(xb.Contexts) > 0 {
t.Contexts = dedupMap(t.Contexts, xb.Contexts)
} }
return nil return nil
} }
// composeToBuildkitSecret converts a secret from compose format to buildkit's
// csv format.
func composeToBuildkitSecret(inp compose.ServiceSecretConfig, psecret compose.SecretConfig) (string, error) {
if psecret.External.External {
return "", errors.Errorf("unsupported external secret %s", psecret.Name)
}
var bkattrs []string
if inp.Source != "" {
bkattrs = append(bkattrs, "id="+inp.Source)
}
if psecret.File != "" {
bkattrs = append(bkattrs, "src="+psecret.File)
}
if psecret.Environment != "" {
bkattrs = append(bkattrs, "env="+psecret.Environment)
}
return strings.Join(bkattrs, ","), nil
}
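A secret declared at the service level thus maps onto buildkit's CSV syntax: `id=` comes from the service-side source, while `src=`/`env=` come from the top-level definition. A sketch with a hypothetical wrapper, matching the `id=token,env=ENV_TOKEN` fixture asserted in the tests below:

func secretCSVSketch() (string, error) {
    // a compose secret sourced from an environment variable...
    return composeToBuildkitSecret(
        compose.ServiceSecretConfig{Source: "token"},
        compose.SecretConfig{Environment: "ENV_TOKEN"},
    ) // ...renders as "id=token,env=ENV_TOKEN"
}
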


@@ -2,12 +2,9 @@ package bake
import ( import (
"os" "os"
"path/filepath"
"sort" "sort"
"testing" "testing"
compose "github.com/compose-spec/compose-go/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@@ -22,29 +19,15 @@ services:
build: build:
context: ./dir context: ./dir
dockerfile: Dockerfile-alternate dockerfile: Dockerfile-alternate
network:
none
args: args:
buildno: 123 buildno: 123
cache_from:
- type=local,src=path/to/cache
cache_to:
- type=local,dest=path/to/cache
secrets:
- token
- aws
secrets:
token:
environment: ENV_TOKEN
aws:
file: /root/.aws/credentials
`) `)
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil) c, err := ParseCompose(dt)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, 1, len(c.Groups)) require.Equal(t, 1, len(c.Groups))
require.Equal(t, "default", c.Groups[0].Name) require.Equal(t, c.Groups[0].Name, "default")
sort.Strings(c.Groups[0].Targets) sort.Strings(c.Groups[0].Targets)
require.Equal(t, []string{"db", "webapp"}, c.Groups[0].Targets) require.Equal(t, []string{"db", "webapp"}, c.Groups[0].Targets)
@@ -54,20 +37,12 @@ secrets:
}) })
require.Equal(t, "db", c.Targets[0].Name) require.Equal(t, "db", c.Targets[0].Name)
require.Equal(t, "./db", *c.Targets[0].Context) require.Equal(t, "./db", *c.Targets[0].Context)
require.Equal(t, []string{"docker.io/tonistiigi/db"}, c.Targets[0].Tags)
require.Equal(t, "webapp", c.Targets[1].Name) require.Equal(t, "webapp", c.Targets[1].Name)
require.Equal(t, "./dir", *c.Targets[1].Context) require.Equal(t, "./dir", *c.Targets[1].Context)
require.Equal(t, "Dockerfile-alternate", *c.Targets[1].Dockerfile) require.Equal(t, "Dockerfile-alternate", *c.Targets[1].Dockerfile)
require.Equal(t, 1, len(c.Targets[1].Args)) require.Equal(t, 1, len(c.Targets[1].Args))
require.Equal(t, "123", c.Targets[1].Args["buildno"]) require.Equal(t, "123", c.Targets[1].Args["buildno"])
require.Equal(t, []string{"type=local,src=path/to/cache"}, c.Targets[1].CacheFrom)
require.Equal(t, []string{"type=local,dest=path/to/cache"}, c.Targets[1].CacheTo)
require.Equal(t, "none", *c.Targets[1].NetworkMode)
require.Equal(t, []string{
"id=token,env=ENV_TOKEN",
"id=aws,src=/root/.aws/credentials",
}, c.Targets[1].Secrets)
} }
func TestNoBuildOutOfTreeService(t *testing.T) { func TestNoBuildOutOfTreeService(t *testing.T) {
@@ -78,10 +53,9 @@ services:
webapp: webapp:
build: ./db build: ./db
`) `)
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil) c, err := ParseCompose(dt)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, 1, len(c.Groups)) require.Equal(t, 1, len(c.Groups))
require.Equal(t, 1, len(c.Targets))
} }
func TestParseComposeTarget(t *testing.T) { func TestParseComposeTarget(t *testing.T) {
@@ -97,7 +71,7 @@ services:
target: webapp target: webapp
`) `)
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil) c, err := ParseCompose(dt)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, 2, len(c.Targets)) require.Equal(t, 2, len(c.Targets))
@@ -122,15 +96,15 @@ services:
target: webapp target: webapp
`) `)
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil) c, err := ParseCompose(dt)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, 2, len(c.Targets)) require.Equal(t, 2, len(c.Targets))
sort.Slice(c.Targets, func(i, j int) bool { sort.Slice(c.Targets, func(i, j int) bool {
return c.Targets[i].Name < c.Targets[j].Name return c.Targets[i].Name < c.Targets[j].Name
}) })
require.Equal(t, "db", c.Targets[0].Name) require.Equal(t, c.Targets[0].Name, "db")
require.Equal(t, "db", *c.Targets[0].Target) require.Equal(t, "db", *c.Targets[0].Target)
require.Equal(t, "webapp", c.Targets[1].Name) require.Equal(t, c.Targets[1].Name, "webapp")
require.Equal(t, "webapp", *c.Targets[1].Target) require.Equal(t, "webapp", *c.Targets[1].Target)
} }
@@ -156,22 +130,28 @@ services:
os.Setenv("ZZZ_BAR", "zzz_foo") os.Setenv("ZZZ_BAR", "zzz_foo")
defer os.Unsetenv("ZZZ_BAR") defer os.Unsetenv("ZZZ_BAR")
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, sliceToMap(os.Environ())) c, err := ParseCompose(dt)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, "bar", c.Targets[0].Args["FOO"]) require.Equal(t, c.Targets[0].Args["FOO"], "bar")
require.Equal(t, "zzz_foo", c.Targets[0].Args["BAR"]) require.Equal(t, c.Targets[0].Args["BAR"], "zzz_foo")
require.Equal(t, "FOO", c.Targets[0].Args["BRB"]) require.Equal(t, c.Targets[0].Args["BRB"], "FOO")
} }
func TestInconsistentComposeFile(t *testing.T) { func TestBogusCompose(t *testing.T) {
var dt = []byte(` var dt = []byte(`
services: services:
db:
labels:
- "foo"
webapp: webapp:
entrypoint: echo 1 build:
context: .
target: webapp
`) `)
_, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil) _, err := ParseCompose(dt)
require.Error(t, err) require.Error(t, err)
require.Contains(t, err.Error(), "has neither an image nor a build context specified: invalid compose project")
} }
func TestAdvancedNetwork(t *testing.T) { func TestAdvancedNetwork(t *testing.T) {
@@ -195,28 +175,10 @@ networks:
gateway: 10.5.0.254 gateway: 10.5.0.254
`) `)
_, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil) _, err := ParseCompose(dt)
require.NoError(t, err) require.NoError(t, err)
} }
func TestTags(t *testing.T) {
var dt = []byte(`
services:
example:
image: example
build:
context: .
dockerfile: Dockerfile
tags:
- foo
- bar
`)
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
require.Equal(t, []string{"foo", "bar"}, c.Targets[0].Tags)
}
func TestDependsOnList(t *testing.T) { func TestDependsOnList(t *testing.T) {
var dt = []byte(` var dt = []byte(`
version: "3.8" version: "3.8"
@@ -249,7 +211,7 @@ networks:
name: test-net name: test-net
`) `)
_, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil) _, err := ParseCompose(dt)
require.NoError(t, err) require.NoError(t, err)
} }
@@ -263,16 +225,10 @@ services:
dockerfile: ./Dockerfile dockerfile: ./Dockerfile
cache_from: cache_from:
- user/app:cache - user/app:cache
cache_to:
- user/app:cache
tags:
- ct-addon:baz
args: args:
CT_ECR: foo CT_ECR: foo
CT_TAG: bar CT_TAG: bar
x-bake: x-bake:
contexts:
alpine: docker-image://alpine:3.13
tags: tags:
- ct-addon:foo - ct-addon:foo
- ct-addon:alp - ct-addon:alp
@@ -281,8 +237,7 @@ services:
- linux/arm64 - linux/arm64
cache-from: cache-from:
- type=local,src=path/to/cache - type=local,src=path/to/cache
cache-to: cache-to: local,dest=path/to/cache
- type=local,dest=path/to/cache
pull: true pull: true
aws: aws:
@@ -302,347 +257,27 @@ services:
no-cache: true no-cache: true
`) `)
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil) c, err := ParseCompose(dt)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, 2, len(c.Targets)) require.Equal(t, 2, len(c.Targets))
sort.Slice(c.Targets, func(i, j int) bool { sort.Slice(c.Targets, func(i, j int) bool {
return c.Targets[i].Name < c.Targets[j].Name return c.Targets[i].Name < c.Targets[j].Name
}) })
require.Equal(t, map[string]string{"CT_ECR": "foo", "CT_TAG": "bar"}, c.Targets[0].Args) require.Equal(t, c.Targets[0].Args, map[string]string{"CT_ECR": "foo", "CT_TAG": "bar"})
require.Equal(t, []string{"ct-addon:baz", "ct-addon:foo", "ct-addon:alp"}, c.Targets[0].Tags) require.Equal(t, c.Targets[0].Tags, []string{"ct-addon:foo", "ct-addon:alp"})
require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[0].Platforms) require.Equal(t, c.Targets[0].Platforms, []string{"linux/amd64", "linux/arm64"})
require.Equal(t, []string{"user/app:cache", "type=local,src=path/to/cache"}, c.Targets[0].CacheFrom) require.Equal(t, c.Targets[0].CacheFrom, []string{"type=local,src=path/to/cache"})
require.Equal(t, []string{"user/app:cache", "type=local,dest=path/to/cache"}, c.Targets[0].CacheTo) require.Equal(t, c.Targets[0].CacheTo, []string{"local,dest=path/to/cache"})
require.Equal(t, newBool(true), c.Targets[0].Pull) require.Equal(t, c.Targets[0].Pull, newBool(true))
require.Equal(t, map[string]string{"alpine": "docker-image://alpine:3.13"}, c.Targets[0].Contexts) require.Equal(t, c.Targets[1].Tags, []string{"ct-fake-aws:bar"})
require.Equal(t, []string{"ct-fake-aws:bar"}, c.Targets[1].Tags) require.Equal(t, c.Targets[1].Secrets, []string{"id=mysecret,src=/local/secret", "id=mysecret2,src=/local/secret2"})
require.Equal(t, []string{"id=mysecret,src=/local/secret", "id=mysecret2,src=/local/secret2"}, c.Targets[1].Secrets) require.Equal(t, c.Targets[1].SSH, []string{"default"})
require.Equal(t, []string{"default"}, c.Targets[1].SSH) require.Equal(t, c.Targets[1].Platforms, []string{"linux/arm64"})
require.Equal(t, []string{"linux/arm64"}, c.Targets[1].Platforms) require.Equal(t, c.Targets[1].Outputs, []string{"type=docker"})
require.Equal(t, []string{"type=docker"}, c.Targets[1].Outputs) require.Equal(t, c.Targets[1].NoCache, newBool(true))
require.Equal(t, newBool(true), c.Targets[1].NoCache)
}
func TestComposeExtDedup(t *testing.T) {
var dt = []byte(`
services:
webapp:
image: app:bar
build:
cache_from:
- user/app:cache
cache_to:
- user/app:cache
tags:
- ct-addon:foo
x-bake:
tags:
- ct-addon:foo
- ct-addon:baz
cache-from:
- user/app:cache
- type=local,src=path/to/cache
cache-to:
- type=local,dest=path/to/cache
`)
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
require.Equal(t, 1, len(c.Targets))
require.Equal(t, []string{"ct-addon:foo", "ct-addon:baz"}, c.Targets[0].Tags)
require.Equal(t, []string{"user/app:cache", "type=local,src=path/to/cache"}, c.Targets[0].CacheFrom)
require.Equal(t, []string{"user/app:cache", "type=local,dest=path/to/cache"}, c.Targets[0].CacheTo)
}
func TestEnv(t *testing.T) {
envf, err := os.CreateTemp("", "env")
require.NoError(t, err)
defer os.Remove(envf.Name())
_, err = envf.WriteString("FOO=bsdf -csdf\n")
require.NoError(t, err)
var dt = []byte(`
services:
scratch:
build:
context: .
args:
CT_ECR: foo
FOO:
NODE_ENV:
environment:
- NODE_ENV=test
- AWS_ACCESS_KEY_ID=dummy
- AWS_SECRET_ACCESS_KEY=dummy
env_file:
- ` + envf.Name() + `
`)
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
require.Equal(t, map[string]string{"CT_ECR": "foo", "FOO": "bsdf -csdf", "NODE_ENV": "test"}, c.Targets[0].Args)
}
func TestDotEnv(t *testing.T) {
tmpdir := t.TempDir()
err := os.WriteFile(filepath.Join(tmpdir, ".env"), []byte("FOO=bar"), 0644)
require.NoError(t, err)
var dt = []byte(`
services:
scratch:
build:
context: .
args:
FOO:
`)
chdir(t, tmpdir)
c, err := ParseComposeFiles([]File{{
Name: "docker-compose.yml",
Data: dt,
}})
require.NoError(t, err)
require.Equal(t, map[string]string{"FOO": "bar"}, c.Targets[0].Args)
}
func TestPorts(t *testing.T) {
var dt = []byte(`
services:
foo:
build:
context: .
ports:
- 3306:3306
bar:
build:
context: .
ports:
- mode: ingress
target: 3306
published: "3306"
protocol: tcp
`)
_, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
} }
func newBool(val bool) *bool { func newBool(val bool) *bool {
b := val b := val
return &b return &b
} }
func TestServiceName(t *testing.T) {
cases := []struct {
svc string
wantErr bool
}{
{
svc: "a",
wantErr: false,
},
{
svc: "abc",
wantErr: false,
},
{
svc: "a.b",
wantErr: false,
},
{
svc: "_a",
wantErr: false,
},
{
svc: "a_b",
wantErr: false,
},
{
svc: "AbC",
wantErr: false,
},
{
svc: "AbC-0123",
wantErr: false,
},
}
for _, tt := range cases {
tt := tt
t.Run(tt.svc, func(t *testing.T) {
_, err := ParseCompose([]compose.ConfigFile{{Content: []byte(`
services:
` + tt.svc + `:
build:
context: .
`)}}, nil)
if tt.wantErr {
require.Error(t, err)
} else {
require.NoError(t, err)
}
})
}
}
func TestValidateComposeSecret(t *testing.T) {
cases := []struct {
name string
dt []byte
wantErr bool
}{
{
name: "secret set by file",
dt: []byte(`
secrets:
foo:
file: .secret
`),
wantErr: false,
},
{
name: "secret set by environment",
dt: []byte(`
secrets:
foo:
environment: TOKEN
`),
wantErr: false,
},
{
name: "external secret",
dt: []byte(`
secrets:
foo:
external: true
`),
wantErr: false,
},
{
name: "unset secret",
dt: []byte(`
secrets:
foo: {}
`),
wantErr: true,
},
{
name: "undefined secret",
dt: []byte(`
services:
foo:
build:
secrets:
- token
`),
wantErr: true,
},
}
for _, tt := range cases {
tt := tt
t.Run(tt.name, func(t *testing.T) {
_, err := ParseCompose([]compose.ConfigFile{{Content: tt.dt}}, nil)
if tt.wantErr {
require.Error(t, err)
} else {
require.NoError(t, err)
}
})
}
}
func TestValidateComposeFile(t *testing.T) {
cases := []struct {
name string
fn string
dt []byte
isCompose bool
wantErr bool
}{
{
name: "empty service",
fn: "docker-compose.yml",
dt: []byte(`
services:
foo:
`),
isCompose: true,
wantErr: true,
},
{
name: "build",
fn: "docker-compose.yml",
dt: []byte(`
services:
foo:
build: .
`),
isCompose: true,
wantErr: false,
},
{
name: "image",
fn: "docker-compose.yml",
dt: []byte(`
services:
simple:
image: nginx
`),
isCompose: true,
wantErr: false,
},
{
name: "unknown ext",
fn: "docker-compose.foo",
dt: []byte(`
services:
simple:
image: nginx
`),
isCompose: true,
wantErr: false,
},
{
name: "hcl",
fn: "docker-bake.hcl",
dt: []byte(`
target "default" {
dockerfile = "test"
}
`),
isCompose: false,
wantErr: false,
},
}
for _, tt := range cases {
tt := tt
t.Run(tt.name, func(t *testing.T) {
isCompose, err := validateComposeFile(tt.dt, tt.fn)
assert.Equal(t, tt.isCompose, isCompose)
if tt.wantErr {
require.Error(t, err)
} else {
require.NoError(t, err)
}
})
}
}
// chdir changes the current working directory to the named directory,
// and then restores the original working directory at the end of the test.
func chdir(t *testing.T, dir string) {
olddir, err := os.Getwd()
if err != nil {
t.Fatalf("chdir: %v", err)
}
if err := os.Chdir(dir); err != nil {
t.Fatalf("chdir %s: %v", dir, err)
}
t.Cleanup(func() {
if err := os.Chdir(olddir); err != nil {
t.Errorf("chdir to original working directory %s: %v", olddir, err)
os.Exit(1)
}
})
}


@@ -3,7 +3,7 @@ package bake
import ( import (
"strings" "strings"
"github.com/hashicorp/hcl/v2" hcl "github.com/hashicorp/hcl/v2"
"github.com/hashicorp/hcl/v2/hclparse" "github.com/hashicorp/hcl/v2/hclparse"
"github.com/moby/buildkit/solver/errdefs" "github.com/moby/buildkit/solver/errdefs"
"github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/solver/pb"


@@ -444,94 +444,6 @@ func TestHCLAttrs(t *testing.T) {
// attr-multifile // attr-multifile
} }
func TestHCLTargetAttrs(t *testing.T) {
dt := []byte(`
target "foo" {
dockerfile = "xxx"
context = target.bar.context
target = target.foo.dockerfile
}
target "bar" {
dockerfile = target.foo.dockerfile
context = "yyy"
target = target.bar.context
}
`)
c, err := ParseFile(dt, "docker-bake.hcl")
require.NoError(t, err)
require.Equal(t, 2, len(c.Targets))
require.Equal(t, "foo", c.Targets[0].Name)
require.Equal(t, "bar", c.Targets[1].Name)
require.Equal(t, "xxx", *c.Targets[0].Dockerfile)
require.Equal(t, "yyy", *c.Targets[0].Context)
require.Equal(t, "xxx", *c.Targets[0].Target)
require.Equal(t, "xxx", *c.Targets[1].Dockerfile)
require.Equal(t, "yyy", *c.Targets[1].Context)
require.Equal(t, "yyy", *c.Targets[1].Target)
}
func TestHCLTargetGlobal(t *testing.T) {
dt := []byte(`
target "foo" {
dockerfile = "x"
}
x = target.foo.dockerfile
y = x
target "bar" {
dockerfile = y
}
`)
c, err := ParseFile(dt, "docker-bake.hcl")
require.NoError(t, err)
require.Equal(t, 2, len(c.Targets))
require.Equal(t, "foo", c.Targets[0].Name)
require.Equal(t, "bar", c.Targets[1].Name)
require.Equal(t, "x", *c.Targets[0].Dockerfile)
require.Equal(t, "x", *c.Targets[1].Dockerfile)
}
func TestHCLTargetAttrName(t *testing.T) {
dt := []byte(`
target "foo" {
dockerfile = target.foo.name
}
`)
c, err := ParseFile(dt, "docker-bake.hcl")
require.NoError(t, err)
require.Equal(t, 1, len(c.Targets))
require.Equal(t, "foo", c.Targets[0].Name)
require.Equal(t, "foo", *c.Targets[0].Dockerfile)
}
func TestHCLTargetAttrEmptyChain(t *testing.T) {
dt := []byte(`
target "foo" {
# dockerfile = Dockerfile
context = target.foo.dockerfile
target = target.foo.context
}
`)
c, err := ParseFile(dt, "docker-bake.hcl")
require.NoError(t, err)
require.Equal(t, 1, len(c.Targets))
require.Equal(t, "foo", c.Targets[0].Name)
require.Nil(t, c.Targets[0].Dockerfile)
require.Nil(t, c.Targets[0].Context)
require.Nil(t, c.Targets[0].Target)
}
func TestHCLAttrsCustomType(t *testing.T) { func TestHCLAttrsCustomType(t *testing.T) {
dt := []byte(` dt := []byte(`
platforms=["linux/arm64", "linux/amd64"] platforms=["linux/arm64", "linux/amd64"]
@@ -708,177 +620,3 @@ func TestHCLBuiltinVars(t *testing.T) {
require.Equal(t, "foo", *c.Targets[0].Context) require.Equal(t, "foo", *c.Targets[0].Context)
require.Equal(t, "test", *c.Targets[0].Dockerfile) require.Equal(t, "test", *c.Targets[0].Dockerfile)
} }
func TestCombineHCLAndJSONTargets(t *testing.T) {
c, err := ParseFiles([]File{
{
Name: "docker-bake.hcl",
Data: []byte(`
group "default" {
targets = ["a"]
}
target "metadata-a" {}
target "metadata-b" {}
target "a" {
inherits = ["metadata-a"]
context = "."
target = "a"
}
target "b" {
inherits = ["metadata-b"]
context = "."
target = "b"
}`),
},
{
Name: "metadata-a.json",
Data: []byte(`
{
"target": [{
"metadata-a": [{
"tags": [
"app/a:1.0.0",
"app/a:latest"
]
}]
}]
}`),
},
{
Name: "metadata-b.json",
Data: []byte(`
{
"target": [{
"metadata-b": [{
"tags": [
"app/b:1.0.0",
"app/b:latest"
]
}]
}]
}`),
},
}, nil)
require.NoError(t, err)
require.Equal(t, 1, len(c.Groups))
require.Equal(t, "default", c.Groups[0].Name)
require.Equal(t, []string{"a"}, c.Groups[0].Targets)
require.Equal(t, 4, len(c.Targets))
require.Equal(t, c.Targets[0].Name, "metadata-a")
require.Equal(t, []string{"app/a:1.0.0", "app/a:latest"}, c.Targets[0].Tags)
require.Equal(t, c.Targets[1].Name, "metadata-b")
require.Equal(t, []string{"app/b:1.0.0", "app/b:latest"}, c.Targets[1].Tags)
require.Equal(t, c.Targets[2].Name, "a")
require.Equal(t, ".", *c.Targets[2].Context)
require.Equal(t, "a", *c.Targets[2].Target)
require.Equal(t, c.Targets[3].Name, "b")
require.Equal(t, ".", *c.Targets[3].Context)
require.Equal(t, "b", *c.Targets[3].Target)
}
func TestCombineHCLAndJSONVars(t *testing.T) {
c, err := ParseFiles([]File{
{
Name: "docker-bake.hcl",
Data: []byte(`
variable "ABC" {
default = "foo"
}
variable "DEF" {
default = ""
}
group "default" {
targets = ["one"]
}
target "one" {
args = {
a = "pre-${ABC}"
}
}
target "two" {
args = {
b = "pre-${DEF}"
}
}`),
},
{
Name: "foo.json",
Data: []byte(`{"variable": {"DEF": {"default": "bar"}}, "target": { "one": { "args": {"a": "pre-${ABC}-${DEF}"}} } }`),
},
{
Name: "bar.json",
Data: []byte(`{"ABC": "ghi", "DEF": "jkl"}`),
},
}, nil)
require.NoError(t, err)
require.Equal(t, 1, len(c.Groups))
require.Equal(t, "default", c.Groups[0].Name)
require.Equal(t, []string{"one"}, c.Groups[0].Targets)
require.Equal(t, 2, len(c.Targets))
require.Equal(t, c.Targets[0].Name, "one")
require.Equal(t, map[string]string{"a": "pre-ghi-jkl"}, c.Targets[0].Args)
require.Equal(t, c.Targets[1].Name, "two")
require.Equal(t, map[string]string{"b": "pre-jkl"}, c.Targets[1].Args)
}
func TestEmptyVariableJSON(t *testing.T) {
dt := []byte(`{
"variable": {
"VAR": {}
}
}`)
_, err := ParseFile(dt, "docker-bake.json")
require.NoError(t, err)
}
func TestFunctionNoParams(t *testing.T) {
dt := []byte(`
function "foo" {
result = "bar"
}
target "foo_target" {
args = {
test = foo()
}
}
`)
_, err := ParseFile(dt, "docker-bake.hcl")
require.Error(t, err)
}
func TestFunctionNoResult(t *testing.T) {
dt := []byte(`
function "foo" {
params = ["a"]
}
`)
_, err := ParseFile(dt, "docker-bake.hcl")
require.Error(t, err)
}
func TestVarUnsupportedType(t *testing.T) {
dt := []byte(`
variable "FOO" {
default = []
}
target "default" {}`)
t.Setenv("FOO", "bar")
_, err := ParseFile(dt, "docker-bake.hcl")
require.Error(t, err)
}


@@ -1,103 +0,0 @@
package hclparser
import (
"github.com/hashicorp/hcl/v2"
)
type filterBody struct {
body hcl.Body
schema *hcl.BodySchema
exclude bool
}
func FilterIncludeBody(body hcl.Body, schema *hcl.BodySchema) hcl.Body {
return &filterBody{
body: body,
schema: schema,
}
}
func FilterExcludeBody(body hcl.Body, schema *hcl.BodySchema) hcl.Body {
return &filterBody{
body: body,
schema: schema,
exclude: true,
}
}
func (b *filterBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
if b.exclude {
schema = subtractSchemas(schema, b.schema)
} else {
schema = intersectSchemas(schema, b.schema)
}
content, _, diag := b.body.PartialContent(schema)
return content, diag
}
func (b *filterBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
if b.exclude {
schema = subtractSchemas(schema, b.schema)
} else {
schema = intersectSchemas(schema, b.schema)
}
return b.body.PartialContent(schema)
}
func (b *filterBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
return b.body.JustAttributes()
}
func (b *filterBody) MissingItemRange() hcl.Range {
return b.body.MissingItemRange()
}
func intersectSchemas(a, b *hcl.BodySchema) *hcl.BodySchema {
result := &hcl.BodySchema{}
for _, blockA := range a.Blocks {
for _, blockB := range b.Blocks {
if blockA.Type == blockB.Type {
result.Blocks = append(result.Blocks, blockA)
break
}
}
}
for _, attrA := range a.Attributes {
for _, attrB := range b.Attributes {
if attrA.Name == attrB.Name {
result.Attributes = append(result.Attributes, attrA)
break
}
}
}
return result
}
func subtractSchemas(a, b *hcl.BodySchema) *hcl.BodySchema {
result := &hcl.BodySchema{}
for _, blockA := range a.Blocks {
found := false
for _, blockB := range b.Blocks {
if blockA.Type == blockB.Type {
found = true
break
}
}
if !found {
result.Blocks = append(result.Blocks, blockA)
}
}
for _, attrA := range a.Attributes {
found := false
for _, attrB := range b.Attributes {
if attrA.Name == attrB.Name {
found = true
break
}
}
if !found {
result.Attributes = append(result.Attributes, attrA)
}
}
return result
}
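Together these helpers let the parser decode a block piecewise: FilterIncludeBody narrows a body to the properties still being resolved, FilterExcludeBody hides the ones already done, and the two schema set operations above implement the arithmetic. A minimal usage sketch (hypothetical function and attribute names):

// expose only the "dockerfile" attribute of a block body, hiding "context"
func onlyDockerfile(body hcl.Body) (*hcl.BodyContent, hcl.Diagnostics) {
    filter := &hcl.BodySchema{
        Attributes: []hcl.AttributeSchema{{Name: "dockerfile"}},
    }
    full := &hcl.BodySchema{
        Attributes: []hcl.AttributeSchema{{Name: "dockerfile"}, {Name: "context"}},
    }
    // Content sees the intersection of full and filter, i.e. just "dockerfile"
    return FilterIncludeBody(body, filter).Content(full)
}
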


@@ -87,7 +87,7 @@ func appendJSONFuncCalls(exp hcl.Expression, m map[string]struct{}) error {
return nil return nil
} }
rng := src.FieldByName("SrcRange") rng := src.FieldByName("SrcRange")
if rng.IsZero() { if val.IsZero() {
return nil return nil
} }
var stringVal struct { var stringVal struct {


@@ -13,13 +13,11 @@ import (
"github.com/hashicorp/hcl/v2/gohcl" "github.com/hashicorp/hcl/v2/gohcl"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/zclconf/go-cty/cty" "github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/gocty"
) )
type Opt struct { type Opt struct {
LookupVar func(string) (string, bool) LookupVar func(string) (string, bool)
Vars map[string]string Vars map[string]string
ValidateLabel func(string) error
} }
type variable struct { type variable struct {
@@ -49,17 +47,11 @@ type parser struct {
attrs map[string]*hcl.Attribute attrs map[string]*hcl.Attribute
funcs map[string]*functionDef funcs map[string]*functionDef
blocks map[string]map[string][]*hcl.Block
blockValues map[*hcl.Block]reflect.Value
blockTypes map[string]reflect.Type
ectx *hcl.EvalContext ectx *hcl.EvalContext
progress map[string]struct{} progress map[string]struct{}
progressF map[string]struct{} progressF map[string]struct{}
progressB map[*hcl.Block]map[string]struct{}
doneF map[string]struct{} doneF map[string]struct{}
doneB map[*hcl.Block]map[string]struct{}
} }
func (p *parser) loadDeps(exp hcl.Expression, exclude map[string]struct{}) hcl.Diagnostics { func (p *parser) loadDeps(exp hcl.Expression, exclude map[string]struct{}) hcl.Diagnostics {
@@ -86,59 +78,6 @@ func (p *parser) loadDeps(exp hcl.Expression, exclude map[string]struct{}) hcl.D
if _, ok := exclude[v.RootName()]; ok { if _, ok := exclude[v.RootName()]; ok {
continue continue
} }
if _, ok := p.blockTypes[v.RootName()]; ok {
blockType := v.RootName()
split := v.SimpleSplit().Rel
if len(split) == 0 {
return hcl.Diagnostics{
&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid expression",
Detail: fmt.Sprintf("cannot access %s as a variable", blockType),
Subject: exp.Range().Ptr(),
Context: exp.Range().Ptr(),
},
}
}
blockName, ok := split[0].(hcl.TraverseAttr)
if !ok {
return hcl.Diagnostics{
&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid expression",
Detail: fmt.Sprintf("cannot traverse %s without attribute", blockType),
Subject: exp.Range().Ptr(),
Context: exp.Range().Ptr(),
},
}
}
blocks := p.blocks[blockType][blockName.Name]
if len(blocks) == 0 {
continue
}
var target *hcl.BodySchema
if len(split) > 1 {
if attr, ok := split[1].(hcl.TraverseAttr); ok {
target = &hcl.BodySchema{
Attributes: []hcl.AttributeSchema{{Name: attr.Name}},
Blocks: []hcl.BlockHeaderSchema{{Type: attr.Name}},
}
}
}
if err := p.resolveBlock(blocks[0], target); err != nil {
return hcl.Diagnostics{
&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid expression",
Detail: err.Error(),
Subject: v.SourceRange().Ptr(),
Context: v.SourceRange().Ptr(),
},
}
}
} else {
if err := p.resolveValue(v.RootName()); err != nil { if err := p.resolveValue(v.RootName()); err != nil {
return hcl.Diagnostics{ return hcl.Diagnostics{
&hcl.Diagnostic{ &hcl.Diagnostic{
@@ -151,13 +90,10 @@ func (p *parser) loadDeps(exp hcl.Expression, exclude map[string]struct{}) hcl.D
} }
} }
} }
}
return nil return nil
} }
// resolveFunction forces evaluation of a function, storing the result into the
// parser.
func (p *parser) resolveFunction(name string) error { func (p *parser) resolveFunction(name string) error {
if _, ok := p.doneF[name]; ok { if _, ok := p.doneF[name]; ok {
return nil return nil
@@ -174,13 +110,6 @@ func (p *parser) resolveFunction(name string) error {
} }
p.progressF[name] = struct{}{} p.progressF[name] = struct{}{}
if f.Result == nil {
return errors.Errorf("empty result not allowed for %s", name)
}
if f.Params == nil {
return errors.Errorf("empty params not allowed for %s", name)
}
paramExprs, paramsDiags := hcl.ExprList(f.Params.Expr) paramExprs, paramsDiags := hcl.ExprList(f.Params.Expr)
if paramsDiags.HasErrors() { if paramsDiags.HasErrors() {
return paramsDiags return paramsDiags
@@ -233,8 +162,6 @@ func (p *parser) resolveFunction(name string) error {
return nil return nil
} }
// resolveValue forces evaluation of a named value, storing the result into the
// parser.
func (p *parser) resolveValue(name string) (err error) { func (p *parser) resolveValue(name string) (err error) {
if _, ok := p.ectx.Variables[name]; ok { if _, ok := p.ectx.Variables[name]; ok {
return nil return nil
@@ -306,164 +233,13 @@ func (p *parser) resolveValue(name string) (err error) {
return nil return nil
} else { } else {
// TODO: support lists with csv values // TODO: support lists with csv values
return errors.Errorf("unsupported type %s for variable %s", vv.Type().FriendlyName(), name) return errors.Errorf("unsupported type %s for variable %s", v.Type(), name)
} }
} }
v = &vv v = &vv
return nil return nil
} }
// resolveBlock force-evaluates a block, storing the result in the parser. If a
// target schema is provided, only the attributes and blocks present in the
// schema will be evaluated.
func (p *parser) resolveBlock(block *hcl.Block, target *hcl.BodySchema) (err error) {
name := block.Labels[0]
if err := p.opt.ValidateLabel(name); err != nil {
return hcl.Diagnostics{
&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid name",
Detail: err.Error(),
Subject: &block.LabelRanges[0],
},
}
}
if _, ok := p.doneB[block]; !ok {
p.doneB[block] = map[string]struct{}{}
}
if _, ok := p.progressB[block]; !ok {
p.progressB[block] = map[string]struct{}{}
}
if target != nil {
// filter out attributes and blocks that are already evaluated
original := target
target = &hcl.BodySchema{}
for _, a := range original.Attributes {
if _, ok := p.doneB[block][a.Name]; !ok {
target.Attributes = append(target.Attributes, a)
}
}
for _, b := range original.Blocks {
if _, ok := p.doneB[block][b.Type]; !ok {
target.Blocks = append(target.Blocks, b)
}
}
if len(target.Attributes) == 0 && len(target.Blocks) == 0 {
return nil
}
}
if target != nil {
// detect reference cycles
for _, a := range target.Attributes {
if _, ok := p.progressB[block][a.Name]; ok {
return errors.Errorf("reference cycle not allowed for %s.%s.%s", block.Type, name, a.Name)
}
}
for _, b := range target.Blocks {
if _, ok := p.progressB[block][b.Type]; ok {
return errors.Errorf("reference cycle not allowed for %s.%s.%s", block.Type, name, b.Type)
}
}
for _, a := range target.Attributes {
p.progressB[block][a.Name] = struct{}{}
}
for _, b := range target.Blocks {
p.progressB[block][b.Type] = struct{}{}
}
}
// create a filtered body that contains only the target properties
body := func() hcl.Body {
if target != nil {
return FilterIncludeBody(block.Body, target)
}
filter := &hcl.BodySchema{}
for k := range p.doneB[block] {
filter.Attributes = append(filter.Attributes, hcl.AttributeSchema{Name: k})
filter.Blocks = append(filter.Blocks, hcl.BlockHeaderSchema{Type: k})
}
return FilterExcludeBody(block.Body, filter)
}
// load dependencies from all targeted properties
t, ok := p.blockTypes[block.Type]
if !ok {
return nil
}
schema, _ := gohcl.ImpliedBodySchema(reflect.New(t).Interface())
content, _, diag := body().PartialContent(schema)
if diag.HasErrors() {
return diag
}
for _, a := range content.Attributes {
diag := p.loadDeps(a.Expr, nil)
if diag.HasErrors() {
return diag
}
}
for _, b := range content.Blocks {
err := p.resolveBlock(b, nil)
if err != nil {
return err
}
}
// decode!
var output reflect.Value
if prev, ok := p.blockValues[block]; ok {
output = prev
} else {
output = reflect.New(t)
setLabel(output, block.Labels[0]) // attach labels early so we can reference them
}
diag = gohcl.DecodeBody(body(), p.ectx, output.Interface())
if diag.HasErrors() {
return diag
}
p.blockValues[block] = output
// mark all targeted properties as done
for _, a := range content.Attributes {
p.doneB[block][a.Name] = struct{}{}
}
for _, b := range content.Blocks {
p.doneB[block][b.Type] = struct{}{}
}
if target != nil {
for _, a := range target.Attributes {
p.doneB[block][a.Name] = struct{}{}
}
for _, b := range target.Blocks {
p.doneB[block][b.Type] = struct{}{}
}
}
// store the result into the evaluation context (so it can be referenced)
outputType, err := gocty.ImpliedType(output.Interface())
if err != nil {
return err
}
outputValue, err := gocty.ToCtyValue(output.Interface(), outputType)
if err != nil {
return err
}
var m map[string]cty.Value
if m2, ok := p.ectx.Variables[block.Type]; ok {
m = m2.AsValueMap()
}
if m == nil {
m = map[string]cty.Value{}
}
m[name] = outputValue
p.ectx.Variables[block.Type] = cty.MapVal(m)
return nil
}
func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics { func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
reserved := map[string]struct{}{} reserved := map[string]struct{}{}
schema, _ := gohcl.ImpliedBodySchema(val) schema, _ := gohcl.ImpliedBodySchema(val)
@@ -479,7 +255,6 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
if err := gohcl.DecodeBody(b, nil, &defs); err != nil { if err := gohcl.DecodeBody(b, nil, &defs); err != nil {
return err return err
} }
defsSchema, _ := gohcl.ImpliedBodySchema(defs)
if opt.LookupVar == nil { if opt.LookupVar == nil {
opt.LookupVar = func(string) (string, bool) { opt.LookupVar = func(string) (string, bool) {
@@ -487,12 +262,6 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
} }
} }
if opt.ValidateLabel == nil {
opt.ValidateLabel = func(string) error {
return nil
}
}
p := &parser{ p := &parser{
opt: opt, opt: opt,
@@ -500,16 +269,9 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
attrs: map[string]*hcl.Attribute{}, attrs: map[string]*hcl.Attribute{},
funcs: map[string]*functionDef{}, funcs: map[string]*functionDef{},
blocks: map[string]map[string][]*hcl.Block{},
blockValues: map[*hcl.Block]reflect.Value{},
blockTypes: map[string]reflect.Type{},
progress: map[string]struct{}{}, progress: map[string]struct{}{},
progressF: map[string]struct{}{}, progressF: map[string]struct{}{},
progressB: map[*hcl.Block]map[string]struct{}{},
doneF: map[string]struct{}{}, doneF: map[string]struct{}{},
doneB: map[*hcl.Block]map[string]struct{}{},
ectx: &hcl.EvalContext{ ectx: &hcl.EvalContext{
Variables: map[string]cty.Value{}, Variables: map[string]cty.Value{},
Functions: stdlibFunctions, Functions: stdlibFunctions,
@@ -531,20 +293,12 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
p.funcs[v.Name] = v p.funcs[v.Name] = v
} }
content, b, diags := b.PartialContent(schema)
if diags.HasErrors() {
return diags
}
blocks, b, diags := b.PartialContent(defsSchema)
if diags.HasErrors() {
return diags
}
attrs, diags := b.JustAttributes() attrs, diags := b.JustAttributes()
if diags.HasErrors() { if diags.HasErrors() {
if d := removeAttributesDiags(diags, reserved, p.vars); len(d) > 0 { for _, d := range diags {
return d if d.Detail != "Blocks are not allowed here." {
return diags
}
} }
} }
@@ -560,17 +314,22 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
_ = p.resolveValue(k) _ = p.resolveValue(k)
} }
for _, a := range content.Attributes { for k := range p.attrs {
if err := p.resolveValue(k); err != nil {
if diags, ok := err.(hcl.Diagnostics); ok {
return diags
}
return hcl.Diagnostics{ return hcl.Diagnostics{
&hcl.Diagnostic{ &hcl.Diagnostic{
Severity: hcl.DiagError, Severity: hcl.DiagError,
Summary: "Invalid attribute", Summary: "Invalid attribute",
Detail: "global attributes currently not supported", Detail: err.Error(),
Subject: &a.Range, Subject: &p.attrs[k].Range,
Context: &a.Range, Context: &p.attrs[k].Range,
}, },
} }
} }
}
for k := range p.vars { for k := range p.vars {
if err := p.resolveValue(k); err != nil { if err := p.resolveValue(k); err != nil {
@@ -595,32 +354,36 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
if diags, ok := err.(hcl.Diagnostics); ok { if diags, ok := err.(hcl.Diagnostics); ok {
return diags return diags
} }
var subject *hcl.Range
var context *hcl.Range
if p.funcs[k].Params != nil {
subject = &p.funcs[k].Params.Range
context = subject
} else {
for _, block := range blocks.Blocks {
if block.Type == "function" && len(block.Labels) == 1 && block.Labels[0] == k {
subject = &block.LabelRanges[0]
context = &block.DefRange
break
}
}
}
return hcl.Diagnostics{ return hcl.Diagnostics{
&hcl.Diagnostic{ &hcl.Diagnostic{
Severity: hcl.DiagError, Severity: hcl.DiagError,
Summary: "Invalid function", Summary: "Invalid function",
Detail: err.Error(), Detail: err.Error(),
Subject: subject, Subject: &p.funcs[k].Params.Range,
Context: context, Context: &p.funcs[k].Params.Range,
}, },
} }
} }
} }
content, _, diags := b.PartialContent(schema)
if diags.HasErrors() {
return diags
}
for _, a := range content.Attributes {
return hcl.Diagnostics{
&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid attribute",
Detail: "global attributes currently not supported",
Subject: &a.Range,
Context: &a.Range,
},
}
}
m := map[string]map[string][]*hcl.Block{}
for _, b := range content.Blocks { for _, b := range content.Blocks {
if len(b.Labels) == 0 || len(b.Labels) > 1 { if len(b.Labels) == 0 || len(b.Labels) > 1 {
return hcl.Diagnostics{ return hcl.Diagnostics{
@@ -633,16 +396,19 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
}, },
} }
} }
bm, ok := p.blocks[b.Type] bm, ok := m[b.Type]
if !ok { if !ok {
bm = map[string][]*hcl.Block{} bm = map[string][]*hcl.Block{}
p.blocks[b.Type] = bm m[b.Type] = bm
} }
lbl := b.Labels[0] lbl := b.Labels[0]
bm[lbl] = append(bm[lbl], b) bm[lbl] = append(bm[lbl], b)
} }
vt := reflect.ValueOf(val).Elem().Type()
numFields := vt.NumField()
type value struct { type value struct {
reflect.Value reflect.Value
idx int idx int
@@ -654,11 +420,9 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
} }
types := map[string]field{} types := map[string]field{}
vt := reflect.ValueOf(val).Elem().Type() for i := 0; i < numFields; i++ {
for i := 0; i < vt.NumField(); i++ {
tags := strings.Split(vt.Field(i).Tag.Get("hcl"), ",") tags := strings.Split(vt.Field(i).Tag.Get("hcl"), ",")
p.blockTypes[tags[0]] = vt.Field(i).Type.Elem().Elem()
types[tags[0]] = field{ types[tags[0]] = field{
idx: i, idx: i,
typ: vt.Field(i).Type, typ: vt.Field(i).Type,
@@ -670,29 +434,18 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
for _, b := range content.Blocks { for _, b := range content.Blocks {
v := reflect.ValueOf(val) v := reflect.ValueOf(val)
err := p.resolveBlock(b, nil) t, ok := types[b.Type]
if err != nil { if !ok {
if diag, ok := err.(hcl.Diagnostics); ok { continue
}
vv := reflect.New(t.typ.Elem().Elem())
diag := gohcl.DecodeBody(b.Body, p.ectx, vv.Interface())
if diag.HasErrors() { if diag.HasErrors() {
diags = append(diags, diag...) diags = append(diags, diag...)
continue continue
} }
} else {
return hcl.Diagnostics{
&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid attribute",
Detail: err.Error(),
Subject: &b.LabelRanges[0],
Context: &b.DefRange,
},
}
}
}
vv := p.blockValues[b]
t := types[b.Type]
lblIndex := setLabel(vv, b.Labels[0]) lblIndex := setLabel(vv, b.Labels[0])
oldValue, exists := t.values[b.Labels[0]] oldValue, exists := t.values[b.Labels[0]]
@@ -706,6 +459,7 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
} }
} }
} }
} }
if exists { if exists {
if m := oldValue.Value.MethodByName("Merge"); m.IsValid() { if m := oldValue.Value.MethodByName("Merge"); m.IsValid() {
@@ -726,23 +480,6 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
return diags return diags
} }
for k := range p.attrs {
if err := p.resolveValue(k); err != nil {
if diags, ok := err.(hcl.Diagnostics); ok {
return diags
}
return hcl.Diagnostics{
&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid attribute",
Detail: err.Error(),
Subject: &p.attrs[k].Range,
Context: &p.attrs[k].Range,
},
}
}
}
return nil return nil
} }
@@ -759,33 +496,3 @@ func setLabel(v reflect.Value, lbl string) int {
} }
return -1 return -1
} }
func removeAttributesDiags(diags hcl.Diagnostics, reserved map[string]struct{}, vars map[string]*variable) hcl.Diagnostics {
var fdiags hcl.Diagnostics
for _, d := range diags {
if fout := func(d *hcl.Diagnostic) bool {
// https://github.com/docker/buildx/pull/541
if d.Detail == "Blocks are not allowed here." {
return true
}
for r := range reserved {
// JSON body objects don't handle repeated blocks the way HCL does, but
// reserved-name attributes should be allowed when multiple bodies are merged.
// https://github.com/hashicorp/hcl/blob/main/json/spec.md#blocks
if strings.HasPrefix(d.Detail, fmt.Sprintf(`Argument "%s" was already set at `, r)) {
return true
}
}
for v := range vars {
// Do the same for global variables
if strings.HasPrefix(d.Detail, fmt.Sprintf(`Argument "%s" was already set at `, v)) {
return true
}
}
return false
}(d); !fout {
fdiags = append(fdiags, d)
}
}
return fdiags
}


@@ -1,15 +1,12 @@
package hclparser package hclparser
import ( import (
"time"
"github.com/hashicorp/go-cty-funcs/cidr" "github.com/hashicorp/go-cty-funcs/cidr"
"github.com/hashicorp/go-cty-funcs/crypto" "github.com/hashicorp/go-cty-funcs/crypto"
"github.com/hashicorp/go-cty-funcs/encoding" "github.com/hashicorp/go-cty-funcs/encoding"
"github.com/hashicorp/go-cty-funcs/uuid" "github.com/hashicorp/go-cty-funcs/uuid"
"github.com/hashicorp/hcl/v2/ext/tryfunc" "github.com/hashicorp/hcl/v2/ext/tryfunc"
"github.com/hashicorp/hcl/v2/ext/typeexpr" "github.com/hashicorp/hcl/v2/ext/typeexpr"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/function" "github.com/zclconf/go-cty/cty/function"
"github.com/zclconf/go-cty/cty/function/stdlib" "github.com/zclconf/go-cty/cty/function/stdlib"
) )
@@ -99,7 +96,6 @@ var stdlibFunctions = map[string]function.Function{
"substr": stdlib.SubstrFunc, "substr": stdlib.SubstrFunc,
"subtract": stdlib.SubtractFunc, "subtract": stdlib.SubtractFunc,
"timeadd": stdlib.TimeAddFunc, "timeadd": stdlib.TimeAddFunc,
"timestamp": timestampFunc,
"title": stdlib.TitleFunc, "title": stdlib.TitleFunc,
"trim": stdlib.TrimFunc, "trim": stdlib.TrimFunc,
"trimprefix": stdlib.TrimPrefixFunc, "trimprefix": stdlib.TrimPrefixFunc,
@@ -113,14 +109,3 @@ var stdlibFunctions = map[string]function.Function{
"values": stdlib.ValuesFunc, "values": stdlib.ValuesFunc,
"zipmap": stdlib.ZipmapFunc, "zipmap": stdlib.ZipmapFunc,
} }
// timestampFunc constructs a function that returns a string representation of the current date and time.
//
// This function was imported from terraform's datetime utilities.
var timestampFunc = function.New(&function.Spec{
Params: []function.Parameter{},
Type: function.StaticReturnType(cty.String),
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
return cty.StringVal(time.Now().UTC().Format(time.RFC3339)), nil
},
})
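Because timestampFunc is an ordinary go-cty function value, it can be exercised directly with Call. A runnable sketch reusing the same construction (the printed value is an example):

package main

import (
	"fmt"
	"time"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function"
)

var timestampFunc = function.New(&function.Spec{
	Params: []function.Parameter{},
	Type:   function.StaticReturnType(cty.String),
	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
		return cty.StringVal(time.Now().UTC().Format(time.RFC3339)), nil
	},
})

func main() {
	v, err := timestampFunc.Call(nil) // no parameters declared, so no arguments needed
	if err != nil {
		panic(err)
	}
	fmt.Println(v.AsString()) // e.g. "2021-11-25T08:29:58Z"
}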

View File

@@ -6,7 +6,7 @@ import (
	"context"
	"strings"

-	"github.com/docker/buildx/builder"
+	"github.com/docker/buildx/build"
	"github.com/docker/buildx/driver"
	"github.com/docker/buildx/util/progress"
	"github.com/moby/buildkit/client"
@@ -20,7 +20,7 @@ type Input struct {
	URL   string
}

-func ReadRemoteFiles(ctx context.Context, nodes []builder.Node, url string, names []string, pw progress.Writer) ([]File, *Input, error) {
+func ReadRemoteFiles(ctx context.Context, dis []build.DriverInfo, url string, names []string, pw progress.Writer) ([]File, *Input, error) {
	var filename string
	st, ok := detectGitContext(url)
	if !ok {
@@ -33,18 +33,18 @@ func ReadRemoteFiles(ctx context.Context, nodes []builder.Node, url string, name
	inp := &Input{State: st, URL: url}
	var files []File

-	var node *builder.Node
-	for _, n := range nodes {
-		if n.Err == nil {
-			node = &n
+	var di *build.DriverInfo
+	for _, d := range dis {
+		if d.Err == nil {
+			di = &d
			continue
		}
	}
-	if node == nil {
+	if di == nil {
		return nil, nil, nil
	}

-	c, err := driver.Boot(ctx, ctx, node.Driver, pw)
+	c, err := driver.Boot(ctx, ctx, di.Driver, pw)
	if err != nil {
		return nil, nil, err
	}

File diff suppressed because it is too large

View File

@@ -1,109 +0,0 @@
package build
import (
"context"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/docker/buildx/util/gitutil"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
)
const DockerfileLabel = "com.docker.image.source.entrypoint"
func getGitAttributes(ctx context.Context, contextPath string, dockerfilePath string) (res map[string]string) {
res = make(map[string]string)
if contextPath == "" {
return
}
setGitLabels := false
if v, ok := os.LookupEnv("BUILDX_GIT_LABELS"); ok {
if v == "full" { // backward compatibility with old "full" mode
setGitLabels = true
} else if v, _ := strconv.ParseBool(v); v {
setGitLabels = v
}
}
setGitInfo := true
if v, ok := os.LookupEnv("BUILDX_GIT_INFO"); ok {
if v, _ := strconv.ParseBool(v); v {
setGitInfo = v
}
}
if !setGitLabels && !setGitInfo {
return
}
// figure out in which directory the git command needs to run
var wd string
if filepath.IsAbs(contextPath) {
wd = contextPath
} else {
cwd, _ := os.Getwd()
wd, _ = filepath.Abs(filepath.Join(cwd, contextPath))
}
gitc := gitutil.New(gitutil.WithContext(ctx), gitutil.WithWorkingDir(wd))
if !gitc.IsInsideWorkTree() {
logrus.Warnf("Unable to determine Git information")
return
}
var resRevision, resSource, resDockerfilePath string
if sha, err := gitc.FullCommit(); err == nil && sha != "" {
resRevision = sha
if gitc.IsDirty() {
resRevision += "-dirty"
}
}
if rurl, err := gitc.RemoteURL(); err == nil && rurl != "" {
resSource = rurl
}
if setGitLabels {
if root, err := gitc.RootDir(); err == nil && root != "" {
if dockerfilePath == "" {
dockerfilePath = filepath.Join(wd, "Dockerfile")
}
if !filepath.IsAbs(dockerfilePath) {
cwd, _ := os.Getwd()
dockerfilePath = filepath.Join(cwd, dockerfilePath)
}
dockerfilePath, _ = filepath.Rel(root, dockerfilePath)
if !strings.HasPrefix(dockerfilePath, "..") {
resDockerfilePath = dockerfilePath
}
}
}
if resSource != "" {
if setGitLabels {
res["label:"+specs.AnnotationSource] = resSource
}
if setGitInfo {
res["vcs:source"] = resSource
}
}
if resRevision != "" {
if setGitLabels {
res["label:"+specs.AnnotationRevision] = resRevision
}
if setGitInfo {
res["vcs:revision"] = resRevision
}
}
if resDockerfilePath != "" {
if setGitLabels {
res["label:"+DockerfileLabel] = resDockerfilePath
}
}
return
}

View File

@@ -1,132 +0,0 @@
package build
import (
"context"
"os"
"path/filepath"
"strings"
"testing"
"github.com/docker/buildx/util/gitutil"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/stretchr/testify/assert"
)
func setupTest(tb testing.TB) {
gitutil.Mktmp(tb)
gitutil.GitInit(tb)
df := []byte("FROM alpine:latest\n")
assert.NoError(tb, os.WriteFile("Dockerfile", df, 0644))
gitutil.GitAdd(tb, "Dockerfile")
gitutil.GitCommit(tb, "initial commit")
}
func TestGetGitAttributesNoContext(t *testing.T) {
setupTest(t)
gitattrs := getGitAttributes(context.Background(), "", "Dockerfile")
assert.Empty(t, gitattrs)
}
func TestGetGitAttributes(t *testing.T) {
cases := []struct {
name string
envGitLabels string
envGitInfo string
expected []string
}{
{
name: "default",
envGitLabels: "",
envGitInfo: "",
expected: []string{
"vcs:revision",
},
},
{
name: "gitinfo",
envGitLabels: "false",
envGitInfo: "true",
expected: []string{
"vcs:revision",
},
},
{
name: "gitlabels",
envGitLabels: "true",
envGitInfo: "false",
expected: []string{
"label:" + DockerfileLabel,
"label:" + specs.AnnotationRevision,
},
},
{
name: "both",
envGitLabels: "true",
envGitInfo: "",
expected: []string{
"label:" + DockerfileLabel,
"label:" + specs.AnnotationRevision,
"vcs:revision",
},
},
}
for _, tt := range cases {
tt := tt
t.Run(tt.name, func(t *testing.T) {
setupTest(t)
if tt.envGitLabels != "" {
t.Setenv("BUILDX_GIT_LABELS", tt.envGitLabels)
}
if tt.envGitInfo != "" {
t.Setenv("BUILDX_GIT_INFO", tt.envGitInfo)
}
gitattrs := getGitAttributes(context.Background(), ".", "Dockerfile")
for _, e := range tt.expected {
assert.Contains(t, gitattrs, e)
assert.NotEmpty(t, gitattrs[e])
if e == "label:"+DockerfileLabel {
assert.Equal(t, "Dockerfile", gitattrs[e])
}
}
})
}
}
func TestGetGitAttributesWithRemote(t *testing.T) {
setupTest(t)
gitutil.GitSetRemote(t, "git@github.com:docker/buildx.git")
t.Setenv("BUILDX_GIT_LABELS", "true")
gitattrs := getGitAttributes(context.Background(), ".", "Dockerfile")
assert.Equal(t, 5, len(gitattrs))
assert.Contains(t, gitattrs, "label:"+DockerfileLabel)
assert.Equal(t, "Dockerfile", gitattrs["label:"+DockerfileLabel])
assert.Contains(t, gitattrs, "label:"+specs.AnnotationRevision)
assert.NotEmpty(t, gitattrs["label:"+specs.AnnotationRevision])
assert.Contains(t, gitattrs, "label:"+specs.AnnotationSource)
assert.Equal(t, "git@github.com:docker/buildx.git", gitattrs["label:"+specs.AnnotationSource])
assert.Contains(t, gitattrs, "vcs:revision")
assert.NotEmpty(t, gitattrs["vcs:revision"])
assert.Contains(t, gitattrs, "vcs:source")
assert.Equal(t, "git@github.com:docker/buildx.git", gitattrs["vcs:source"])
}
func TestGetGitAttributesDirty(t *testing.T) {
setupTest(t)
// make a change to test dirty flag
df := []byte("FROM alpine:edge\n")
assert.NoError(t, os.Mkdir("dir", 0755))
assert.NoError(t, os.WriteFile(filepath.Join("dir", "Dockerfile"), df, 0644))
t.Setenv("BUILDX_GIT_LABELS", "true")
gitattrs := getGitAttributes(context.Background(), ".", "Dockerfile")
assert.Equal(t, 3, len(gitattrs))
assert.Contains(t, gitattrs, "label:"+DockerfileLabel)
assert.Equal(t, "Dockerfile", gitattrs["label:"+DockerfileLabel])
assert.Contains(t, gitattrs, "label:"+specs.AnnotationRevision)
assert.True(t, strings.HasSuffix(gitattrs["label:"+specs.AnnotationRevision], "-dirty"))
assert.Contains(t, gitattrs, "vcs:revision")
assert.True(t, strings.HasSuffix(gitattrs["vcs:revision"], "-dirty"))
}

View File

@@ -2,7 +2,7 @@ package build
import (
	"context"
-	"os"
+	"io/ioutil"
	"path/filepath"

	"github.com/docker/buildx/driver"
@@ -53,11 +53,11 @@ func createTempDockerfileFromURL(ctx context.Context, d driver.Driver, url strin
	if err != nil {
		return nil, err
	}
-	dir, err := os.MkdirTemp("", "buildx")
+	dir, err := ioutil.TempDir("", "buildx")
	if err != nil {
		return nil, err
	}
-	if err := os.WriteFile(filepath.Join(dir, "Dockerfile"), dt, 0600); err != nil {
+	if err := ioutil.WriteFile(filepath.Join(dir, "Dockerfile"), dt, 0600); err != nil {
		return nil, err
	}
	out = dir
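The swap back to ioutil in this hunk is purely about toolchain support: os.MkdirTemp and os.WriteFile only exist since Go 1.16, where they replaced the now-deprecated ioutil helpers. A small sketch of the correspondence:

package main

import (
	"io/ioutil"
	"os"
	"path/filepath"
)

func main() {
	// Go 1.16+ spelling.
	dir, err := os.MkdirTemp("", "buildx")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	if err := os.WriteFile(filepath.Join(dir, "Dockerfile"), []byte("FROM alpine\n"), 0600); err != nil {
		panic(err)
	}

	// Pre-1.16 spelling used on the v0.7 branch; behavior is identical.
	dir2, err := ioutil.TempDir("", "buildx")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir2)
	if err := ioutil.WriteFile(filepath.Join(dir2, "Dockerfile"), []byte("FROM alpine\n"), 0600); err != nil {
		panic(err)
	}
}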

View File

@@ -11,14 +11,8 @@ import (
	"github.com/pkg/errors"
)

-const (
-	// archiveHeaderSize is the number of bytes in an archive header
-	archiveHeaderSize = 512
-	// mobyHostGatewayName defines a special string which users can append to
-	// --add-host to add an extra entry in /etc/hosts that maps
-	// host.docker.internal to the host IP
-	mobyHostGatewayName = "host-gateway"
-)
+// archiveHeaderSize is the number of bytes in an archive header
+const archiveHeaderSize = 512

func isLocalDir(c string) bool {
	st, err := os.Stat(c)
@@ -45,23 +39,18 @@ func isArchive(header []byte) bool {
}

// toBuildkitExtraHosts converts hosts from docker key:value format to buildkit's csv format
-func toBuildkitExtraHosts(inp []string, mobyDriver bool) (string, error) {
+func toBuildkitExtraHosts(inp []string) (string, error) {
	if len(inp) == 0 {
		return "", nil
	}
	hosts := make([]string, 0, len(inp))
	for _, h := range inp {
-		host, ip, ok := strings.Cut(h, ":")
-		if !ok || host == "" || ip == "" {
+		parts := strings.Split(h, ":")
+		if len(parts) != 2 || parts[0] == "" || net.ParseIP(parts[1]) == nil {
			return "", errors.Errorf("invalid host %s", h)
		}
-		// Skip IP address validation for "host-gateway" string with moby driver
-		if !mobyDriver || ip != mobyHostGatewayName {
-			if net.ParseIP(ip) == nil {
-				return "", errors.Errorf("invalid host %s", h)
-			}
-		}
-		hosts = append(hosts, host+"="+ip)
+		hosts = append(hosts, parts[0]+"="+parts[1])
	}
	return strings.Join(hosts, ","), nil
}
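A self-contained sketch of the v0.7 conversion above (the host list values are arbitrary examples): docker-style host:ip pairs become one comma-separated host=ip string for BuildKit. Note that the newer strings.Cut variant on the removed side also tolerates IPv6 addresses, since only the first colon is treated as the separator.

package main

import (
	"fmt"
	"net"
	"strings"
)

func toExtraHosts(inp []string) (string, error) {
	hosts := make([]string, 0, len(inp))
	for _, h := range inp {
		parts := strings.Split(h, ":")
		if len(parts) != 2 || parts[0] == "" || net.ParseIP(parts[1]) == nil {
			return "", fmt.Errorf("invalid host %s", h)
		}
		hosts = append(hosts, parts[0]+"="+parts[1])
	}
	return strings.Join(hosts, ","), nil
}

func main() {
	s, err := toExtraHosts([]string{"myhost:192.168.0.1", "other:10.0.0.2"})
	fmt.Println(s, err) // myhost=192.168.0.1,other=10.0.0.2 <nil>
}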

View File

@@ -1,292 +0,0 @@
package builder
import (
"context"
"os"
"sort"
"sync"
"github.com/docker/buildx/driver"
"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/dockerutil"
"github.com/docker/buildx/util/imagetools"
"github.com/docker/buildx/util/progress"
"github.com/docker/cli/cli/command"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
// Builder represents an active builder object
type Builder struct {
*store.NodeGroup
driverFactory driverFactory
nodes []Node
opts builderOpts
err error
}
type builderOpts struct {
dockerCli command.Cli
name string
txn *store.Txn
contextPathHash string
validate bool
}
// Option provides a variadic option for configuring the builder.
type Option func(b *Builder)
// WithName sets builder name.
func WithName(name string) Option {
return func(b *Builder) {
b.opts.name = name
}
}
// WithStore sets a store instance used at init.
func WithStore(txn *store.Txn) Option {
return func(b *Builder) {
b.opts.txn = txn
}
}
// WithContextPathHash is used for determining pods in the Kubernetes driver instance.
func WithContextPathHash(contextPathHash string) Option {
return func(b *Builder) {
b.opts.contextPathHash = contextPathHash
}
}
// WithSkippedValidation skips builder context validation.
func WithSkippedValidation() Option {
return func(b *Builder) {
b.opts.validate = false
}
}
// New initializes a new builder client
func New(dockerCli command.Cli, opts ...Option) (_ *Builder, err error) {
b := &Builder{
opts: builderOpts{
dockerCli: dockerCli,
validate: true,
},
}
for _, opt := range opts {
opt(b)
}
if b.opts.txn == nil {
// if store instance is nil we create a short-lived one using the
// default store and ensure we release it on completion
var release func()
b.opts.txn, release, err = storeutil.GetStore(dockerCli)
if err != nil {
return nil, err
}
defer release()
}
if b.opts.name != "" {
if b.NodeGroup, err = storeutil.GetNodeGroup(b.opts.txn, dockerCli, b.opts.name); err != nil {
return nil, err
}
} else {
if b.NodeGroup, err = storeutil.GetCurrentInstance(b.opts.txn, dockerCli); err != nil {
return nil, err
}
}
if b.opts.validate {
if err = b.Validate(); err != nil {
return nil, err
}
}
return b, nil
}
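Callers then use the standard functional-options shape. A hedged sketch against the removed master-side API shown above, not repository code; it assumes an initialized command.Cli, and the instance name and context-path string are illustrative:

package main

import (
	"context"

	"github.com/docker/buildx/builder"
	"github.com/docker/cli/cli/command"
)

// loadMyBuilder resolves a named builder and loads its nodes.
func loadMyBuilder(ctx context.Context, dockerCli command.Cli) ([]builder.Node, error) {
	b, err := builder.New(dockerCli,
		builder.WithName("mybuilder"),       // resolve a named instance instead of the current one
		builder.WithContextPathHash("/src"), // lets the kubernetes driver pick matching pods
		builder.WithSkippedValidation(),     // skip the docker-context collision check
	)
	if err != nil {
		return nil, err
	}
	return b.LoadNodes(ctx, false) // LoadNodes is defined in the sibling nodes file below
}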
// Validate validates builder context
func (b *Builder) Validate() error {
if b.NodeGroup.DockerContext {
list, err := b.opts.dockerCli.ContextStore().List()
if err != nil {
return err
}
currentContext := b.opts.dockerCli.CurrentContext()
for _, l := range list {
if l.Name == b.Name && l.Name != currentContext {
return errors.Errorf("use `docker --context=%s buildx` to switch to context %q", l.Name, l.Name)
}
}
}
return nil
}
// ContextName returns builder context name if available.
func (b *Builder) ContextName() string {
ctxbuilders, err := b.opts.dockerCli.ContextStore().List()
if err != nil {
return ""
}
for _, cb := range ctxbuilders {
if b.NodeGroup.Driver == "docker" && len(b.NodeGroup.Nodes) == 1 && b.NodeGroup.Nodes[0].Endpoint == cb.Name {
return cb.Name
}
}
return ""
}
// ImageOpt returns registry auth configuration
func (b *Builder) ImageOpt() (imagetools.Opt, error) {
return storeutil.GetImageConfig(b.opts.dockerCli, b.NodeGroup)
}
// Boot bootstraps a builder
func (b *Builder) Boot(ctx context.Context) (bool, error) {
toBoot := make([]int, 0, len(b.nodes))
for idx, d := range b.nodes {
if d.Err != nil || d.Driver == nil || d.DriverInfo == nil {
continue
}
if d.DriverInfo.Status != driver.Running {
toBoot = append(toBoot, idx)
}
}
if len(toBoot) == 0 {
return false, nil
}
printer, err := progress.NewPrinter(context.TODO(), os.Stderr, os.Stderr, progress.PrinterModeAuto)
if err != nil {
return false, err
}
baseCtx := ctx
eg, _ := errgroup.WithContext(ctx)
for _, idx := range toBoot {
func(idx int) {
eg.Go(func() error {
pw := progress.WithPrefix(printer, b.NodeGroup.Nodes[idx].Name, len(toBoot) > 1)
_, err := driver.Boot(ctx, baseCtx, b.nodes[idx].Driver, pw)
if err != nil {
b.nodes[idx].Err = err
}
return nil
})
}(idx)
}
err = eg.Wait()
err1 := printer.Wait()
if err == nil {
err = err1
}
return true, err
}
// Inactive checks if all nodes are inactive for this builder.
func (b *Builder) Inactive() bool {
for _, d := range b.nodes {
if d.DriverInfo != nil && d.DriverInfo.Status == driver.Running {
return false
}
}
return true
}
// Err returns error if any.
func (b *Builder) Err() error {
return b.err
}
type driverFactory struct {
driver.Factory
once sync.Once
}
// Factory returns the driver factory.
func (b *Builder) Factory(ctx context.Context) (_ driver.Factory, err error) {
b.driverFactory.once.Do(func() {
if b.Driver != "" {
b.driverFactory.Factory, err = driver.GetFactory(b.Driver, true)
if err != nil {
return
}
} else {
// empty driver means nodegroup was implicitly created as a default
// driver for a docker context and allows falling back to a
// docker-container driver for older daemon that doesn't support
// buildkit (< 18.06).
ep := b.nodes[0].Endpoint
var dockerapi *dockerutil.ClientAPI
dockerapi, err = dockerutil.NewClientAPI(b.opts.dockerCli, b.nodes[0].Endpoint)
if err != nil {
return
}
// checking that the endpoint is healthy is needed to determine the driver type;
// if this fails then we can't continue with driver selection.
if _, err = dockerapi.Ping(ctx); err != nil {
return
}
b.driverFactory.Factory, err = driver.GetDefaultFactory(ctx, ep, dockerapi, false)
if err != nil {
return
}
b.Driver = b.driverFactory.Factory.Name()
}
})
return b.driverFactory.Factory, err
}
// GetBuilders returns all builders
func GetBuilders(dockerCli command.Cli, txn *store.Txn) ([]*Builder, error) {
storeng, err := txn.List()
if err != nil {
return nil, err
}
builders := make([]*Builder, len(storeng))
seen := make(map[string]struct{})
for i, ng := range storeng {
b, err := New(dockerCli,
WithName(ng.Name),
WithStore(txn),
WithSkippedValidation(),
)
if err != nil {
return nil, err
}
builders[i] = b
seen[b.NodeGroup.Name] = struct{}{}
}
contexts, err := dockerCli.ContextStore().List()
if err != nil {
return nil, err
}
sort.Slice(contexts, func(i, j int) bool {
return contexts[i].Name < contexts[j].Name
})
for _, c := range contexts {
// if a context has the same name as an instance from the store, do not
// add it to the builders list. An instance from the store takes
// precedence over context builders.
if _, ok := seen[c.Name]; ok {
continue
}
b, err := New(dockerCli,
WithName(c.Name),
WithStore(txn),
WithSkippedValidation(),
)
if err != nil {
return nil, err
}
builders = append(builders, b)
}
return builders, nil
}
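A short usage sketch for listing everything, again assuming an initialized command.Cli (this is illustrative, not repository code); it opens a short-lived store transaction the same way New does internally:

package main

import (
	"fmt"

	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/store/storeutil"
	"github.com/docker/cli/cli/command"
)

// listBuilders prints every builder, store-backed instances first,
// then docker contexts (as GetBuilders orders them).
func listBuilders(dockerCli command.Cli) error {
	txn, release, err := storeutil.GetStore(dockerCli)
	if err != nil {
		return err
	}
	defer release()

	builders, err := builder.GetBuilders(dockerCli, txn)
	if err != nil {
		return err
	}
	for _, b := range builders {
		fmt.Println(b.Name, b.Driver)
	}
	return nil
}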

View File

@@ -1,201 +0,0 @@
package builder
import (
"context"
"github.com/docker/buildx/driver"
ctxkube "github.com/docker/buildx/driver/kubernetes/context"
"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/dockerutil"
"github.com/docker/buildx/util/imagetools"
"github.com/docker/buildx/util/platformutil"
"github.com/moby/buildkit/util/grpcerrors"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc/codes"
)
type Node struct {
store.Node
Driver driver.Driver
DriverInfo *driver.Info
Platforms []ocispecs.Platform
ImageOpt imagetools.Opt
ProxyConfig map[string]string
Version string
Err error
}
// Nodes returns nodes for this builder.
func (b *Builder) Nodes() []Node {
return b.nodes
}
// LoadNodes loads and returns nodes for this builder.
// TODO: this should be a method on a Node object and lazy load data for each driver.
func (b *Builder) LoadNodes(ctx context.Context, withData bool) (_ []Node, err error) {
eg, _ := errgroup.WithContext(ctx)
b.nodes = make([]Node, len(b.NodeGroup.Nodes))
defer func() {
if b.err == nil && err != nil {
b.err = err
}
}()
factory, err := b.Factory(ctx)
if err != nil {
return nil, err
}
imageopt, err := b.ImageOpt()
if err != nil {
return nil, err
}
for i, n := range b.NodeGroup.Nodes {
func(i int, n store.Node) {
eg.Go(func() error {
node := Node{
Node: n,
ProxyConfig: storeutil.GetProxyConfig(b.opts.dockerCli),
}
defer func() {
b.nodes[i] = node
}()
dockerapi, err := dockerutil.NewClientAPI(b.opts.dockerCli, n.Endpoint)
if err != nil {
node.Err = err
return nil
}
contextStore := b.opts.dockerCli.ContextStore()
var kcc driver.KubeClientConfig
kcc, err = ctxkube.ConfigFromContext(n.Endpoint, contextStore)
if err != nil {
// err is returned if n.Endpoint is a non-context name like "unix:///var/run/docker.sock".
// try again with name="default".
// FIXME(@AkihiroSuda): n should retain real context name.
kcc, err = ctxkube.ConfigFromContext("default", contextStore)
if err != nil {
logrus.Error(err)
}
}
tryToUseKubeConfigInCluster := false
if kcc == nil {
tryToUseKubeConfigInCluster = true
} else {
if _, err := kcc.ClientConfig(); err != nil {
tryToUseKubeConfigInCluster = true
}
}
if tryToUseKubeConfigInCluster {
kccInCluster := driver.KubeClientConfigInCluster{}
if _, err := kccInCluster.ClientConfig(); err == nil {
logrus.Debug("using kube config in cluster")
kcc = kccInCluster
}
}
d, err := driver.GetDriver(ctx, "buildx_buildkit_"+n.Name, factory, n.Endpoint, dockerapi, imageopt.Auth, kcc, n.Flags, n.Files, n.DriverOpts, n.Platforms, b.opts.contextPathHash)
if err != nil {
node.Err = err
return nil
}
node.Driver = d
node.ImageOpt = imageopt
if withData {
if err := node.loadData(ctx); err != nil {
node.Err = err
}
}
return nil
})
}(i, n)
}
if err := eg.Wait(); err != nil {
return nil, err
}
// TODO: This should be done in the routine loading driver data
if withData {
kubernetesDriverCount := 0
for _, d := range b.nodes {
if d.DriverInfo != nil && len(d.DriverInfo.DynamicNodes) > 0 {
kubernetesDriverCount++
}
}
isAllKubernetesDrivers := len(b.nodes) == kubernetesDriverCount
if isAllKubernetesDrivers {
var nodes []Node
var dynamicNodes []store.Node
for _, di := range b.nodes {
// dynamic nodes are used in the Kubernetes driver.
// Kubernetes pods are dynamically mapped to BuildKit nodes.
if di.DriverInfo != nil && len(di.DriverInfo.DynamicNodes) > 0 {
for i := 0; i < len(di.DriverInfo.DynamicNodes); i++ {
diClone := di
if pl := di.DriverInfo.DynamicNodes[i].Platforms; len(pl) > 0 {
diClone.Platforms = pl
}
nodes = append(nodes, diClone) // append the clone so the per-node platforms stick
}
dynamicNodes = append(dynamicNodes, di.DriverInfo.DynamicNodes...)
}
}
// replace rather than append (drop the static nodes from the store)
b.NodeGroup.Nodes = dynamicNodes
b.nodes = nodes
b.NodeGroup.Dynamic = true
}
}
return b.nodes, nil
}
func (n *Node) loadData(ctx context.Context) error {
if n.Driver == nil {
return nil
}
info, err := n.Driver.Info(ctx)
if err != nil {
return err
}
n.DriverInfo = info
if n.DriverInfo.Status == driver.Running {
driverClient, err := n.Driver.Client(ctx)
if err != nil {
return err
}
workers, err := driverClient.ListWorkers(ctx)
if err != nil {
return errors.Wrap(err, "listing workers")
}
for _, w := range workers {
n.Platforms = append(n.Platforms, w.Platforms...)
}
n.Platforms = platformutil.Dedupe(n.Platforms)
inf, err := driverClient.Info(ctx)
if err != nil {
if st, ok := grpcerrors.AsGRPCStatus(err); ok && st.Code() == codes.Unimplemented {
n.Version, err = n.Driver.Version(ctx)
if err != nil {
return errors.Wrap(err, "getting version")
}
}
} else {
n.Version = inf.BuildkitVersion.Version
}
}
return nil
}

View File

@@ -15,8 +15,13 @@ import (
	cliflags "github.com/docker/cli/cli/flags"
	"github.com/moby/buildkit/solver/errdefs"
	"github.com/moby/buildkit/util/stack"
+	"github.com/moby/buildkit/util/tracing/detect"
+	"go.opentelemetry.io/otel"

-	_ "k8s.io/client-go/plugin/pkg/client/auth/azure"
+	_ "github.com/moby/buildkit/util/tracing/detect/delegated"
+	_ "github.com/moby/buildkit/util/tracing/env"
+
+	// FIXME: "k8s.io/client-go/plugin/pkg/client/auth/azure" is excluded because of compilation error
	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
	_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
	_ "k8s.io/client-go/plugin/pkg/client/auth/openstack"
@@ -24,50 +29,55 @@ import (
	_ "github.com/docker/buildx/driver/docker"
	_ "github.com/docker/buildx/driver/docker-container"
	_ "github.com/docker/buildx/driver/kubernetes"
-	_ "github.com/docker/buildx/driver/remote"
)

+var experimental string
+
func init() {
	seed.WithTimeAndRand()
	stack.SetVersionInfo(version.Version, version.Revision)
-}
-
-func runStandalone(cmd *command.DockerCli) error {
-	if err := cmd.Initialize(cliflags.NewClientOptions()); err != nil {
-		return err
-	}
-	rootCmd := commands.NewRootCmd(os.Args[0], false, cmd)
-	return rootCmd.Execute()
-}
-
-func runPlugin(cmd *command.DockerCli) error {
-	rootCmd := commands.NewRootCmd("buildx", true, cmd)
-	return plugin.RunPlugin(cmd, rootCmd, manager.Metadata{
-		SchemaVersion: "0.1.0",
-		Vendor:        "Docker Inc.",
-		Version:       version.Version,
-	})
+	detect.ServiceName = "buildx"
+	// do not log tracing errors to stdio
+	otel.SetErrorHandler(skipErrors{})
}

func main() {
-	cmd, err := command.NewDockerCli()
+	if os.Getenv("DOCKER_CLI_PLUGIN_ORIGINAL_CLI_COMMAND") == "" {
+		if len(os.Args) < 2 || os.Args[1] != manager.MetadataSubcommandName {
+			dockerCli, err := command.NewDockerCli()
+			if err != nil {
+				fmt.Fprintln(os.Stderr, err)
+				os.Exit(1)
+			}
+			opts := cliflags.NewClientOptions()
+			dockerCli.Initialize(opts)
+			rootCmd := commands.NewRootCmd(os.Args[0], false, dockerCli)
+			if err := rootCmd.Execute(); err != nil {
+				os.Exit(1)
+			}
+			os.Exit(0)
+		}
+	}
+
+	dockerCli, err := command.NewDockerCli()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
-	if plugin.RunningStandalone() {
-		err = runStandalone(cmd)
-	} else {
-		err = runPlugin(cmd)
-	}
-	if err == nil {
-		return
-	}
-	if sterr, ok := err.(cli.StatusError); ok {
-		if sterr.Status != "" {
-			fmt.Fprintln(cmd.Err(), sterr.Status)
+
+	p := commands.NewRootCmd("buildx", true, dockerCli)
+	meta := manager.Metadata{
+		SchemaVersion: "0.1.0",
+		Vendor:        "Docker Inc.",
+		Version:       version.Version,
+		Experimental:  experimental != "",
+	}
+
+	if err := plugin.RunPlugin(dockerCli, p, meta); err != nil {
+		if sterr, ok := err.(cli.StatusError); ok {
+			if sterr.Status != "" {
+				fmt.Fprintln(dockerCli.Err(), sterr.Status)
			}
		// StatusError should only be used for errors, and all errors should
		// have a non-zero exit status, so never exit with 0
@@ -76,15 +86,20 @@ func main() {
		}
		os.Exit(sterr.StatusCode)
	}
-	for _, s := range errdefs.Sources(err) {
-		s.Print(cmd.Err())
-	}
-	if debug.IsEnabled() {
-		fmt.Fprintf(cmd.Err(), "ERROR: %+v", stack.Formatter(err))
-	} else {
-		fmt.Fprintf(cmd.Err(), "ERROR: %v\n", err)
+		for _, s := range errdefs.Sources(err) {
+			s.Print(dockerCli.Err())
+		}
+		if debug.IsEnabled() {
+			fmt.Fprintf(dockerCli.Err(), "error: %+v", stack.Formatter(err))
+		} else {
+			fmt.Fprintf(dockerCli.Err(), "error: %v\n", err)
+		}
+		os.Exit(1)
	}
-	os.Exit(1)
}
+
+type skipErrors struct{}
+
+func (skipErrors) Handle(err error) {}
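skipErrors satisfies OpenTelemetry's ErrorHandler interface, so tracing failures never reach stderr. A minimal standalone illustration of the same silencing trick (the sample error is invented):

package main

import (
	"errors"

	"go.opentelemetry.io/otel"
)

type skipErrors struct{}

// Handle implements otel.ErrorHandler by dropping the error.
func (skipErrors) Handle(err error) {}

func main() {
	otel.SetErrorHandler(skipErrors{})
	// Errors reported through the global handler are now discarded
	// instead of being logged.
	otel.Handle(errors.New("exporter connection refused"))
}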

View File

@@ -1,19 +0,0 @@
package main
import (
"github.com/moby/buildkit/util/tracing/detect"
"go.opentelemetry.io/otel"
_ "github.com/moby/buildkit/util/tracing/detect/delegated"
_ "github.com/moby/buildkit/util/tracing/env"
)
func init() {
detect.ServiceName = "buildx"
// do not log tracing errors to stdio
otel.SetErrorHandler(skipErrors{})
}
type skipErrors struct{}
func (skipErrors) Handle(err error) {}

View File

@@ -9,13 +9,11 @@ import (
	"github.com/containerd/containerd/platforms"
	"github.com/docker/buildx/bake"
	"github.com/docker/buildx/build"
-	"github.com/docker/buildx/builder"
-	"github.com/docker/buildx/util/buildflags"
	"github.com/docker/buildx/util/confutil"
-	"github.com/docker/buildx/util/dockerutil"
	"github.com/docker/buildx/util/progress"
	"github.com/docker/buildx/util/tracing"
	"github.com/docker/cli/cli/command"
+	"github.com/docker/docker/pkg/ioutils"
	"github.com/moby/buildkit/util/appcontext"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
@@ -74,20 +72,11 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
	if in.pull != nil {
		overrides = append(overrides, fmt.Sprintf("*.pull=%t", *in.pull))
	}
-	if in.sbom != "" {
-		overrides = append(overrides, fmt.Sprintf("*.attest=%s", buildflags.CanonicalizeAttest("sbom", in.sbom)))
-	}
-	if in.provenance != "" {
-		overrides = append(overrides, fmt.Sprintf("*.attest=%s", buildflags.CanonicalizeAttest("provenance", in.provenance)))
-	}
	contextPathHash, _ := os.Getwd()

	ctx2, cancel := context.WithCancel(context.TODO())
	defer cancel()

-	printer, err := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, in.progress)
-	if err != nil {
-		return err
-	}
+	printer := progress.NewPrinter(ctx2, os.Stderr, in.progress)

	defer func() {
		if printer != nil {
@@ -98,30 +87,16 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
		}
	}()

-	var nodes []builder.Node
+	dis, err := getInstanceOrDefault(ctx, dockerCli, in.builder, contextPathHash)
+	if err != nil {
+		return err
+	}
+
	var files []bake.File
	var inp *bake.Input
-
-	// instance only needed for reading remote bake files or building
-	if url != "" || !in.printOnly {
-		b, err := builder.New(dockerCli,
-			builder.WithName(in.builder),
-			builder.WithContextPathHash(contextPathHash),
-		)
-		if err != nil {
-			return err
-		}
-		if err = updateLastActivity(dockerCli, b.NodeGroup); err != nil {
-			return errors.Wrapf(err, "failed to update builder last activity time")
-		}
-		nodes, err = b.LoadNodes(ctx, false)
-		if err != nil {
-			return err
-		}
-	}
-
	if url != "" {
-		files, inp, err = bake.ReadRemoteFiles(ctx, nodes, url, in.files, printer)
+		files, inp, err = bake.ReadRemoteFiles(ctx, dis, url, in.files, printer)
	} else {
		files, err = bake.ReadLocalFiles(in.files)
	}
@@ -130,8 +105,8 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
	}

	tgts, grps, err := bake.ReadTargets(ctx, files, targets, overrides, map[string]string{
-		// don't forget to update documentation if you add a new
-		// built-in variable: docs/manuals/bake/file-definition.md#built-in-variables
+		// Don't forget to update documentation if you add a new
+		// built-in variable: docs/reference/buildx_bake.md#built-in-variables
		"BAKE_CMD_CONTEXT":    cmdContext,
		"BAKE_LOCAL_PLATFORM": platforms.DefaultString(),
	})
@@ -146,11 +121,17 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
	}

	if in.printOnly {
+		var defg map[string]*bake.Group
+		if len(grps) == 1 {
+			defg = map[string]*bake.Group{
+				"default": grps[0],
+			}
+		}
		dt, err := json.MarshalIndent(struct {
			Group  map[string]*bake.Group  `json:"group,omitempty"`
			Target map[string]*bake.Target `json:"target"`
		}{
-			grps,
+			defg,
			tgts,
		}, "", "  ")
		if err != nil {
@@ -165,17 +146,21 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
		return nil
	}

-	resp, err := build.Build(ctx, nodes, bo, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), printer)
+	resp, err := build.Build(ctx, dis, bo, dockerAPI(dockerCli), confutil.ConfigDir(dockerCli), printer)
	if err != nil {
-		return wrapBuildError(err, true)
+		return err
	}

-	if len(in.metadataFile) > 0 {
-		dt := make(map[string]interface{})
-		for t, r := range resp {
-			dt[t] = decodeExporterResponse(r.ExporterResponse)
+	if len(in.metadataFile) > 0 && resp != nil {
+		mdata := map[string]map[string]string{}
+		for k, r := range resp {
+			mdata[k] = r.ExporterResponse
		}
-		if err := writeMetadataFile(in.metadataFile, dt); err != nil {
+		mdatab, err := json.MarshalIndent(mdata, "", "  ")
+		if err != nil {
+			return err
+		}
+		if err := ioutils.AtomicWriteFile(in.metadataFile, mdatab, 0644); err != nil {
			return err
		}
	}
@@ -206,12 +191,10 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
	flags := cmd.Flags()

	flags.StringArrayVarP(&options.files, "file", "f", []string{}, "Build definition file")
-	flags.BoolVar(&options.exportLoad, "load", false, `Shorthand for "--set=*.output=type=docker"`)
+	flags.BoolVar(&options.exportLoad, "load", false, "Shorthand for `--set=*.output=type=docker`")
	flags.BoolVar(&options.printOnly, "print", false, "Print the options without building")
-	flags.BoolVar(&options.exportPush, "push", false, `Shorthand for "--set=*.output=type=registry"`)
-	flags.StringVar(&options.sbom, "sbom", "", `Shorthand for "--set=*.attest=type=sbom"`)
-	flags.StringVar(&options.provenance, "provenance", "", `Shorthand for "--set=*.attest=type=provenance"`)
-	flags.StringArrayVar(&options.overrides, "set", nil, `Override target value (e.g., "targetpattern.key=value")`)
+	flags.BoolVar(&options.exportPush, "push", false, "Shorthand for `--set=*.output=type=registry`")
+	flags.StringArrayVar(&options.overrides, "set", nil, "Override target value (e.g., `targetpattern.key=value`)")

	commonBuildFlags(&options.commonOptions, flags)
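The backported print fix re-wraps a single group under a "default" key so `--print` keeps its documented JSON shape. A standalone sketch of the resulting output (group and target contents are illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

type Group struct {
	Targets []string `json:"targets"`
}

func main() {
	grps := []*Group{{Targets: []string{"app"}}}

	var defg map[string]*Group
	if len(grps) == 1 {
		defg = map[string]*Group{"default": grps[0]}
	}
	dt, _ := json.MarshalIndent(struct {
		Group  map[string]*Group      `json:"group,omitempty"`
		Target map[string]interface{} `json:"target"`
	}{defg, map[string]interface{}{"app": map[string]string{"context": "."}}}, "", "  ")
	fmt.Println(string(dt))
	// {
	//   "group": { "default": { "targets": ["app"] } },
	//   "target": { "app": { "context": "." } }
	// }
}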

View File

@@ -1,50 +1,31 @@
package commands

import (
-	"bytes"
	"context"
-	"encoding/base64"
-	"encoding/csv"
	"encoding/json"
	"fmt"
-	"io"
	"os"
	"path/filepath"
-	"strconv"
	"strings"
-	"sync"

-	"github.com/containerd/console"
	"github.com/docker/buildx/build"
-	"github.com/docker/buildx/builder"
-	"github.com/docker/buildx/monitor"
-	"github.com/docker/buildx/store"
-	"github.com/docker/buildx/store/storeutil"
	"github.com/docker/buildx/util/buildflags"
	"github.com/docker/buildx/util/confutil"
-	"github.com/docker/buildx/util/dockerutil"
	"github.com/docker/buildx/util/platformutil"
	"github.com/docker/buildx/util/progress"
	"github.com/docker/buildx/util/tracing"
-	"github.com/docker/cli-docs-tool/annotation"
	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
-	"github.com/docker/cli/cli/config"
	dockeropts "github.com/docker/cli/opts"
-	"github.com/docker/distribution/reference"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/go-units"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/session/auth/authprovider"
-	"github.com/moby/buildkit/solver/errdefs"
	"github.com/moby/buildkit/util/appcontext"
-	"github.com/moby/buildkit/util/grpcerrors"
-	"github.com/morikuni/aec"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
-	"google.golang.org/grpc/codes"
)

const defaultTargetName = "default"
@@ -52,21 +33,16 @@ const defaultTargetName = "default"
type buildOptions struct { type buildOptions struct {
contextPath string contextPath string
dockerfileName string dockerfileName string
printFunc string
allow []string allow []string
attests []string
buildArgs []string buildArgs []string
cacheFrom []string cacheFrom []string
cacheTo []string cacheTo []string
cgroupParent string cgroupParent string
contexts []string
extraHosts []string extraHosts []string
imageIDFile string imageIDFile string
invoke string
labels []string labels []string
networkMode string networkMode string
noCacheFilter []string
outputs []string outputs []string
platforms []string platforms []string
quiet bool quiet bool
@@ -86,11 +62,11 @@ type commonOptions struct {
progress string progress string
pull *bool pull *bool
// golangci-lint#826
// nolint:structcheck
exportPush bool exportPush bool
// nolint:structcheck
exportLoad bool exportLoad bool
sbom string
provenance string
} }
func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
@@ -113,32 +89,17 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
		pull = *in.pull
	}

-	if noCache && len(in.noCacheFilter) > 0 {
-		return errors.Errorf("--no-cache and --no-cache-filter cannot currently be used together")
-	}
-
-	if in.quiet && in.progress != progress.PrinterModeAuto && in.progress != progress.PrinterModeQuiet {
+	if in.quiet && in.progress != "auto" && in.progress != "quiet" {
		return errors.Errorf("progress=%s and quiet cannot be used together", in.progress)
	} else if in.quiet {
		in.progress = "quiet"
	}

-	contexts, err := parseContextNames(in.contexts)
-	if err != nil {
-		return err
-	}
-
-	printFunc, err := parsePrintFunc(in.printFunc)
-	if err != nil {
-		return err
-	}
-
	opts := build.Options{
		Inputs: build.Inputs{
			ContextPath:    in.contextPath,
			DockerfilePath: in.dockerfileName,
			InStream:       os.Stdin,
-			NamedContexts:  contexts,
		},
		BuildArgs:   listToMap(in.buildArgs, true),
		ExtraHosts:  in.extraHosts,
@@ -146,13 +107,11 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
		Labels:      listToMap(in.labels, false),
		NetworkMode: in.networkMode,
		NoCache:     noCache,
-		NoCacheFilter: in.noCacheFilter,
		Pull:        pull,
		ShmSize:     in.shmSize,
		Tags:        in.tags,
		Target:      in.target,
		Ulimits:     in.ulimits,
-		PrintFunc:   printFunc,
	}

	platforms, err := platformutil.Parse(in.platforms)
@@ -161,8 +120,7 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
	}
	opts.Platforms = platforms

-	dockerConfig := config.LoadDefaultConfigFile(os.Stderr)
-	opts.Session = append(opts.Session, authprovider.NewDockerAuthProvider(dockerConfig))
+	opts.Session = append(opts.Session, authprovider.NewDockerAuthProvider(os.Stderr))

	secrets, err := buildflags.ParseSecretSpecs(in.secrets)
	if err != nil {
@@ -218,19 +176,8 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
			}
		}
	}
-
	opts.Exports = outputs
-
-	inAttests := append([]string{}, in.attests...)
-	if in.provenance != "" {
-		inAttests = append(inAttests, buildflags.CanonicalizeAttest("provenance", in.provenance))
-	}
-	if in.sbom != "" {
-		inAttests = append(inAttests, buildflags.CanonicalizeAttest("sbom", in.sbom))
-	}
-	opts.Attests, err = buildflags.ParseAttests(inAttests)
-	if err != nil {
-		return err
-	}

	cacheImports, err := buildflags.ParseCacheEntry(in.cacheFrom)
	if err != nil {
@@ -256,46 +203,10 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
		contextPathHash = in.contextPath
	}

-	b, err := builder.New(dockerCli,
-		builder.WithName(in.builder),
-		builder.WithContextPathHash(contextPathHash),
-	)
-	if err != nil {
-		return err
-	}
-	if err = updateLastActivity(dockerCli, b.NodeGroup); err != nil {
-		return errors.Wrapf(err, "failed to update builder last activity time")
-	}
-	nodes, err := b.LoadNodes(ctx, false)
-	if err != nil {
-		return err
-	}
-
-	imageID, res, err := buildTargets(ctx, dockerCli, nodes, map[string]build.Options{defaultTargetName: opts}, in.progress, in.metadataFile, in.invoke != "")
-	err = wrapBuildError(err, false)
+	imageID, err := buildTargets(ctx, dockerCli, map[string]build.Options{defaultTargetName: opts}, in.progress, contextPathHash, in.builder, in.metadataFile)
	if err != nil {
		return err
	}

-	if in.invoke != "" {
-		cfg, err := parseInvokeConfig(in.invoke)
-		if err != nil {
-			return err
-		}
-		cfg.ResultCtx = res
-		con := console.Current()
-		if err := con.SetRaw(); err != nil {
-			return errors.Errorf("failed to configure terminal: %v", err)
-		}
-		err = monitor.RunMonitor(ctx, cfg, func(ctx context.Context) (*build.ResultContext, error) {
-			_, rr, err := buildTargets(ctx, dockerCli, nodes, map[string]build.Options{defaultTargetName: opts}, in.progress, in.metadataFile, true)
-			return rr, err
-		}, io.NopCloser(os.Stdin), nopCloser{os.Stdout}, nopCloser{os.Stderr})
-		if err != nil {
-			logrus.Warnf("failed to run monitor: %v", err)
-		}
-		con.Reset()
-	}

	if in.quiet {
		fmt.Println(imageID)
@@ -303,140 +214,37 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
	return nil
}

-type nopCloser struct {
-	io.WriteCloser
-}
-
-func (c nopCloser) Close() error { return nil }
-
-func buildTargets(ctx context.Context, dockerCli command.Cli, nodes []builder.Node, opts map[string]build.Options, progressMode string, metadataFile string, allowNoOutput bool) (imageID string, res *build.ResultContext, err error) {
+func buildTargets(ctx context.Context, dockerCli command.Cli, opts map[string]build.Options, progressMode, contextPathHash, instance string, metadataFile string) (imageID string, err error) {
+	dis, err := getInstanceOrDefault(ctx, dockerCli, instance, contextPathHash)
+	if err != nil {
+		return "", err
+	}
+
	ctx2, cancel := context.WithCancel(context.TODO())
	defer cancel()
-	printer, err := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, progressMode)
-	if err != nil {
-		return "", nil, err
-	}
+	printer := progress.NewPrinter(ctx2, os.Stderr, progressMode)

-	var mu sync.Mutex
-	var idx int
-	resp, err := build.BuildWithResultHandler(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), printer, func(driverIndex int, gotRes *build.ResultContext) {
-		mu.Lock()
-		defer mu.Unlock()
-		if res == nil || driverIndex < idx {
-			idx, res = driverIndex, gotRes
-		}
-	}, allowNoOutput)
+	resp, err := build.Build(ctx, dis, opts, dockerAPI(dockerCli), confutil.ConfigDir(dockerCli), printer)
	err1 := printer.Wait()
	if err == nil {
		err = err1
	}
	if err != nil {
-		return "", nil, err
+		return "", err
	}

	if len(metadataFile) > 0 && resp != nil {
-		if err := writeMetadataFile(metadataFile, decodeExporterResponse(resp[defaultTargetName].ExporterResponse)); err != nil {
-			return "", nil, err
+		mdatab, err := json.MarshalIndent(resp[defaultTargetName].ExporterResponse, "", "  ")
+		if err != nil {
+			return "", err
+		}
+		if err := ioutils.AtomicWriteFile(metadataFile, mdatab, 0644); err != nil {
+			return "", err
		}
	}

-	printWarnings(os.Stderr, printer.Warnings(), progressMode)
-
-	for k := range resp {
-		if opts[k].PrintFunc != nil {
-			if err := printResult(opts[k].PrintFunc, resp[k].ExporterResponse); err != nil {
-				return "", nil, err
-			}
-		}
-	}
-
-	return resp[defaultTargetName].ExporterResponse["containerimage.digest"], res, err
-}
-
-func parseInvokeConfig(invoke string) (cfg build.ContainerConfig, err error) {
-	cfg.Tty = true
-	if invoke == "default" {
-		return cfg, nil
-	}
-
-	csvReader := csv.NewReader(strings.NewReader(invoke))
-	fields, err := csvReader.Read()
-	if err != nil {
-		return cfg, err
-	}
-	if len(fields) == 1 && !strings.Contains(fields[0], "=") {
-		cfg.Cmd = []string{fields[0]}
-		return cfg, nil
-	}
-	for _, field := range fields {
-		parts := strings.SplitN(field, "=", 2)
-		if len(parts) != 2 {
-			return cfg, errors.Errorf("invalid value %s", field)
-		}
-		key := strings.ToLower(parts[0])
-		value := parts[1]
-		switch key {
-		case "args":
-			cfg.Cmd = append(cfg.Cmd, value) // TODO: support JSON
-		case "entrypoint":
-			cfg.Entrypoint = append(cfg.Entrypoint, value) // TODO: support JSON
-		case "env":
-			cfg.Env = append(cfg.Env, value)
-		case "user":
-			cfg.User = &value
-		case "cwd":
-			cfg.Cwd = &value
-		case "tty":
-			cfg.Tty, err = strconv.ParseBool(value)
-			if err != nil {
-				return cfg, errors.Errorf("failed to parse tty: %v", err)
-			}
-		default:
-			return cfg, errors.Errorf("unknown key %q", key)
-		}
-	}
-	return cfg, nil
-}
-
-func printWarnings(w io.Writer, warnings []client.VertexWarning, mode string) {
-	if len(warnings) == 0 || mode == progress.PrinterModeQuiet {
-		return
-	}
-	fmt.Fprintf(w, "\n ")
-	sb := &bytes.Buffer{}
-	if len(warnings) == 1 {
-		fmt.Fprintf(sb, "1 warning found")
-	} else {
-		fmt.Fprintf(sb, "%d warnings found", len(warnings))
-	}
-	if logrus.GetLevel() < logrus.DebugLevel {
-		fmt.Fprintf(sb, " (use --debug to expand)")
-	}
-	fmt.Fprintf(sb, ":\n")
-	fmt.Fprint(w, aec.Apply(sb.String(), aec.YellowF))
-	for _, warn := range warnings {
-		fmt.Fprintf(w, " - %s\n", warn.Short)
-		if logrus.GetLevel() < logrus.DebugLevel {
-			continue
-		}
-		for _, d := range warn.Detail {
-			fmt.Fprintf(w, "%s\n", d)
-		}
-		if warn.URL != "" {
-			fmt.Fprintf(w, "More info: %s\n", warn.URL)
-		}
-		if warn.SourceInfo != nil && warn.Range != nil {
-			src := errdefs.Source{
-				Info:   warn.SourceInfo,
-				Ranges: warn.Range,
-			}
-			src.Print(w)
-		}
-		fmt.Fprintf(w, "\n")
-	}
-}
+	return resp[defaultTargetName].ExporterResponse["containerimage.digest"], err
+}
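For the removed parseInvokeConfig helper above, a standalone sketch of the CSV splitting it relies on (the flag value is an arbitrary example); each field is then dispatched on its lowercased key exactly as in the switch shown:

package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

func main() {
	invoke := "entrypoint=/bin/sh,args=-c,tty=false"
	fields, err := csv.NewReader(strings.NewReader(invoke)).Read()
	if err != nil {
		panic(err)
	}
	for _, field := range fields {
		kv := strings.SplitN(field, "=", 2)
		fmt.Printf("%s -> %s\n", kv[0], kv[1])
	}
	// entrypoint -> /bin/sh
	// args -> -c
	// tty -> false
}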
func newBuildOptions() buildOptions {
@@ -469,69 +277,54 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
	flags := cmd.Flags()

-	flags.StringSliceVar(&options.extraHosts, "add-host", []string{}, `Add a custom host-to-IP mapping (format: "host:ip")`)
-	flags.SetAnnotation("add-host", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#add-entries-to-container-hosts-file---add-host"})
+	flags.StringSliceVar(&options.extraHosts, "add-host", []string{}, "Add a custom host-to-IP mapping (format: `host:ip`)")
+	flags.SetAnnotation("add-host", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#add-entries-to-container-hosts-file---add-host"})

-	flags.StringSliceVar(&options.allow, "allow", []string{}, `Allow extra privileged entitlement (e.g., "network.host", "security.insecure")`)
+	flags.StringSliceVar(&options.allow, "allow", []string{}, "Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`)")

	flags.StringArrayVar(&options.buildArgs, "build-arg", []string{}, "Set build-time variables")
+	flags.SetAnnotation("build-arg", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables---build-arg"})

-	flags.StringArrayVar(&options.cacheFrom, "cache-from", []string{}, `External cache sources (e.g., "user/app:cache", "type=local,src=path/to/dir")`)
-	flags.StringArrayVar(&options.cacheTo, "cache-to", []string{}, `Cache export destinations (e.g., "user/app:cache", "type=local,dest=path/to/dir")`)
+	flags.StringArrayVar(&options.cacheFrom, "cache-from", []string{}, "External cache sources (e.g., `user/app:cache`, `type=local,src=path/to/dir`)")
+	flags.StringArrayVar(&options.cacheTo, "cache-to", []string{}, "Cache export destinations (e.g., `user/app:cache`, `type=local,dest=path/to/dir`)")

	flags.StringVar(&options.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container")
-	flags.SetAnnotation("cgroup-parent", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#use-a-custom-parent-cgroup---cgroup-parent"})
+	flags.SetAnnotation("cgroup-parent", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#use-a-custom-parent-cgroup---cgroup-parent"})

-	flags.StringArrayVar(&options.contexts, "build-context", []string{}, "Additional build contexts (e.g., name=path)")
-
-	flags.StringVarP(&options.dockerfileName, "file", "f", "", `Name of the Dockerfile (default: "PATH/Dockerfile")`)
-	flags.SetAnnotation("file", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f"})
+	flags.StringVarP(&options.dockerfileName, "file", "f", "", "Name of the Dockerfile (default: `PATH/Dockerfile`)")
+	flags.SetAnnotation("file", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f"})

	flags.StringVar(&options.imageIDFile, "iidfile", "", "Write the image ID to the file")

	flags.StringArrayVar(&options.labels, "label", []string{}, "Set metadata for an image")

-	flags.BoolVar(&options.exportLoad, "load", false, `Shorthand for "--output=type=docker"`)
+	flags.BoolVar(&options.exportLoad, "load", false, "Shorthand for `--output=type=docker`")

-	flags.StringVar(&options.networkMode, "network", "default", `Set the networking mode for the "RUN" instructions during build`)
+	flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build")

-	flags.StringArrayVar(&options.noCacheFilter, "no-cache-filter", []string{}, "Do not cache specified stages")
-
-	flags.StringArrayVarP(&options.outputs, "output", "o", []string{}, `Output destination (format: "type=local,dest=path")`)
+	flags.StringArrayVarP(&options.outputs, "output", "o", []string{}, "Output destination (format: `type=local,dest=path`)")

	flags.StringArrayVar(&options.platforms, "platform", platformsDefault, "Set target platform for build")

-	if isExperimental() {
-		flags.StringVar(&options.printFunc, "print", "", "Print result of information request (e.g., outline, targets) [experimental]")
-	}
-
-	flags.BoolVar(&options.exportPush, "push", false, `Shorthand for "--output=type=registry"`)
+	flags.BoolVar(&options.exportPush, "push", false, "Shorthand for `--output=type=registry`")

	flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success")

-	flags.StringArrayVar(&options.secrets, "secret", []string{}, `Secret to expose to the build (format: "id=mysecret[,src=/local/secret]")`)
+	flags.StringArrayVar(&options.secrets, "secret", []string{}, "Secret file to expose to the build (format: `id=mysecret,src=/local/secret`)")

-	flags.Var(&options.shmSize, "shm-size", `Size of "/dev/shm"`)
+	flags.Var(&options.shmSize, "shm-size", "Size of `/dev/shm`")

-	flags.StringArrayVar(&options.ssh, "ssh", []string{}, `SSH agent socket or keys to expose to the build (format: "default|<id>[=<socket>|<key>[,<key>]]")`)
+	flags.StringArrayVar(&options.ssh, "ssh", []string{}, "SSH agent socket or keys to expose to the build (format: `default|<id>[=<socket>|<key>[,<key>]]`)")

-	flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, `Name and optionally a tag (format: "name:tag")`)
-	flags.SetAnnotation("tag", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t"})
+	flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, "Name and optionally a tag (format: `name:tag`)")
+	flags.SetAnnotation("tag", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t"})

-	flags.StringVar(&options.target, "target", "", "Set the target build stage to build")
-	flags.SetAnnotation("target", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#specifying-target-build-stage---target"})
+	flags.StringVar(&options.target, "target", "", "Set the target build stage to build.")
+	flags.SetAnnotation("target", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#specifying-target-build-stage---target"})

	flags.Var(options.ulimits, "ulimit", "Ulimit options")

-	flags.StringArrayVar(&options.attests, "attest", []string{}, `Attestation parameters (format: "type=sbom,generator=image")`)
-	flags.StringVar(&options.sbom, "sbom", "", `Shorthand for "--attest=type=sbom"`)
-	flags.StringVar(&options.provenance, "provenance", "", `Shorthand for "--attest=type=provenance"`)
-
-	if isExperimental() {
-		flags.StringVar(&options.invoke, "invoke", "", "Invoke a command after the build [experimental]")
-	}
-
	// hidden flags
	var ignore string
	var ignoreSlice []string
@@ -556,7 +349,7 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
	flags.StringVarP(&ignore, "memory", "m", "", "Memory limit")
	flags.MarkHidden("memory")

-	flags.StringVar(&ignore, "memory-swap", "", `Swap limit equal to memory plus swap: "-1" to enable unlimited swap`)
+	flags.StringVar(&ignore, "memory-swap", "", "Swap limit equal to memory plus swap: `-1` to enable unlimited swap")
	flags.MarkHidden("memory-swap")

	flags.Int64VarP(&ignoreInt, "cpu-shares", "c", 0, "CPU shares (relative weight)")
@@ -568,10 +361,10 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
	flags.Int64Var(&ignoreInt, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
	flags.MarkHidden("cpu-quota")

-	flags.StringVar(&ignore, "cpuset-cpus", "", `CPUs in which to allow execution ("0-3", "0,1")`)
+	flags.StringVar(&ignore, "cpuset-cpus", "", "CPUs in which to allow execution (`0-3`, `0,1`)")
	flags.MarkHidden("cpuset-cpus")

-	flags.StringVar(&ignore, "cpuset-mems", "", `MEMs in which to allow execution ("0-3", "0,1")`)
+	flags.StringVar(&ignore, "cpuset-mems", "", "MEMs in which to allow execution (`0-3`, `0,1`)")
	flags.MarkHidden("cpuset-mems")

	flags.BoolVar(&ignoreBool, "rm", true, "Remove intermediate containers after a successful build")
@@ -586,8 +379,8 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {

func commonBuildFlags(options *commonOptions, flags *pflag.FlagSet) {
	options.noCache = flags.Bool("no-cache", false, "Do not use cache when building the image")
-	flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty"). Use plain to show container output`)
-	options.pull = flags.Bool("pull", false, "Always attempt to pull all referenced images")
+	flags.StringVar(&options.progress, "progress", "auto", "Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output")
+	options.pull = flags.Bool("pull", false, "Always attempt to pull a newer version of the image")
	flags.StringVar(&options.metadataFile, "metadata-file", "", "Write build result metadata to the file")
}
@@ -599,6 +392,7 @@ func checkWarnedFlags(f *pflag.Flag) {
	switch t {
	case "flag-warn":
		logrus.Warn(m[0])
+		break
	}
	}
}
@@ -622,125 +416,3 @@ func listToMap(values []string, defaultEnv bool) map[string]string {
	}
	return result
}
func parseContextNames(values []string) (map[string]build.NamedContext, error) {
if len(values) == 0 {
return nil, nil
}
result := make(map[string]build.NamedContext, len(values))
for _, value := range values {
kv := strings.SplitN(value, "=", 2)
if len(kv) != 2 {
return nil, errors.Errorf("invalid context value: %s, expected key=value", value)
}
named, err := reference.ParseNormalizedNamed(kv[0])
if err != nil {
return nil, errors.Wrapf(err, "invalid context name %s", kv[0])
}
name := strings.TrimSuffix(reference.FamiliarString(named), ":latest")
result[name] = build.NamedContext{Path: kv[1]}
}
return result, nil
}
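A runnable sketch of the name normalization used by parseContextNames: docker/distribution's reference package canonicalizes the name, and the explicit :latest suffix is trimmed so the map key stays short:

package main

import (
	"fmt"
	"strings"

	"github.com/docker/distribution/reference"
)

func main() {
	named, err := reference.ParseNormalizedNamed("alpine:latest")
	if err != nil {
		panic(err)
	}
	name := strings.TrimSuffix(reference.FamiliarString(named), ":latest")
	fmt.Println(name) // alpine
	// so "--build-context alpine:latest=./vendor/alpine" and
	// "--build-context alpine=./vendor/alpine" map to the same key "alpine"
}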
func parsePrintFunc(str string) (*build.PrintFunc, error) {
if str == "" {
return nil, nil
}
csvReader := csv.NewReader(strings.NewReader(str))
fields, err := csvReader.Read()
if err != nil {
return nil, err
}
f := &build.PrintFunc{}
for _, field := range fields {
parts := strings.SplitN(field, "=", 2)
if len(parts) == 2 {
if parts[0] == "format" {
f.Format = parts[1]
} else {
return nil, errors.Errorf("invalid print field: %s", field)
}
} else {
if f.Name != "" {
return nil, errors.Errorf("invalid print value: %s", str)
}
f.Name = field
}
}
return f, nil
}
func writeMetadataFile(filename string, dt interface{}) error {
b, err := json.MarshalIndent(dt, "", " ")
if err != nil {
return err
}
return ioutils.AtomicWriteFile(filename, b, 0644)
}
func decodeExporterResponse(exporterResponse map[string]string) map[string]interface{} {
out := make(map[string]interface{})
for k, v := range exporterResponse {
dt, err := base64.StdEncoding.DecodeString(v)
if err != nil {
out[k] = v
continue
}
var raw map[string]interface{}
if err = json.Unmarshal(dt, &raw); err != nil || len(raw) == 0 {
out[k] = v
continue
}
out[k] = json.RawMessage(dt)
}
return out
}
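Note: the decoding heuristic above inlines any exporter value that is base64-encoded JSON and passes everything else through verbatim; a hedged sketch (assumes decodeExporterResponse is in scope, keys are illustrative):

resp := map[string]string{
	// not valid base64-encoded JSON, so it is kept as a plain string
	"containerimage.digest": "sha256:4f4f...",
	// base64-encoded JSON object, so it is unwrapped into json.RawMessage
	"image.name": base64.StdEncoding.EncodeToString([]byte(`{"name":"docker.io/library/app:latest"}`)),
}
out := decodeExporterResponse(resp)
// out["containerimage.digest"].(string) and out["image.name"].(json.RawMessage)
_ = out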
func wrapBuildError(err error, bake bool) error {
if err == nil {
return nil
}
st, ok := grpcerrors.AsGRPCStatus(err)
if ok {
if st.Code() == codes.Unimplemented && strings.Contains(st.Message(), "unsupported frontend capability moby.buildkit.frontend.contexts") {
msg := "current frontend does not support --build-context."
if bake {
msg = "current frontend does not support defining additional contexts for targets."
}
msg += " Named contexts are supported since Dockerfile v1.4. Use #syntax directive in Dockerfile or update to latest BuildKit."
return &wrapped{err, msg}
}
}
return err
}
type wrapped struct {
err error
msg string
}
func (w *wrapped) Error() string {
return w.msg
}
func (w *wrapped) Unwrap() error {
return w.err
}
func isExperimental() bool {
if v, ok := os.LookupEnv("BUILDX_EXPERIMENTAL"); ok {
vv, _ := strconv.ParseBool(v)
return vv
}
return false
}
func updateLastActivity(dockerCli command.Cli, ng *store.NodeGroup) error {
txn, release, err := storeutil.GetStore(dockerCli)
if err != nil {
return err
}
defer release()
return txn.UpdateLastActivity(ng)
}

commands/create.go

@@ -10,17 +10,12 @@ import (
"strings"
"time"
- "github.com/docker/buildx/builder"
"github.com/docker/buildx/driver"
- remoteutil "github.com/docker/buildx/driver/remote/util"
"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/cobrautil"
- "github.com/docker/buildx/util/confutil"
- "github.com/docker/buildx/util/dockerutil"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
- dopts "github.com/docker/cli/opts"
"github.com/google/shlex"
"github.com/moby/buildkit/util/appcontext"
"github.com/pkg/errors"
@@ -65,6 +60,22 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
}
}
+ driverName := in.driver
+ if driverName == "" {
+ f, err := driver.GetDefaultFactory(ctx, dockerCli.Client(), true)
+ if err != nil {
+ return err
+ }
+ if f == nil {
+ return errors.Errorf("no valid drivers found")
+ }
+ driverName = f.Name()
+ }
+ if driver.GetFactory(driverName, true) == nil {
+ return errors.Errorf("failed to find driver %q", in.driver)
+ }
txn, release, err := storeutil.GetStore(dockerCli)
if err != nil {
return err
@@ -79,19 +90,6 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
}
}
- if !in.actionLeave && !in.actionAppend {
- contexts, err := dockerCli.ContextStore().List()
- if err != nil {
- return err
- }
- for _, c := range contexts {
- if c.Name == name {
- logrus.Warnf("instance name %q already exists as context builder", name)
- break
- }
- }
- }
ng, err := txn.NodeGroupByName(name)
if err != nil {
if os.IsNotExist(errors.Cause(err)) {
@@ -99,62 +97,29 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
logrus.Warnf("failed to find %q for append, creating a new instance instead", in.name) logrus.Warnf("failed to find %q for append, creating a new instance instead", in.name)
} }
if in.actionLeave { if in.actionLeave {
return errors.Errorf("failed to find instance %q for leave", in.name) return errors.Errorf("failed to find instance %q for leave", name)
} }
} else { } else {
return err return err
} }
} }
buildkitHost := os.Getenv("BUILDKIT_HOST")
driverName := in.driver
if driverName == "" {
if ng != nil {
driverName = ng.Driver
} else if len(args) == 0 && buildkitHost != "" {
driverName = "remote"
} else {
var arg string
if len(args) > 0 {
arg = args[0]
}
f, err := driver.GetDefaultFactory(ctx, arg, dockerCli.Client(), true)
if err != nil {
return err
}
if f == nil {
return errors.Errorf("no valid drivers found")
}
driverName = f.Name()
}
}
if ng != nil { if ng != nil {
if in.nodeName == "" && !in.actionAppend { if in.nodeName == "" && !in.actionAppend {
return errors.Errorf("existing instance for %q but no append mode, specify --node to make changes for existing instances", name) return errors.Errorf("existing instance for %s but no append mode, specify --node to make changes for existing instances", name)
} }
if driverName != ng.Driver {
return errors.Errorf("existing instance for %q but has mismatched driver %q", name, ng.Driver)
}
}
if _, err := driver.GetFactory(driverName, true); err != nil {
return err
}
ngOriginal := ng
if ngOriginal != nil {
ngOriginal = ngOriginal.Copy()
} }
if ng == nil { if ng == nil {
ng = &store.NodeGroup{ ng = &store.NodeGroup{
Name: name, Name: name,
Driver: driverName,
} }
} }
if ng.Driver == "" || in.driver != "" {
ng.Driver = driverName
}
var flags []string var flags []string
if in.flags != "" { if in.flags != "" {
flags, err = shlex.Split(in.flags) flags, err = shlex.Split(in.flags)
@@ -164,72 +129,44 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
}
var ep string
- var setEp bool
if in.actionLeave {
if err := ng.Leave(in.nodeName); err != nil {
return err
}
} else {
- switch {
- case driverName == "kubernetes":
if len(args) > 0 {
- logrus.Warnf("kubernetes driver does not support endpoint args %q", args[0])
+ ep, err = validateEndpoint(dockerCli, args[0])
+ if err != nil {
+ return err
+ }
+ } else {
+ if dockerCli.CurrentContext() == "default" && dockerCli.DockerEndpoint().TLSData != nil {
+ return errors.Errorf("could not create a builder instance with TLS data loaded from environment. Please use `docker context create <context-name>` to create a context for current environment and then create a builder instance with `docker buildx create <context-name>`")
+ }
+ ep, err = storeutil.GetCurrentEndpoint(dockerCli)
+ if err != nil {
+ return err
+ }
}
+ if in.driver == "kubernetes" {
// naming endpoint to make --append works
ep = (&url.URL{
- Scheme: driverName,
+ Scheme: in.driver,
Path: "/" + in.name,
RawQuery: (&url.Values{
"deployment": {in.nodeName},
"kubeconfig": {os.Getenv("KUBECONFIG")},
}).Encode(),
}).String()
- setEp = false
- case driverName == "remote":
- if len(args) > 0 {
- ep = args[0]
- } else if buildkitHost != "" {
- ep = buildkitHost
- } else {
- return errors.Errorf("no remote endpoint provided")
- }
- ep, err = validateBuildkitEndpoint(ep)
- if err != nil {
- return err
- }
- setEp = true
- case len(args) > 0:
- ep, err = validateEndpoint(dockerCli, args[0])
- if err != nil {
- return err
- }
- setEp = true
- default:
- if dockerCli.CurrentContext() == "default" && dockerCli.DockerEndpoint().TLSData != nil {
- return errors.Errorf("could not create a builder instance with TLS data loaded from environment. Please use `docker context create <context-name>` to create a context for current environment and then create a builder instance with `docker buildx create <context-name>`")
- }
- ep, err = dockerutil.GetCurrentEndpoint(dockerCli)
- if err != nil {
- return err
- }
- setEp = false
}
m, err := csvToMap(in.driverOpts)
if err != nil {
return err
}
- if in.configFile == "" {
- // if buildkit config is not provided, check if the default one is
- // available and use it
- if f, ok := confutil.DefaultConfigFile(dockerCli); ok {
- logrus.Warnf("Using default BuildKit config in %s", f)
- in.configFile = f
- }
- }
- if err := ng.Update(in.nodeName, ep, in.platform, setEp, in.actionAppend, flags, in.configFile, m); err != nil {
+ if err := ng.Update(in.nodeName, ep, in.platform, len(args) > 0, in.actionAppend, flags, in.configFile, m); err != nil {
return err
}
}
@@ -238,41 +175,8 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
return err
}
- b, err := builder.New(dockerCli,
- builder.WithName(ng.Name),
- builder.WithStore(txn),
- builder.WithSkippedValidation(),
- )
- if err != nil {
- return err
- }
- timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
- defer cancel()
- nodes, err := b.LoadNodes(timeoutCtx, true)
- if err != nil {
- return err
- }
- for _, node := range nodes {
- if err := node.Err; err != nil {
- err := errors.Errorf("failed to initialize builder %s (%s): %s", ng.Name, node.Name, err)
- var err2 error
- if ngOriginal == nil {
- err2 = txn.Remove(ng.Name)
- } else {
- err2 = txn.Save(ngOriginal)
- }
- if err2 != nil {
- logrus.Warnf("Could not rollback to previous state: %s", err2)
- }
- return err
- }
- }
if in.use && ep != "" {
- current, err := dockerutil.GetCurrentEndpoint(dockerCli)
+ current, err := storeutil.GetCurrentEndpoint(dockerCli)
if err != nil {
return err
}
@@ -281,8 +185,17 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
}
}
+ ngi := &nginfo{ng: ng}
+ timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
+ defer cancel()
+ if err = loadNodeGroupData(timeoutCtx, dockerCli, ngi); err != nil {
+ return err
+ }
if in.bootstrap {
- if _, err = b.Boot(ctx); err != nil {
+ if _, err = boot(ctx, ngi); err != nil {
return err
}
}
@@ -295,11 +208,11 @@ func createCmd(dockerCli command.Cli) *cobra.Command {
var options createOptions
var drivers bytes.Buffer
- for _, d := range driver.GetFactories(true) {
+ for _, d := range driver.GetFactories() {
if len(drivers.String()) > 0 {
drivers.WriteString(", ")
}
- drivers.WriteString(fmt.Sprintf(`"%s"`, d.Name()))
+ drivers.WriteString(fmt.Sprintf("`%s`", d.Name()))
}
cmd := &cobra.Command{
@@ -333,9 +246,6 @@ func createCmd(dockerCli command.Cli) *cobra.Command {
}
func csvToMap(in []string) (map[string]string, error) {
- if len(in) == 0 {
- return nil, nil
- }
m := make(map[string]string, len(in))
for _, s := range in {
csvReader := csv.NewReader(strings.NewReader(s))
@@ -353,27 +263,3 @@ func csvToMap(in []string) (map[string]string, error) {
}
return m, nil
}
// validateEndpoint validates that endpoint is either a context or a docker host
func validateEndpoint(dockerCli command.Cli, ep string) (string, error) {
dem, err := dockerutil.GetDockerEndpoint(dockerCli, ep)
if err == nil && dem != nil {
if ep == "default" {
return dem.Host, nil
}
return ep, nil
}
h, err := dopts.ParseHost(true, ep)
if err != nil {
return "", errors.Wrapf(err, "failed to parse endpoint %s", ep)
}
return h, nil
}
// validateBuildkitEndpoint validates that endpoint is a valid buildkit host
func validateBuildkitEndpoint(ep string) (string, error) {
if err := remoteutil.IsValidEndpoint(ep); err != nil {
return "", err
}
return ep, nil
}
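Note: validateEndpoint above falls back to the real docker/cli helper opts.ParseHost when the argument is not a known context; a minimal hedged sketch of that fallback (example values only):

package main

import (
	"fmt"

	dopts "github.com/docker/cli/opts"
)

func main() {
	// Same helper validateEndpoint uses for non-context arguments.
	h, err := dopts.ParseHost(true, "tcp://10.0.0.1:2376")
	fmt.Println(h, err) // tcp://10.0.0.1:2376 <nil>

	// A malformed host is expected to fail to parse.
	_, err = dopts.ParseHost(true, "10.0.0.1:2376:bad")
	fmt.Println(err != nil)
}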

commands/create_test.go

@@ -1,26 +0,0 @@
package commands
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestCsvToMap(t *testing.T) {
d := []string{
"\"tolerations=key=foo,value=bar;key=foo2,value=bar2\",replicas=1",
"namespace=default",
}
r, err := csvToMap(d)
require.NoError(t, err)
require.Contains(t, r, "tolerations")
require.Equal(t, r["tolerations"], "key=foo,value=bar;key=foo2,value=bar2")
require.Contains(t, r, "replicas")
require.Equal(t, r["replicas"], "1")
require.Contains(t, r, "namespace")
require.Equal(t, r["namespace"], "default")
}
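Note: the quoting in the test input matters because csvToMap uses encoding/csv; a small runnable sketch showing why the outer quotes keep the comma-bearing value in one field:

package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

func main() {
	// Same input shape as the test above.
	in := `"tolerations=key=foo,value=bar;key=foo2,value=bar2",replicas=1`
	fields, err := csv.NewReader(strings.NewReader(in)).Read()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(fields)) // 2
	fmt.Println(fields[0])   // tolerations=key=foo,value=bar;key=foo2,value=bar2
	fmt.Println(fields[1])   // replicas=1
}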

commands/du.go

@@ -4,18 +4,16 @@ import (
"fmt"
"io"
"os"
- "strings"
"text/tabwriter"
- "time"
- "github.com/docker/buildx/builder"
+ "github.com/docker/buildx/build"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/opts"
- "github.com/docker/go-units"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/util/appcontext"
"github.com/spf13/cobra"
+ "github.com/tonistiigi/units"
"golang.org/x/sync/errgroup"
)
@@ -33,29 +31,25 @@ func runDiskUsage(dockerCli command.Cli, opts duOptions) error {
return err
}
- b, err := builder.New(dockerCli, builder.WithName(opts.builder))
+ dis, err := getInstanceOrDefault(ctx, dockerCli, opts.builder, "")
if err != nil {
return err
}
- nodes, err := b.LoadNodes(ctx, false)
- if err != nil {
- return err
- }
- for _, node := range nodes {
- if node.Err != nil {
- return node.Err
+ for _, di := range dis {
+ if di.Err != nil {
+ return err
}
}
- out := make([][]*client.UsageInfo, len(nodes))
+ out := make([][]*client.UsageInfo, len(dis))
eg, ctx := errgroup.WithContext(ctx)
- for i, node := range nodes {
- func(i int, node builder.Node) {
+ for i, di := range dis {
+ func(i int, di build.DriverInfo) {
eg.Go(func() error {
- if node.Driver != nil {
- c, err := node.Driver.Client(ctx)
+ if di.Driver != nil {
+ c, err := di.Driver.Client(ctx)
if err != nil {
return err
}
@@ -68,7 +62,7 @@ func runDiskUsage(dockerCli command.Cli, opts duOptions) error {
}
return nil
})
- }(i, node)
+ }(i, di)
} }
if err := eg.Wait(); err != nil { if err := eg.Wait(); err != nil {
@@ -131,20 +125,20 @@ func printKV(w io.Writer, k string, v interface{}) {
func printVerbose(tw *tabwriter.Writer, du []*client.UsageInfo) {
for _, di := range du {
printKV(tw, "ID", di.ID)
- if len(di.Parents) != 0 {
- printKV(tw, "Parent", strings.Join(di.Parents, ","))
+ if di.Parent != "" {
+ printKV(tw, "Parent", di.Parent)
}
printKV(tw, "Created at", di.CreatedAt)
printKV(tw, "Mutable", di.Mutable)
printKV(tw, "Reclaimable", !di.InUse)
printKV(tw, "Shared", di.Shared)
- printKV(tw, "Size", units.HumanSize(float64(di.Size)))
+ printKV(tw, "Size", fmt.Sprintf("%.2f", units.Bytes(di.Size)))
if di.Description != "" {
printKV(tw, "Description", di.Description)
}
printKV(tw, "Usage count", di.UsageCount)
if di.LastUsedAt != nil {
- printKV(tw, "Last used", units.HumanDuration(time.Since(*di.LastUsedAt))+" ago")
+ printKV(tw, "Last used", di.LastUsedAt)
}
if di.RecordType != "" {
printKV(tw, "Type", di.RecordType)
@@ -165,15 +159,11 @@ func printTableRow(tw *tabwriter.Writer, di *client.UsageInfo) {
if di.Mutable {
id += "*"
}
- size := units.HumanSize(float64(di.Size))
+ size := fmt.Sprintf("%.2f", units.Bytes(di.Size))
if di.Shared {
size += "*"
}
- lastAccessed := ""
- if di.LastUsedAt != nil {
- lastAccessed = units.HumanDuration(time.Since(*di.LastUsedAt)) + " ago"
- }
- fmt.Fprintf(tw, "%-40s\t%-5v\t%-10s\t%s\n", id, !di.InUse, size, lastAccessed)
+ fmt.Fprintf(tw, "%-71s\t%-11v\t%s\t\n", id, !di.InUse, size)
}
func printSummary(tw *tabwriter.Writer, dus [][]*client.UsageInfo) {
@@ -196,11 +186,11 @@ func printSummary(tw *tabwriter.Writer, dus [][]*client.UsageInfo) {
}
if shared > 0 {
- fmt.Fprintf(tw, "Shared:\t%s\n", units.HumanSize(float64(shared)))
- fmt.Fprintf(tw, "Private:\t%s\n", units.HumanSize(float64(total-shared)))
+ fmt.Fprintf(tw, "Shared:\t%.2f\n", units.Bytes(shared))
+ fmt.Fprintf(tw, "Private:\t%.2f\n", units.Bytes(total-shared))
}
- fmt.Fprintf(tw, "Reclaimable:\t%s\n", units.HumanSize(float64(reclaimable)))
- fmt.Fprintf(tw, "Total:\t%s\n", units.HumanSize(float64(total)))
+ fmt.Fprintf(tw, "Reclaimable:\t%.2f\n", units.Bytes(reclaimable))
+ fmt.Fprintf(tw, "Total:\t%.2f\n", units.Bytes(total))
tw.Flush()
}
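Note: the substance of these du hunks is a swap between two formatting libraries; a hedged sketch contrasting them (exact output strings depend on the packages, values illustrative):

package main

import (
	"fmt"

	gounits "github.com/docker/go-units"
	"github.com/tonistiigi/units"
)

func main() {
	var size int64 = 1264000000

	// Variant on the left side of the diff: decimal human size string.
	fmt.Println(gounits.HumanSize(float64(size))) // e.g. "1.264GB"

	// Variant on the right side: fmt verb on units.Bytes, which implements
	// fmt.Formatter and prints a human-readable size such as "1.26GB".
	fmt.Printf("%.2f\n", units.Bytes(size))
}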

commands/imagetools/create.go

@@ -1,15 +1,14 @@
package commands
import (
- "context"
"encoding/json"
"fmt"
- "os"
+ "io/ioutil"
"strings"
- "github.com/docker/buildx/builder"
+ "github.com/docker/buildx/store"
+ "github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/imagetools"
- "github.com/docker/buildx/util/progress"
"github.com/docker/cli/cli/command"
"github.com/docker/distribution/reference"
"github.com/moby/buildkit/util/appcontext"
@@ -26,7 +25,6 @@ type createOptions struct {
tags []string
dryrun bool
actionAppend bool
- progress string
}
func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
@@ -40,7 +38,7 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
fileArgs := make([]string, len(in.files))
for i, f := range in.files {
- dt, err := os.ReadFile(f)
+ dt, err := ioutil.ReadFile(f)
if err != nil {
return err
}
@@ -80,43 +78,53 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
if len(repos) == 0 {
return errors.Errorf("no repositories specified, please set a reference in tag or source")
}
- var defaultRepo *string
- if len(repos) == 1 {
- for repo := range repos {
- defaultRepo = &repo
- }
+ if len(repos) > 1 {
+ return errors.Errorf("multiple repositories currently not supported, found %v", repos)
}
+ var repo string
+ for r := range repos {
+ repo = r
+ }
for i, s := range srcs {
- if s.Ref == nil {
- if defaultRepo == nil {
- return errors.Errorf("multiple repositories specified, cannot infer repository for %q", args[i])
- }
- n, err := reference.ParseNormalizedNamed(*defaultRepo)
+ if s.Ref == nil && s.Desc.MediaType == "" && s.Desc.Digest != "" {
+ n, err := reference.ParseNormalizedNamed(repo)
if err != nil {
return err
}
- if s.Desc.MediaType == "" && s.Desc.Digest != "" {
r, err := reference.WithDigest(n, s.Desc.Digest)
if err != nil {
return err
}
srcs[i].Ref = r
sourceRefs = true
- } else {
- srcs[i].Ref = reference.TagNameOnly(n)
- }
}
}
ctx := appcontext.Context()
- b, err := builder.New(dockerCli, builder.WithName(in.builder))
+ txn, release, err := storeutil.GetStore(dockerCli)
if err != nil {
return err
}
- imageopt, err := b.ImageOpt()
+ defer release()
+ var ng *store.NodeGroup
+ if in.builder != "" {
+ ng, err = storeutil.GetNodeGroup(txn, dockerCli, in.builder)
+ if err != nil {
+ return err
+ }
+ } else {
+ ng, err = storeutil.GetCurrentInstance(txn, dockerCli)
+ if err != nil {
+ return err
+ }
+ }
+ imageopt, err := storeutil.GetImageConfig(dockerCli, ng)
if err != nil {
return err
}
@@ -135,6 +143,7 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
if err != nil {
return err
}
+ srcs[i].Ref = nil
if srcs[i].Desc.Digest == "" {
srcs[i].Desc = desc
} else {
@@ -153,7 +162,12 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
}
}
- dt, desc, err := r.Combine(ctx, srcs)
+ descs := make([]ocispec.Descriptor, len(srcs))
+ for i := range descs {
+ descs[i] = srcs[i].Desc
+ }
+ dt, desc, err := r.Combine(ctx, repo, descs)
if err != nil {
return err
}
@@ -166,52 +180,23 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
// new resolver cause need new auth
r = imagetools.New(imageopt)
- ctx2, cancel := context.WithCancel(context.TODO())
- defer cancel()
- printer, err := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, in.progress)
- if err != nil {
- return err
- }
- eg, _ := errgroup.WithContext(ctx)
- pw := progress.WithPrefix(printer, "internal", true)
for _, t := range tags {
- t := t
- eg.Go(func() error {
- return progress.Wrap(fmt.Sprintf("pushing %s", t.String()), pw.Write, func(sub progress.SubLogger) error {
- eg2, _ := errgroup.WithContext(ctx)
- for _, s := range srcs {
- if reference.Domain(s.Ref) == reference.Domain(t) && reference.Path(s.Ref) == reference.Path(t) {
- continue
- }
- s := s
- eg2.Go(func() error {
- sub.Log(1, []byte(fmt.Sprintf("copying %s from %s to %s\n", s.Desc.Digest.String(), s.Ref.String(), t.String())))
- return r.Copy(ctx, s, t)
- })
- }
- if err := eg2.Wait(); err != nil {
- return err
- }
- sub.Log(1, []byte(fmt.Sprintf("pushing %s to %s\n", desc.Digest.String(), t.String())))
- return r.Push(ctx, t, desc, dt)
- })
- })
+ if err := r.Push(ctx, t, desc, dt); err != nil {
+ return err
+ }
+ fmt.Println(t.String())
}
- err = eg.Wait()
- err1 := printer.Wait()
- if err == nil {
- err = err1
- }
- return err
+ return nil
}
- func parseSources(in []string) ([]*imagetools.Source, error) {
- out := make([]*imagetools.Source, len(in))
+ type src struct {
+ Desc ocispec.Descriptor
+ Ref reference.Named
+ }
+ func parseSources(in []string) ([]*src, error) {
+ out := make([]*src, len(in))
for i, in := range in {
s, err := parseSource(in)
if err != nil {
@@ -234,11 +219,11 @@ func parseRefs(in []string) ([]reference.Named, error) {
return refs, nil
}
- func parseSource(in string) (*imagetools.Source, error) {
+ func parseSource(in string) (*src, error) {
// source can be a digest, reference or a descriptor JSON
dgst, err := digest.Parse(in)
if err == nil {
- return &imagetools.Source{
+ return &src{
Desc: ocispec.Descriptor{
Digest: dgst,
},
@@ -249,14 +234,14 @@ func parseSource(in string) (*imagetools.Source, error) {
ref, err := reference.ParseNormalizedNamed(in)
if err == nil {
- return &imagetools.Source{
+ return &src{
Ref: ref,
}, nil
} else if !strings.HasPrefix(in, "{") {
return nil, err
}
- var s imagetools.Source
+ var s src
if err := json.Unmarshal([]byte(in), &s.Desc); err != nil {
return nil, errors.WithStack(err)
}
@@ -270,7 +255,7 @@ func createCmd(dockerCli command.Cli, opts RootOptions) *cobra.Command {
Use: "create [OPTIONS] [SOURCE] [SOURCE...]", Use: "create [OPTIONS] [SOURCE] [SOURCE...]",
Short: "Create a new image based on source images", Short: "Create a new image based on source images",
RunE: func(cmd *cobra.Command, args []string) error { RunE: func(cmd *cobra.Command, args []string) error {
options.builder = *opts.Builder options.builder = opts.Builder
return runCreate(dockerCli, options, args) return runCreate(dockerCli, options, args)
}, },
} }
@@ -280,7 +265,6 @@ func createCmd(dockerCli command.Cli, opts RootOptions) *cobra.Command {
flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, "Set reference for new image")
flags.BoolVar(&options.dryrun, "dry-run", false, "Show final image instead of pushing")
flags.BoolVar(&options.actionAppend, "append", false, "Append to existing manifest")
- flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty"). Use plain to show container output`)
return cmd
}
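Note: parseSource above accepts three input syntaxes; a hedged sketch of each (assumes the function is in scope, values illustrative):

// Hypothetical inputs to parseSource as defined above:
s, _ := parseSource("sha256:0e3f1a...") // bare digest: Desc.Digest set, no Ref
s, _ = parseSource("alpine:3.16")       // image reference: Ref set
s, _ = parseSource(`{"digest":"sha256:0e3f1a...","size":1234}`) // descriptor JSON: Desc unmarshalled
_ = s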

commands/imagetools/inspect.go

@@ -1,44 +1,74 @@
package commands
import (
+ "fmt"
+ "os"
+ "github.com/containerd/containerd/images"
+ "github.com/docker/buildx/store"
+ "github.com/docker/buildx/store/storeutil"
- "github.com/docker/buildx/builder"
"github.com/docker/buildx/util/imagetools"
- "github.com/docker/cli-docs-tool/annotation"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/moby/buildkit/util/appcontext"
- "github.com/pkg/errors"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/spf13/cobra"
)
type inspectOptions struct {
- builder string
- format string
raw bool
+ builder string
}
func runInspect(dockerCli command.Cli, in inspectOptions, name string) error {
ctx := appcontext.Context()
- if in.format != "" && in.raw {
- return errors.Errorf("format and raw cannot be used together")
- }
- b, err := builder.New(dockerCli, builder.WithName(in.builder))
+ txn, release, err := storeutil.GetStore(dockerCli)
if err != nil {
return err
}
- imageopt, err := b.ImageOpt()
+ defer release()
+ var ng *store.NodeGroup
+ if in.builder != "" {
+ ng, err = storeutil.GetNodeGroup(txn, dockerCli, in.builder)
+ if err != nil {
+ return err
+ }
+ } else {
+ ng, err = storeutil.GetCurrentInstance(txn, dockerCli)
+ if err != nil {
+ return err
+ }
+ }
+ imageopt, err := storeutil.GetImageConfig(dockerCli, ng)
+ if err != nil {
+ return err
+ }
+ r := imagetools.New(imageopt)
+ dt, desc, err := r.Get(ctx, name)
if err != nil {
return err
}
- p, err := imagetools.NewPrinter(ctx, imageopt, name, in.format)
- if err != nil {
- return err
- }
- return p.Print(in.raw, dockerCli.Out())
+ if in.raw {
+ fmt.Printf("%s", dt) // avoid newline to keep digest
+ return nil
+ }
+ switch desc.MediaType {
+ // case images.MediaTypeDockerSchema2Manifest, specs.MediaTypeImageManifest:
+ // TODO: handle distribution manifest and schema1
+ case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
+ return imagetools.PrintManifestList(dt, desc, name, os.Stdout)
+ default:
+ fmt.Printf("%s\n", dt)
+ }
+ return nil
}
func inspectCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
@@ -46,20 +76,16 @@ func inspectCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
cmd := &cobra.Command{
Use: "inspect [OPTIONS] NAME",
- Short: "Show details of an image in the registry",
+ Short: "Show details of image in the registry",
Args: cli.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
- options.builder = *rootOpts.Builder
+ options.builder = rootOpts.Builder
return runInspect(dockerCli, options, args[0])
},
}
flags := cmd.Flags()
- flags.BoolVar(&options.raw, "raw", false, "Show original JSON manifest")
- flags.StringVar(&options.format, "format", "", "Format the output using the given Go template")
- flags.SetAnnotation("format", annotation.DefaultValue, []string{`"{{.Manifest}}"`})
+ flags.BoolVar(&options.raw, "raw", false, "Show original, unformatted JSON manifest")
return cmd
}

commands/imagetools/root.go

@@ -6,7 +6,7 @@ import (
)
type RootOptions struct {
- Builder *string
+ Builder string
}
func RootCmd(dockerCli command.Cli, opts RootOptions) *cobra.Command {
@@ -16,8 +16,8 @@ func RootCmd(dockerCli command.Cli, opts RootOptions) *cobra.Command {
}
cmd.AddCommand(
- createCmd(dockerCli, opts),
inspectCmd(dockerCli, opts),
+ createCmd(dockerCli, opts),
)
return cmd

commands/inspect.go

@@ -8,7 +8,8 @@ import (
"text/tabwriter" "text/tabwriter"
"time" "time"
"github.com/docker/buildx/builder" "github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/platformutil" "github.com/docker/buildx/util/platformutil"
"github.com/docker/cli/cli" "github.com/docker/cli/cli"
"github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command"
@@ -24,71 +25,87 @@ type inspectOptions struct {
func runInspect(dockerCli command.Cli, in inspectOptions) error {
ctx := appcontext.Context()
- b, err := builder.New(dockerCli,
- builder.WithName(in.builder),
- builder.WithSkippedValidation(),
- )
+ txn, release, err := storeutil.GetStore(dockerCli)
if err != nil {
return err
}
+ defer release()
+ var ng *store.NodeGroup
+ if in.builder != "" {
+ ng, err = storeutil.GetNodeGroup(txn, dockerCli, in.builder)
+ if err != nil {
+ return err
+ }
+ } else {
+ ng, err = storeutil.GetCurrentInstance(txn, dockerCli)
+ if err != nil {
+ return err
+ }
+ }
+ if ng == nil {
+ ng = &store.NodeGroup{
+ Name: "default",
+ Nodes: []store.Node{{
+ Name: "default",
+ Endpoint: "default",
+ }},
+ }
+ }
+ ngi := &nginfo{ng: ng}
timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
defer cancel()
- nodes, err := b.LoadNodes(timeoutCtx, true)
+ err = loadNodeGroupData(timeoutCtx, dockerCli, ngi)
+ var bootNgi *nginfo
if in.bootstrap {
var ok bool
- ok, err = b.Boot(ctx)
+ ok, err = boot(ctx, ngi)
if err != nil {
return err
}
+ bootNgi = ngi
if ok {
- nodes, err = b.LoadNodes(timeoutCtx, true)
+ ngi = &nginfo{ng: ng}
+ err = loadNodeGroupData(ctx, dockerCli, ngi)
}
}
w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
- fmt.Fprintf(w, "Name:\t%s\n", b.Name)
- fmt.Fprintf(w, "Driver:\t%s\n", b.Driver)
- if !b.NodeGroup.LastActivity.IsZero() {
- fmt.Fprintf(w, "Last Activity:\t%v\n", b.NodeGroup.LastActivity)
- }
+ fmt.Fprintf(w, "Name:\t%s\n", ngi.ng.Name)
+ fmt.Fprintf(w, "Driver:\t%s\n", ngi.ng.Driver)
if err != nil {
fmt.Fprintf(w, "Error:\t%s\n", err.Error())
- } else if b.Err() != nil {
- fmt.Fprintf(w, "Error:\t%s\n", b.Err().Error())
+ } else if ngi.err != nil {
+ fmt.Fprintf(w, "Error:\t%s\n", ngi.err.Error())
}
if err == nil {
fmt.Fprintln(w, "")
fmt.Fprintln(w, "Nodes:")
- for i, n := range nodes {
+ for i, n := range ngi.ng.Nodes {
if i != 0 {
fmt.Fprintln(w, "")
}
fmt.Fprintf(w, "Name:\t%s\n", n.Name)
fmt.Fprintf(w, "Endpoint:\t%s\n", n.Endpoint)
- var driverOpts []string
- for k, v := range n.DriverOpts {
- driverOpts = append(driverOpts, fmt.Sprintf("%s=%q", k, v))
- }
- if len(driverOpts) > 0 {
- fmt.Fprintf(w, "Driver Options:\t%s\n", strings.Join(driverOpts, " "))
- }
- if err := n.Err; err != nil {
+ if err := ngi.drivers[i].di.Err; err != nil {
fmt.Fprintf(w, "Error:\t%s\n", err.Error())
+ } else if err := ngi.drivers[i].err; err != nil {
+ fmt.Fprintf(w, "Error:\t%s\n", err.Error())
+ } else if bootNgi != nil && len(bootNgi.drivers) > i && bootNgi.drivers[i].err != nil {
+ fmt.Fprintf(w, "Error:\t%s\n", bootNgi.drivers[i].err.Error())
} else {
- fmt.Fprintf(w, "Status:\t%s\n", nodes[i].DriverInfo.Status)
+ fmt.Fprintf(w, "Status:\t%s\n", ngi.drivers[i].info.Status)
if len(n.Flags) > 0 {
fmt.Fprintf(w, "Flags:\t%s\n", strings.Join(n.Flags, " "))
}
- if nodes[i].Version != "" {
- fmt.Fprintf(w, "Buildkit:\t%s\n", nodes[i].Version)
- }
- fmt.Fprintf(w, "Platforms:\t%s\n", strings.Join(platformutil.FormatInGroups(n.Node.Platforms, n.Platforms), ", "))
+ fmt.Fprintf(w, "Platforms:\t%s\n", strings.Join(platformutil.FormatInGroups(n.Platforms, ngi.drivers[i].platforms), ", "))
}
}
}

commands/ls.go

@@ -4,11 +4,12 @@ import (
"context" "context"
"fmt" "fmt"
"io" "io"
"os"
"strings" "strings"
"text/tabwriter" "text/tabwriter"
"time" "time"
"github.com/docker/buildx/builder" "github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil" "github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/cobrautil" "github.com/docker/buildx/util/cobrautil"
"github.com/docker/buildx/util/platformutil" "github.com/docker/buildx/util/platformutil"
@@ -31,24 +32,45 @@ func runLs(dockerCli command.Cli, in lsOptions) error {
}
defer release()
- current, err := storeutil.GetCurrentInstance(txn, dockerCli)
- if err != nil {
- return err
- }
- builders, err := builder.GetBuilders(dockerCli, txn)
- if err != nil {
- return err
- }
- timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
+ ctx, cancel := context.WithTimeout(ctx, 20*time.Second)
defer cancel()
- eg, _ := errgroup.WithContext(timeoutCtx)
+ ll, err := txn.List()
+ if err != nil {
+ return err
+ }
+ builders := make([]*nginfo, len(ll))
+ for i, ng := range ll {
+ builders[i] = &nginfo{ng: ng}
+ }
+ list, err := dockerCli.ContextStore().List()
+ if err != nil {
+ return err
+ }
+ ctxbuilders := make([]*nginfo, len(list))
+ for i, l := range list {
+ ctxbuilders[i] = &nginfo{ng: &store.NodeGroup{
+ Name: l.Name,
+ Nodes: []store.Node{{
+ Name: l.Name,
+ Endpoint: l.Name,
+ }},
+ }}
+ }
+ builders = append(builders, ctxbuilders...)
+ eg, _ := errgroup.WithContext(ctx)
for _, b := range builders {
- func(b *builder.Builder) {
+ func(b *nginfo) {
eg.Go(func() error {
- _, _ = b.LoadNodes(timeoutCtx, true)
+ err = loadNodeGroupData(ctx, dockerCli, b)
+ if b.err == nil && err != nil {
+ b.err = err
+ }
return nil
})
}(b)
@@ -58,62 +80,61 @@ func runLs(dockerCli command.Cli, in lsOptions) error {
return err
}
- w := tabwriter.NewWriter(dockerCli.Out(), 0, 0, 1, ' ', 0)
- fmt.Fprintf(w, "NAME/NODE\tDRIVER/ENDPOINT\tSTATUS\tBUILDKIT\tPLATFORMS\n")
- printErr := false
+ currentName := "default"
+ current, err := storeutil.GetCurrentInstance(txn, dockerCli)
+ if err != nil {
+ return err
+ }
+ if current != nil {
+ currentName = current.Name
+ if current.Name == "default" {
+ currentName = current.Nodes[0].Endpoint
+ }
+ }
+ w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
+ fmt.Fprintf(w, "NAME/NODE\tDRIVER/ENDPOINT\tSTATUS\tPLATFORMS\n")
+ currentSet := false
for _, b := range builders {
- if current.Name == b.Name {
- b.Name += " *"
- }
- if ok := printBuilder(w, b); !ok {
- printErr = true
+ if !currentSet && b.ng.Name == currentName {
+ b.ng.Name += " *"
+ currentSet = true
}
+ printngi(w, b)
}
w.Flush()
- if printErr {
- _, _ = fmt.Fprintf(dockerCli.Err(), "\n")
- for _, b := range builders {
- if b.Err() != nil {
- _, _ = fmt.Fprintf(dockerCli.Err(), "Cannot load builder %s: %s\n", b.Name, strings.TrimSpace(b.Err().Error()))
- } else {
- for _, d := range b.Nodes() {
- if d.Err != nil {
- _, _ = fmt.Fprintf(dockerCli.Err(), "Failed to get status for %s (%s): %s\n", b.Name, d.Name, strings.TrimSpace(d.Err.Error()))
- }
- }
- }
- }
- }
return nil
}
- func printBuilder(w io.Writer, b *builder.Builder) (ok bool) {
- ok = true
+ func printngi(w io.Writer, ngi *nginfo) {
var err string
- if b.Err() != nil {
- ok = false
- err = "error"
+ if ngi.err != nil {
+ err = ngi.err.Error()
}
- fmt.Fprintf(w, "%s\t%s\t%s\t\t\n", b.Name, b.Driver, err)
- if b.Err() == nil {
- for _, n := range b.Nodes() {
+ fmt.Fprintf(w, "%s\t%s\t%s\t\n", ngi.ng.Name, ngi.ng.Driver, err)
+ if ngi.err == nil {
+ for idx, n := range ngi.ng.Nodes {
+ d := ngi.drivers[idx]
+ var err string
+ if d.err != nil {
+ err = d.err.Error()
+ } else if d.di.Err != nil {
+ err = d.di.Err.Error()
+ }
var status string
- if n.DriverInfo != nil {
- status = n.DriverInfo.Status.String()
+ if d.info != nil {
+ status = d.info.Status.String()
}
- if n.Err != nil {
- ok = false
- fmt.Fprintf(w, " %s\t%s\t%s\t\t\n", n.Name, n.Endpoint, "error")
+ if err != "" {
+ fmt.Fprintf(w, " %s\t%s\t%s\n", n.Name, n.Endpoint, err)
} else {
- fmt.Fprintf(w, " %s\t%s\t%s\t%s\t%s\n", n.Name, n.Endpoint, status, n.Version, strings.Join(platformutil.FormatInGroups(n.Node.Platforms, n.Platforms), ", "))
+ fmt.Fprintf(w, " %s\t%s\t%s\t%s\n", n.Name, n.Endpoint, status, strings.Join(platformutil.FormatInGroups(n.Platforms, d.platforms), ", "))
}
}
}
- return
}
func lsCmd(dockerCli command.Cli) *cobra.Command {

commands/print.go

@@ -1,48 +0,0 @@
package commands
import (
"fmt"
"io"
"log"
"os"
"github.com/docker/buildx/build"
"github.com/docker/docker/api/types/versions"
"github.com/moby/buildkit/frontend/subrequests"
"github.com/moby/buildkit/frontend/subrequests/outline"
"github.com/moby/buildkit/frontend/subrequests/targets"
)
func printResult(f *build.PrintFunc, res map[string]string) error {
switch f.Name {
case "outline":
return printValue(outline.PrintOutline, outline.SubrequestsOutlineDefinition.Version, f.Format, res)
case "targets":
return printValue(targets.PrintTargets, targets.SubrequestsTargetsDefinition.Version, f.Format, res)
case "subrequests.describe":
return printValue(subrequests.PrintDescribe, subrequests.SubrequestsDescribeDefinition.Version, f.Format, res)
default:
if dt, ok := res["result.txt"]; ok {
fmt.Print(dt)
} else {
log.Printf("%s %+v", f, res)
}
}
return nil
}
type printFunc func([]byte, io.Writer) error
func printValue(printer printFunc, version string, format string, res map[string]string) error {
if format == "json" {
fmt.Fprintln(os.Stdout, res["result.json"])
return nil
}
if res["version"] != "" && versions.LessThan(version, res["version"]) && res["result.txt"] != "" {
// structure is too new and we don't know how to print it
fmt.Fprint(os.Stdout, res["result.txt"])
return nil
}
return printer([]byte(res["result.json"]), os.Stdout)
}

commands/prune.go

@@ -7,16 +7,16 @@ import (
"text/tabwriter" "text/tabwriter"
"time" "time"
"github.com/docker/buildx/builder" "github.com/docker/buildx/build"
"github.com/docker/cli/cli" "github.com/docker/cli/cli"
"github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command"
"github.com/docker/cli/opts" "github.com/docker/cli/opts"
"github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/filters"
"github.com/docker/go-units"
"github.com/moby/buildkit/client" "github.com/moby/buildkit/client"
"github.com/moby/buildkit/util/appcontext" "github.com/moby/buildkit/util/appcontext"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/tonistiigi/units"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
@@ -54,19 +54,15 @@ func runPrune(dockerCli command.Cli, opts pruneOptions) error {
return nil
}
- b, err := builder.New(dockerCli, builder.WithName(opts.builder))
+ dis, err := getInstanceOrDefault(ctx, dockerCli, opts.builder, "")
if err != nil {
return err
}
- nodes, err := b.LoadNodes(ctx, false)
- if err != nil {
- return err
- }
- for _, node := range nodes {
- if node.Err != nil {
- return node.Err
+ for _, di := range dis {
+ if di.Err != nil {
+ return err
}
}
ch := make(chan client.UsageInfo)
@@ -94,11 +90,11 @@ func runPrune(dockerCli command.Cli, opts pruneOptions) error {
}()
eg, ctx := errgroup.WithContext(ctx)
- for _, node := range nodes {
- func(node builder.Node) {
+ for _, di := range dis {
+ func(di build.DriverInfo) {
eg.Go(func() error {
- if node.Driver != nil {
- c, err := node.Driver.Client(ctx)
+ if di.Driver != nil {
+ c, err := di.Driver.Client(ctx)
if err != nil {
return err
}
@@ -113,7 +109,7 @@ func runPrune(dockerCli command.Cli, opts pruneOptions) error {
}
return nil
})
- }(node)
+ }(di)
}
if err := eg.Wait(); err != nil {
@@ -123,7 +119,7 @@ func runPrune(dockerCli command.Cli, opts pruneOptions) error {
<-printed
tw = tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0)
- fmt.Fprintf(tw, "Total:\t%s\n", units.HumanSize(float64(total)))
+ fmt.Fprintf(tw, "Total:\t%.2f\n", units.Bytes(total))
tw.Flush()
return nil
}
@@ -142,8 +138,8 @@ func pruneCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
}
flags := cmd.Flags()
- flags.BoolVarP(&options.all, "all", "a", false, "Include internal/frontend images")
- flags.Var(&options.filter, "filter", `Provide filter values (e.g., "until=24h")`)
+ flags.BoolVarP(&options.all, "all", "a", false, "Remove all unused images, not just dangling ones")
+ flags.Var(&options.filter, "filter", "Provide filter values (e.g., `until=24h`)")
flags.Var(&options.keepStorage, "keep-storage", "Amount of disk space to keep for cache")
flags.BoolVar(&options.verbose, "verbose", false, "Provide a more verbose output")
flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation")
@@ -159,9 +155,9 @@ func toBuildkitPruneInfo(f filters.Args) (*client.PruneInfo, error) {
if len(untilValues) > 0 && len(unusedForValues) > 0 {
return nil, errors.Errorf("conflicting filters %q and %q", "until", "unused-for")
}
- untilKey := "until"
+ filterKey := "until"
if len(unusedForValues) > 0 {
- untilKey = "unused-for"
+ filterKey = "unused-for"
}
untilValues = append(untilValues, unusedForValues...)
@@ -172,27 +168,23 @@ func toBuildkitPruneInfo(f filters.Args) (*client.PruneInfo, error) {
var err error
until, err = time.ParseDuration(untilValues[0])
if err != nil {
- return nil, errors.Wrapf(err, "%q filter expects a duration (e.g., '24h')", untilKey)
+ return nil, errors.Wrapf(err, "%q filter expects a duration (e.g., '24h')", filterKey)
}
default:
return nil, errors.Errorf("filters expect only one value")
}
- filters := make([]string, 0, f.Len())
- for _, filterKey := range f.Keys() {
- if filterKey == untilKey {
- continue
- }
- values := f.Get(filterKey)
+ bkFilter := make([]string, 0, f.Len())
+ for _, field := range f.Keys() {
+ values := f.Get(field)
switch len(values) {
case 0:
- filters = append(filters, filterKey)
+ bkFilter = append(bkFilter, field)
case 1:
- if filterKey == "id" {
- filters = append(filters, filterKey+"~="+values[0])
+ if field == "id" {
+ bkFilter = append(bkFilter, field+"~="+values[0])
} else {
- filters = append(filters, filterKey+"=="+values[0])
+ bkFilter = append(bkFilter, field+"=="+values[0])
}
default:
return nil, errors.Errorf("filters expect only one value")
@@ -200,6 +192,6 @@ func toBuildkitPruneInfo(f filters.Args) (*client.PruneInfo, error) {
}
return &client.PruneInfo{
KeepDuration: until,
- Filter: []string{strings.Join(filters, ",")},
+ Filter: []string{strings.Join(bkFilter, ",")},
}, nil
}
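Note: a hedged sketch of toBuildkitPruneInfo against the newer (left-hand) variant above, where the "until" key is consumed into KeepDuration and "id" becomes a regex match (values illustrative):

f := filters.NewArgs()
f.Add("until", "24h")
f.Add("id", "abc123")
pi, err := toBuildkitPruneInfo(f)
if err != nil {
	panic(err)
}
// pi.KeepDuration == 24 * time.Hour
// pi.Filter == []string{"id~=abc123"}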

commands/rm.go

@@ -2,76 +2,53 @@ package commands
import (
"context"
- "fmt"
- "time"
- "github.com/docker/buildx/builder"
"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/moby/buildkit/util/appcontext"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
- "golang.org/x/sync/errgroup"
)
type rmOptions struct {
builder string
keepState bool
- keepDaemon bool
- allInactive bool
- force bool
}
- const (
- rmInactiveWarning = `WARNING! This will remove all builders that are not in running state. Are you sure you want to continue?`
- )
func runRm(dockerCli command.Cli, in rmOptions) error {
ctx := appcontext.Context()
- if in.allInactive && !in.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), rmInactiveWarning) {
- return nil
- }
txn, release, err := storeutil.GetStore(dockerCli)
if err != nil {
return err
}
defer release()
- if in.allInactive {
- return rmAllInactive(ctx, txn, dockerCli, in)
- }
- b, err := builder.New(dockerCli,
- builder.WithName(in.builder),
- builder.WithStore(txn),
- builder.WithSkippedValidation(),
- )
- if err != nil {
- return err
- }
- nodes, err := b.LoadNodes(ctx, false)
- if err != nil {
+ if in.builder != "" {
+ ng, err := storeutil.GetNodeGroup(txn, dockerCli, in.builder)
+ if err != nil {
+ return err
+ }
+ err1 := rm(ctx, dockerCli, ng, in.keepState)
+ if err := txn.Remove(ng.Name); err != nil {
+ return err
+ }
+ return err1
+ }
+ ng, err := storeutil.GetCurrentInstance(txn, dockerCli)
+ if err != nil {
return err
}
- if cb := b.ContextName(); cb != "" {
- return errors.Errorf("context builder cannot be removed, run `docker context rm %s` to remove this context", cb)
- }
- err1 := rm(ctx, nodes, in)
- if err := txn.Remove(b.Name); err != nil {
- return err
- }
- if err1 != nil {
+ if ng != nil {
+ err1 := rm(ctx, dockerCli, ng, in.keepState)
+ if err := txn.Remove(ng.Name); err != nil {
+ return err
+ }
return err1
}
- _, _ = fmt.Fprintf(dockerCli.Err(), "%s removed\n", b.Name)
return nil
@@ -85,9 +62,6 @@ func rmCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
RunE: func(cmd *cobra.Command, args []string) error {
options.builder = rootOpts.builder
if len(args) > 0 {
- if options.allInactive {
- return errors.New("cannot specify builder name when --all-inactive is set")
- }
options.builder = args[0]
}
return runRm(dockerCli, options)
@@ -96,69 +70,27 @@ func rmCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
flags := cmd.Flags()
flags.BoolVar(&options.keepState, "keep-state", false, "Keep BuildKit state")
- flags.BoolVar(&options.keepDaemon, "keep-daemon", false, "Keep the buildkitd daemon running")
- flags.BoolVar(&options.allInactive, "all-inactive", false, "Remove all inactive builders")
- flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation")
return cmd
}
- func rm(ctx context.Context, nodes []builder.Node, in rmOptions) (err error) {
- for _, node := range nodes {
- if node.Driver == nil {
- continue
- }
- // Do not stop the buildkitd daemon when --keep-daemon is provided
- if !in.keepDaemon {
- if err := node.Driver.Stop(ctx, true); err != nil {
- return err
- }
- }
- if err := node.Driver.Rm(ctx, true, !in.keepState, !in.keepDaemon); err != nil {
- return err
- }
- if node.Err != nil {
- err = node.Err
- }
- }
- return err
- }
- func rmAllInactive(ctx context.Context, txn *store.Txn, dockerCli command.Cli, in rmOptions) error {
- builders, err := builder.GetBuilders(dockerCli, txn)
- if err != nil {
- return err
- }
- timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
- defer cancel()
- eg, _ := errgroup.WithContext(timeoutCtx)
- for _, b := range builders {
- func(b *builder.Builder) {
- eg.Go(func() error {
- nodes, err := b.LoadNodes(timeoutCtx, true)
- if err != nil {
- return errors.Wrapf(err, "cannot load %s", b.Name)
- }
- if cb := b.ContextName(); cb != "" {
- return errors.Errorf("context builder cannot be removed, run `docker context rm %s` to remove this context", cb)
- }
- if b.Dynamic {
- return nil
- }
- if b.Inactive() {
- rmerr := rm(ctx, nodes, in)
- if err := txn.Remove(b.Name); err != nil {
- return err
- }
- _, _ = fmt.Fprintf(dockerCli.Err(), "%s removed\n", b.Name)
- return rmerr
- }
- return nil
- })
- }(b)
- }
- return eg.Wait()
- }
+ func rm(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, keepState bool) error {
+ dis, err := driversForNodeGroup(ctx, dockerCli, ng, "")
+ if err != nil {
+ return err
+ }
+ for _, di := range dis {
+ if di.Driver != nil {
+ if err := di.Driver.Stop(ctx, true); err != nil {
+ return err
+ }
+ if err := di.Driver.Rm(ctx, true, !keepState); err != nil {
+ return err
+ }
+ }
+ if di.Err != nil {
+ err = di.Err
+ }
+ }
+ return err
+ }

commands/root.go

@@ -4,12 +4,8 @@ import (
"os" "os"
imagetoolscmd "github.com/docker/buildx/commands/imagetools" imagetoolscmd "github.com/docker/buildx/commands/imagetools"
"github.com/docker/buildx/util/logutil"
"github.com/docker/cli-docs-tool/annotation"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli-plugins/plugin" "github.com/docker/cli/cli-plugins/plugin"
"github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/spf13/pflag" "github.com/spf13/pflag"
) )
@@ -19,45 +15,13 @@ func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Comman
Short: "Docker Buildx", Short: "Docker Buildx",
Long: `Extended build capabilities with BuildKit`, Long: `Extended build capabilities with BuildKit`,
Use: name, Use: name,
Annotations: map[string]string{
annotation.CodeDelimiter: `"`,
},
} }
if isPlugin { if isPlugin {
cmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error { cmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {
return plugin.PersistentPreRunE(cmd, args) return plugin.PersistentPreRunE(cmd, args)
} }
} else {
// match plugin behavior for standalone mode
// https://github.com/docker/cli/blob/6c9eb708fa6d17765d71965f90e1c59cea686ee9/cli-plugins/plugin/plugin.go#L117-L127
cmd.SilenceUsage = true
cmd.SilenceErrors = true
cmd.TraverseChildren = true
cmd.DisableFlagsInUseLine = true
cli.DisableFlagsInUseLine(cmd)
} }
logrus.SetFormatter(&logutil.Formatter{})
logrus.AddHook(logutil.NewFilter([]logrus.Level{
logrus.DebugLevel,
},
"serving grpc connection",
"stopping session",
"using default config store",
))
// filter out useless commandConn.CloseWrite warning message that can occur
// when listing builder instances with "buildx ls" for those that are
// unreachable: "commandConn.CloseWrite: commandconn: failed to wait: signal: killed"
// https://github.com/docker/cli/blob/3fb4fb83dfb5db0c0753a8316f21aea54dab32c5/cli/connhelper/commandconn/commandconn.go#L203-L214
logrus.AddHook(logutil.NewFilter([]logrus.Level{
logrus.WarnLevel,
},
"commandConn.CloseWrite:",
"commandConn.CloseRead:",
))
addCommands(cmd, dockerCli)
return cmd
}
@@ -84,7 +48,7 @@ func addCommands(cmd *cobra.Command, dockerCli command.Cli) {
versionCmd(dockerCli),
pruneCmd(dockerCli, opts),
duCmd(dockerCli, opts),
- imagetoolscmd.RootCmd(dockerCli, imagetoolscmd.RootOptions{Builder: &opts.builder}),
+ imagetoolscmd.RootCmd(dockerCli, imagetoolscmd.RootOptions{Builder: opts.builder}),
)
}

commands/stop.go

@@ -3,7 +3,8 @@ package commands
import (
"context"
- "github.com/docker/buildx/builder"
+ "github.com/docker/buildx/store"
+ "github.com/docker/buildx/store/storeutil"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/moby/buildkit/util/appcontext"
@@ -17,19 +18,32 @@ type stopOptions struct {
func runStop(dockerCli command.Cli, in stopOptions) error {
ctx := appcontext.Context()
- b, err := builder.New(dockerCli,
- builder.WithName(in.builder),
- builder.WithSkippedValidation(),
- )
+ txn, release, err := storeutil.GetStore(dockerCli)
if err != nil {
return err
}
- nodes, err := b.LoadNodes(ctx, false)
- if err != nil {
- return err
+ defer release()
+ if in.builder != "" {
+ ng, err := storeutil.GetNodeGroup(txn, dockerCli, in.builder)
+ if err != nil {
+ return err
+ }
+ if err := stop(ctx, dockerCli, ng); err != nil {
+ return err
+ }
+ return nil
}
- return stop(ctx, nodes)
+ ng, err := storeutil.GetCurrentInstance(txn, dockerCli)
+ if err != nil {
+ return err
+ }
+ if ng != nil {
+ return stop(ctx, dockerCli, ng)
+ }
+ return stopCurrent(ctx, dockerCli)
}
func stopCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
@@ -51,15 +65,37 @@ func stopCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
return cmd
}
- func stop(ctx context.Context, nodes []builder.Node) (err error) {
- for _, node := range nodes {
- if node.Driver != nil {
- if err := node.Driver.Stop(ctx, true); err != nil {
- return err
- }
- }
- if node.Err != nil {
- err = node.Err
- }
- }
- return err
- }
+ func stop(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup) error {
+ dis, err := driversForNodeGroup(ctx, dockerCli, ng, "")
+ if err != nil {
+ return err
+ }
+ for _, di := range dis {
+ if di.Driver != nil {
+ if err := di.Driver.Stop(ctx, true); err != nil {
+ return err
+ }
+ }
+ if di.Err != nil {
+ err = di.Err
+ }
+ }
+ return err
+ }
+ func stopCurrent(ctx context.Context, dockerCli command.Cli) error {
+ dis, err := getDefaultDrivers(ctx, dockerCli, false, "")
+ if err != nil {
+ return err
+ }
+ for _, di := range dis {
+ if di.Driver != nil {
+ if err := di.Driver.Stop(ctx, true); err != nil {
+ return err
+ }
+ }
+ if di.Err != nil {
+ err = di.Err
+ }
+ }
+ return err

commands/use.go

@@ -4,7 +4,6 @@ import (
"os" "os"
"github.com/docker/buildx/store/storeutil" "github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/dockerutil"
"github.com/docker/cli/cli" "github.com/docker/cli/cli"
"github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command"
"github.com/pkg/errors" "github.com/pkg/errors"
@@ -30,7 +29,7 @@ func runUse(dockerCli command.Cli, in useOptions) error {
return errors.Errorf("run `docker context use default` to switch to default context") return errors.Errorf("run `docker context use default` to switch to default context")
} }
if in.builder == "default" || in.builder == dockerCli.CurrentContext() { if in.builder == "default" || in.builder == dockerCli.CurrentContext() {
ep, err := dockerutil.GetCurrentEndpoint(dockerCli) ep, err := storeutil.GetCurrentEndpoint(dockerCli)
if err != nil { if err != nil {
return err return err
} }
@@ -53,7 +52,7 @@ func runUse(dockerCli command.Cli, in useOptions) error {
return errors.Wrapf(err, "failed to find instance %q", in.builder) return errors.Wrapf(err, "failed to find instance %q", in.builder)
} }
ep, err := dockerutil.GetCurrentEndpoint(dockerCli) ep, err := storeutil.GetCurrentEndpoint(dockerCli)
if err != nil { if err != nil {
return err return err
} }

commands/util.go (new file)

@@ -0,0 +1,432 @@
package commands
import (
"context"
"net/url"
"os"
"strings"
"github.com/docker/buildx/build"
"github.com/docker/buildx/driver"
"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/platformutil"
"github.com/docker/buildx/util/progress"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/context/docker"
"github.com/docker/cli/cli/context/kubernetes"
ctxstore "github.com/docker/cli/cli/context/store"
dopts "github.com/docker/cli/opts"
dockerclient "github.com/docker/docker/client"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"k8s.io/client-go/tools/clientcmd"
)
// validateEndpoint validates that endpoint is either a context or a docker host
func validateEndpoint(dockerCli command.Cli, ep string) (string, error) {
de, err := storeutil.GetDockerEndpoint(dockerCli, ep)
if err == nil && de != "" {
if ep == "default" {
return de, nil
}
return ep, nil
}
h, err := dopts.ParseHost(true, ep)
if err != nil {
return "", errors.Wrapf(err, "failed to parse endpoint %s", ep)
}
return h, nil
}
// driversForNodeGroup returns drivers for a nodegroup instance
func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, contextPathHash string) ([]build.DriverInfo, error) {
eg, _ := errgroup.WithContext(ctx)
dis := make([]build.DriverInfo, len(ng.Nodes))
var f driver.Factory
if ng.Driver != "" {
f = driver.GetFactory(ng.Driver, true)
if f == nil {
return nil, errors.Errorf("failed to find driver %q", f)
}
} else {
dockerapi, err := clientForEndpoint(dockerCli, ng.Nodes[0].Endpoint)
if err != nil {
return nil, err
}
f, err = driver.GetDefaultFactory(ctx, dockerapi, false)
if err != nil {
return nil, err
}
ng.Driver = f.Name()
}
imageopt, err := storeutil.GetImageConfig(dockerCli, ng)
if err != nil {
return nil, err
}
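// initialize the driver for each node in parallel; per-node failures are
// recorded in di.Err below so one broken node does not fail the whole group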
for i, n := range ng.Nodes {
func(i int, n store.Node) {
eg.Go(func() error {
di := build.DriverInfo{
Name: n.Name,
Platform: n.Platforms,
}
defer func() {
dis[i] = di
}()
dockerapi, err := clientForEndpoint(dockerCli, n.Endpoint)
if err != nil {
di.Err = err
return nil
}
// TODO: replace the following line with dockerclient.WithAPIVersionNegotiation option in clientForEndpoint
dockerapi.NegotiateAPIVersion(ctx)
contextStore := dockerCli.ContextStore()
var kcc driver.KubeClientConfig
kcc, err = configFromContext(n.Endpoint, contextStore)
if err != nil {
// err is returned if n.Endpoint is a non-context name like "unix:///var/run/docker.sock".
// try again with name="default".
// FIXME: n should retain real context name.
kcc, err = configFromContext("default", contextStore)
if err != nil {
logrus.Error(err)
}
}
tryToUseKubeConfigInCluster := false
if kcc == nil {
tryToUseKubeConfigInCluster = true
} else {
if _, err := kcc.ClientConfig(); err != nil {
tryToUseKubeConfigInCluster = true
}
}
if tryToUseKubeConfigInCluster {
kccInCluster := driver.KubeClientConfigInCluster{}
if _, err := kccInCluster.ClientConfig(); err == nil {
logrus.Debug("using kube config in cluster")
kcc = kccInCluster
}
}
d, err := driver.GetDriver(ctx, "buildx_buildkit_"+n.Name, f, dockerapi, imageopt.Auth, kcc, n.Flags, n.Files, n.DriverOpts, n.Platforms, contextPathHash)
if err != nil {
di.Err = err
return nil
}
di.Driver = d
di.ImageOpt = imageopt
return nil
})
}(i, n)
}
if err := eg.Wait(); err != nil {
return nil, err
}
return dis, nil
}
func configFromContext(endpointName string, s ctxstore.Reader) (clientcmd.ClientConfig, error) {
if strings.HasPrefix(endpointName, "kubernetes://") {
u, _ := url.Parse(endpointName)
if kubeconfig := u.Query().Get("kubeconfig"); kubeconfig != "" {
_ = os.Setenv(clientcmd.RecommendedConfigPathEnvVar, kubeconfig)
}
rules := clientcmd.NewDefaultClientConfigLoadingRules()
apiConfig, err := rules.Load()
if err != nil {
return nil, err
}
return clientcmd.NewDefaultClientConfig(*apiConfig, &clientcmd.ConfigOverrides{}), nil
}
return kubernetes.ConfigFromContext(endpointName, s)
}
// clientForEndpoint returns a docker client for an endpoint
func clientForEndpoint(dockerCli command.Cli, name string) (dockerclient.APIClient, error) {
list, err := dockerCli.ContextStore().List()
if err != nil {
return nil, err
}
for _, l := range list {
if l.Name == name {
dep, ok := l.Endpoints["docker"]
if !ok {
return nil, errors.Errorf("context %q does not have a Docker endpoint", name)
}
epm, ok := dep.(docker.EndpointMeta)
if !ok {
return nil, errors.Errorf("endpoint %q is not of type EndpointMeta, %T", dep, dep)
}
ep, err := docker.WithTLSData(dockerCli.ContextStore(), name, epm)
if err != nil {
return nil, err
}
clientOpts, err := ep.ClientOpts()
if err != nil {
return nil, err
}
return dockerclient.NewClientWithOpts(clientOpts...)
}
}
ep := docker.Endpoint{
EndpointMeta: docker.EndpointMeta{
Host: name,
},
}
clientOpts, err := ep.ClientOpts()
if err != nil {
return nil, err
}
return dockerclient.NewClientWithOpts(clientOpts...)
}
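// getInstanceOrDefault returns the drivers for the named builder instance, or
// the drivers for the current/default instance when no name is given.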
func getInstanceOrDefault(ctx context.Context, dockerCli command.Cli, instance, contextPathHash string) ([]build.DriverInfo, error) {
var defaultOnly bool
if instance == "default" && instance != dockerCli.CurrentContext() {
return nil, errors.Errorf("use `docker --context=default buildx` to switch to default context")
}
if instance == "default" || instance == dockerCli.CurrentContext() {
instance = ""
defaultOnly = true
}
list, err := dockerCli.ContextStore().List()
if err != nil {
return nil, err
}
for _, l := range list {
if l.Name == instance {
return nil, errors.Errorf("use `docker --context=%s buildx` to switch to context %s", instance, instance)
}
}
if instance != "" {
return getInstanceByName(ctx, dockerCli, instance, contextPathHash)
}
return getDefaultDrivers(ctx, dockerCli, defaultOnly, contextPathHash)
}
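// getInstanceByName looks up a builder instance in the store and returns its drivers.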
func getInstanceByName(ctx context.Context, dockerCli command.Cli, instance, contextPathHash string) ([]build.DriverInfo, error) {
txn, release, err := storeutil.GetStore(dockerCli)
if err != nil {
return nil, err
}
defer release()
ng, err := txn.NodeGroupByName(instance)
if err != nil {
return nil, err
}
return driversForNodeGroup(ctx, dockerCli, ng, contextPathHash)
}
// getDefaultDrivers returns drivers based on current cli config
func getDefaultDrivers(ctx context.Context, dockerCli command.Cli, defaultOnly bool, contextPathHash string) ([]build.DriverInfo, error) {
txn, release, err := storeutil.GetStore(dockerCli)
if err != nil {
return nil, err
}
defer release()
if !defaultOnly {
ng, err := storeutil.GetCurrentInstance(txn, dockerCli)
if err != nil {
return nil, err
}
if ng != nil {
return driversForNodeGroup(ctx, dockerCli, ng, contextPathHash)
}
}
imageopt, err := storeutil.GetImageConfig(dockerCli, nil)
if err != nil {
return nil, err
}
d, err := driver.GetDriver(ctx, "buildx_buildkit_default", nil, dockerCli.Client(), imageopt.Auth, nil, nil, nil, nil, nil, contextPathHash)
if err != nil {
return nil, err
}
return []build.DriverInfo{
{
Name: "default",
Driver: d,
ImageOpt: imageopt,
},
}, nil
}
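// loadInfoData fetches the driver status and, for running drivers, the set of
// platforms supported by its workers.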
func loadInfoData(ctx context.Context, d *dinfo) error {
if d.di.Driver == nil {
return nil
}
info, err := d.di.Driver.Info(ctx)
if err != nil {
return err
}
d.info = info
if info.Status == driver.Running {
c, err := d.di.Driver.Client(ctx)
if err != nil {
return err
}
workers, err := c.ListWorkers(ctx)
if err != nil {
return errors.Wrap(err, "listing workers")
}
for _, w := range workers {
for _, p := range w.Platforms {
d.platforms = append(d.platforms, p)
}
}
d.platforms = platformutil.Dedupe(d.platforms)
}
return nil
}
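// loadNodeGroupData loads driver and worker info for every node of the group
// and, when all drivers are Kubernetes ones, replaces the static nodes in the
// store with the dynamically discovered ones.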
func loadNodeGroupData(ctx context.Context, dockerCli command.Cli, ngi *nginfo) error {
eg, _ := errgroup.WithContext(ctx)
dis, err := driversForNodeGroup(ctx, dockerCli, ngi.ng, "")
if err != nil {
return err
}
ngi.drivers = make([]dinfo, len(dis))
for i, di := range dis {
d := di
ngi.drivers[i].di = &d
func(d *dinfo) {
eg.Go(func() error {
if err := loadInfoData(ctx, d); err != nil {
d.err = err
}
return nil
})
}(&ngi.drivers[i])
}
if err := eg.Wait(); err != nil {
return err
}
kubernetesDriverCount := 0
for _, di := range ngi.drivers {
if di.info != nil && len(di.info.DynamicNodes) > 0 {
kubernetesDriverCount++
}
}
isAllKubernetesDrivers := len(ngi.drivers) == kubernetesDriverCount
if isAllKubernetesDrivers {
var drivers []dinfo
var dynamicNodes []store.Node
for _, di := range ngi.drivers {
// dynamic nodes are used in Kubernetes driver.
// Kubernetes pods are dynamically mapped to BuildKit Nodes.
if di.info != nil && len(di.info.DynamicNodes) > 0 {
for i := 0; i < len(di.info.DynamicNodes); i++ {
// all []dinfo share *build.DriverInfo and *driver.Info
diClone := di
if pl := di.info.DynamicNodes[i].Platforms; len(pl) > 0 {
diClone.platforms = pl
}
drivers = append(drivers, diClone) // append the clone so the per-node platform override is kept
}
dynamicNodes = append(dynamicNodes, di.info.DynamicNodes...)
}
}
// not append (remove the static nodes in the store)
ngi.ng.Nodes = dynamicNodes
ngi.drivers = drivers
ngi.ng.Dynamic = true
}
return nil
}
func dockerAPI(dockerCli command.Cli) *api {
return &api{dockerCli: dockerCli}
}
type api struct {
dockerCli command.Cli
}
func (a *api) DockerAPI(name string) (dockerclient.APIClient, error) {
if name == "" {
name = a.dockerCli.CurrentContext()
}
return clientForEndpoint(a.dockerCli, name)
}
type dinfo struct {
di *build.DriverInfo
info *driver.Info
platforms []specs.Platform
err error
}
type nginfo struct {
ng *store.NodeGroup
drivers []dinfo
err error
}
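// boot starts all drivers of the group that are not yet running. It returns
// true if at least one driver had to be booted.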
func boot(ctx context.Context, ngi *nginfo) (bool, error) {
toBoot := make([]int, 0, len(ngi.drivers))
for i, d := range ngi.drivers {
if d.err != nil || d.di.Err != nil || d.di.Driver == nil || d.info == nil {
continue
}
if d.info.Status != driver.Running {
toBoot = append(toBoot, i)
}
}
if len(toBoot) == 0 {
return false, nil
}
printer := progress.NewPrinter(context.TODO(), os.Stderr, "auto")
baseCtx := ctx
eg, _ := errgroup.WithContext(ctx)
for _, idx := range toBoot {
func(idx int) {
eg.Go(func() error {
pw := progress.WithPrefix(printer, ngi.ng.Nodes[idx].Name, len(toBoot) > 1)
_, err := driver.Boot(ctx, baseCtx, ngi.drivers[idx].di.Driver, pw)
if err != nil {
ngi.drivers[idx].err = err
}
return nil
})
}(idx)
}
err := eg.Wait()
err1 := printer.Wait()
if err == nil {
err = err1
}
return true, err
}

View File

@@ -1,14 +1,17 @@
variable "GO_VERSION" {
-  default = "1.19"
+  default = "1.17"
+}
+variable "BIN_OUT" {
+  default = "./bin"
+}
+variable "RELEASE_OUT" {
+  default = "./release-out"
}
variable "DOCS_FORMATS" {
  default = "md"
}
-variable "DESTDIR" {
-  default = "./bin"
-}

-# Special target: https://github.com/docker/metadata-action#bake-definition
+// Special target: https://github.com/docker/metadata-action#bake-definition
target "meta-helper" {
  tags = ["docker/buildx-bin:local"]
}
@@ -17,7 +20,6 @@ target "_common" {
  args = {
    GO_VERSION = GO_VERSION
    BUILDKIT_CONTEXT_KEEP_GIT_DIR = 1
-    BUILDX_EXPERIMENTAL = 1
  }
}
@@ -87,20 +89,19 @@ target "mod-outdated" {
  inherits = ["_common"]
  dockerfile = "./hack/dockerfiles/vendor.Dockerfile"
  target = "outdated"
-  no-cache-filter = ["outdated"]
  output = ["type=cacheonly"]
}

target "test" {
  inherits = ["_common"]
  target = "test-coverage"
-  output = ["${DESTDIR}/coverage"]
+  output = ["./coverage"]
}

target "binaries" {
  inherits = ["_common"]
  target = "binaries"
-  output = ["${DESTDIR}/build"]
+  output = [BIN_OUT]
  platforms = ["local"]
}
@@ -124,7 +125,7 @@ target "binaries-cross" {
target "release" {
  inherits = ["binaries-cross"]
  target = "release"
-  output = ["${DESTDIR}/release"]
+  output = [RELEASE_OUT]
}

target "image" {

View File

@@ -16,7 +16,6 @@ import (
	_ "github.com/docker/buildx/driver/docker"
	_ "github.com/docker/buildx/driver/docker-container"
	_ "github.com/docker/buildx/driver/kubernetes"
-	_ "github.com/docker/buildx/driver/remote"
)

const defaultSourcePath = "docs/reference/"

View File

@@ -1,48 +0,0 @@
# CI/CD
## GitHub Actions
Docker provides a [GitHub Action that will build and push your image](https://github.com/docker/build-push-action/#about)
using Buildx. Here is a simple workflow:
```yaml
name: ci
on:
push:
branches:
- 'main'
jobs:
docker:
runs-on: ubuntu-latest
steps:
-
name: Set up QEMU
uses: docker/setup-qemu-action@v2
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
-
name: Login to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
-
name: Build and push
uses: docker/build-push-action@v2
with:
push: true
tags: user/app:latest
```
In this example we are also using three other actions:
* The [`setup-buildx`](https://github.com/docker/setup-buildx-action) action creates and boots a builder, by default
using the `docker-container` [builder driver](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver).
This is **not required but recommended** to be able to build multi-platform images, export cache, etc.
(a sketch follows this list).
* The [`setup-qemu`](https://github.com/docker/setup-qemu-action) action is useful if you want to add QEMU emulation
support in order to build for more platforms.
* The [`login`](https://github.com/docker/login-action) action takes care of logging
in to a Docker registry.
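As a minimal sketch, once QEMU and Buildx are set up the final step can be extended
for a multi-platform build (the platform list and tag below are illustrative, not
part of the original example):
```yaml
      -
        name: Build and push
        uses: docker/build-push-action@v2
        with:
          push: true
          platforms: linux/amd64,linux/arm64
          tags: user/app:latest
```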

View File

@@ -1,23 +0,0 @@
# CNI networking
It can be useful to use a bridge network for your builder if, for example, you
encounter network port contention during multiple builds. If you're using the
BuildKit image, CNI is not yet available in it, but you can create
[a custom BuildKit image with CNI support](https://github.com/moby/buildkit/blob/master/docs/cni-networking.md).
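A minimal sketch of such an image, assuming a CNI config file sits next to the
Dockerfile (the plugins version and config path are illustrative; see the linked
guide for a complete setup):
```dockerfile
# Dockerfile
FROM moby/buildkit:master
ARG CNI_PLUGINS_VERSION=v1.0.1
# fetch and install the standard CNI plugins
ADD https://github.com/containernetworking/plugins/releases/download/${CNI_PLUGINS_VERSION}/cni-plugins-linux-amd64-${CNI_PLUGINS_VERSION}.tgz /tmp/cni-plugins.tgz
RUN mkdir -p /opt/cni/bin \
 && tar -xzf /tmp/cni-plugins.tgz -C /opt/cni/bin \
 && rm /tmp/cni-plugins.tgz
# /etc/buildkit/cni.json is buildkitd's default CNI config lookup path
COPY cni.json /etc/buildkit/cni.json
```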
Now build this image:
```console
$ docker buildx build --tag buildkit-cni:local --load .
```
Then [create a `docker-container` builder](https://docs.docker.com/engine/reference/commandline/buildx_create/) that
will use this image:
```console
$ docker buildx create --use \
--name mybuilder \
--driver docker-container \
--driver-opt "image=buildkit-cni:local" \
--buildkitd-flags "--oci-worker-net=cni"
```

View File

@@ -1,20 +0,0 @@
# Color output controls
Buildx has support for modifying the colors that are used to output information
to the terminal. You can set the environment variable `BUILDKIT_COLORS` to
something like `run=123,20,245:error=yellow:cancel=blue:warning=white` to set
the colors that you would like to use:
![Progress output custom colors](https://user-images.githubusercontent.com/1951866/180584033-24522385-cafd-4a54-a4a2-18f5ce74eb27.png)
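For example, to apply a custom palette to a single build (using the same
illustrative values):
```console
$ BUILDKIT_COLORS="run=123,20,245:error=yellow:cancel=blue:warning=white" \
  docker buildx build .
```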
Setting `NO_COLOR` to anything will disable any colorized output as recommended
by [no-color.org](https://no-color.org/):
![Progress output no color](https://user-images.githubusercontent.com/1951866/180584037-e28f9997-dd4c-49cf-8b26-04864815de19.png)
> **Note**
>
> Parsing errors will be reported but ignored. This will result in default
> color values being used where needed.
See also [the list of pre-defined colors](https://github.com/moby/buildkit/blob/master/util/progress/progressui/colors.go).

View File

@@ -1,34 +0,0 @@
# Using a custom network
[Create a network](https://docs.docker.com/engine/reference/commandline/network_create/)
named `foonet`:
```console
$ docker network create foonet
```
[Create a `docker-container` builder](https://docs.docker.com/engine/reference/commandline/buildx_create/)
named `mybuilder` that will use this network:
```console
$ docker buildx create --use \
--name mybuilder \
--driver docker-container \
--driver-opt "network=foonet"
```
Boot and [inspect `mybuilder`](https://docs.docker.com/engine/reference/commandline/buildx_inspect/):
```console
$ docker buildx inspect --bootstrap
```
[Inspect the builder container](https://docs.docker.com/engine/reference/commandline/inspect/)
and see what network is being used:
{% raw %}
```console
$ docker inspect buildx_buildkit_mybuilder0 --format={{.NetworkSettings.Networks}}
map[foonet:0xc00018c0c0]
```
{% endraw %}

View File

@@ -1,63 +0,0 @@
# Using a custom registry configuration
If you [create a `docker-container` or `kubernetes` builder](https://docs.docker.com/engine/reference/commandline/buildx_create/) and
have specified certificates for registries in the [BuildKit daemon configuration](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md),
the files will be copied into the container under `/etc/buildkit/certs` and
configuration will be updated to reflect that.
Take the following `buildkitd.toml` configuration that will be used for
pushing an image to this registry using self-signed certificates:
```toml
# /etc/buildkitd.toml
debug = true
[registry."myregistry.com"]
ca=["/etc/certs/myregistry.pem"]
[[registry."myregistry.com".keypair]]
key="/etc/certs/myregistry_key.pem"
cert="/etc/certs/myregistry_cert.pem"
```
Here we have configured a self-signed certificate for `myregistry.com` registry.
Now [create a `docker-container` builder](https://docs.docker.com/engine/reference/commandline/buildx_create/)
that will use this BuildKit configuration:
```console
$ docker buildx create --use \
--name mybuilder \
--driver docker-container \
--config /etc/buildkitd.toml
```
Inspecting the builder container, you can see that buildkitd configuration
has changed:
```console
$ docker exec -it buildx_buildkit_mybuilder0 cat /etc/buildkit/buildkitd.toml
```
```toml
debug = true
[registry]
[registry."myregistry.com"]
ca = ["/etc/buildkit/certs/myregistry.com/myregistry.pem"]
[[registry."myregistry.com".keypair]]
cert = "/etc/buildkit/certs/myregistry.com/myregistry_cert.pem"
key = "/etc/buildkit/certs/myregistry.com/myregistry_key.pem"
```
And certificates copied inside the container:
```console
$ docker exec -it buildx_buildkit_mybuilder0 ls /etc/buildkit/certs/myregistry.com/
myregistry.pem myregistry_cert.pem myregistry_key.pem
```
Now you should be able to push to the registry with this builder:
```console
$ docker buildx build --push --tag myregistry.com/myimage:latest .
```

View File

@@ -1,31 +0,0 @@
# OpenTelemetry support
To capture the trace to [Jaeger](https://github.com/jaegertracing/jaeger), set
the `JAEGER_TRACE` environment variable to the collector address using a `driver-opt`.
First create a Jaeger container:
```console
$ docker run -d --name jaeger -p "6831:6831/udp" -p "16686:16686" jaegertracing/all-in-one
```
Then [create a `docker-container` builder](https://docs.docker.com/engine/reference/commandline/buildx_create/)
that will use the Jaeger instance via the `JAEGER_TRACE` env var:
```console
$ docker buildx create --use \
--name mybuilder \
--driver docker-container \
--driver-opt "network=host" \
--driver-opt "env.JAEGER_TRACE=localhost:6831"
```
Boot and [inspect `mybuilder`](https://docs.docker.com/engine/reference/commandline/buildx_inspect/):
```console
$ docker buildx inspect --bootstrap
```
Buildx commands should be traced at `http://127.0.0.1:16686/`:
![OpenTelemetry Buildx Bake](https://user-images.githubusercontent.com/1951866/124468052-ef085400-dd98-11eb-84ab-7ac8e261dd52.png)

View File

@@ -1,62 +0,0 @@
# Registry mirror
You can define a registry mirror to use for your builds by providing a [BuildKit daemon configuration](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md)
while creating a builder with the [`--config` flags](https://docs.docker.com/engine/reference/commandline/buildx_create/#config).
```toml
# /etc/buildkitd.toml
debug = true
[registry."docker.io"]
mirrors = ["mirror.gcr.io"]
```
> **Note**
>
> `debug = true` has been added to be able to debug requests
> in the BuildKit daemon and see if the mirror is effectively used.
Then [create a `docker-container` builder](https://docs.docker.com/engine/reference/commandline/buildx_create/)
that will use this BuildKit configuration:
```console
$ docker buildx create --use \
--name mybuilder \
--driver docker-container \
--config /etc/buildkitd.toml
```
Boot and [inspect `mybuilder`](https://docs.docker.com/engine/reference/commandline/buildx_inspect/):
```console
$ docker buildx inspect --bootstrap
```
Build an image:
```console
$ docker buildx build --load . -f-<<EOF
FROM alpine
RUN echo "hello world"
EOF
```
Now let's check the BuildKit logs in the builder container:
```console
$ docker logs buildx_buildkit_mybuilder0
```
```text
...
time="2022-02-06T17:47:48Z" level=debug msg="do request" request.header.accept="application/vnd.docker.container.image.v1+json, */*" request.header.user-agent=containerd/1.5.8+unknown request.method=GET spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
time="2022-02-06T17:47:48Z" level=debug msg="fetch response received" response.header.accept-ranges=bytes response.header.age=1356 response.header.alt-svc="h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\"" response.header.cache-control="public, max-age=3600" response.header.content-length=1469 response.header.content-type=application/octet-stream response.header.date="Sun, 06 Feb 2022 17:25:17 GMT" response.header.etag="\"774380abda8f4eae9a149e5d5d3efc83\"" response.header.expires="Sun, 06 Feb 2022 18:25:17 GMT" response.header.last-modified="Wed, 24 Nov 2021 21:07:57 GMT" response.header.server=UploadServer response.header.x-goog-generation=1637788077652182 response.header.x-goog-hash="crc32c=V3DSrg==" response.header.x-goog-hash.1="md5=d0OAq9qPTq6aFJ5dXT78gw==" response.header.x-goog-metageneration=1 response.header.x-goog-storage-class=STANDARD response.header.x-goog-stored-content-encoding=identity response.header.x-goog-stored-content-length=1469 response.header.x-guploader-uploadid=ADPycduqQipVAXc3tzXmTzKQ2gTT6CV736B2J628smtD1iDytEyiYCgvvdD8zz9BT1J1sASUq9pW_ctUyC4B-v2jvhIxnZTlKg response.status="200 OK" spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
time="2022-02-06T17:47:48Z" level=debug msg="fetch response received" response.header.accept-ranges=bytes response.header.age=760 response.header.alt-svc="h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\"" response.header.cache-control="public, max-age=3600" response.header.content-length=1471 response.header.content-type=application/octet-stream response.header.date="Sun, 06 Feb 2022 17:35:13 GMT" response.header.etag="\"35d688bd15327daafcdb4d4395e616a8\"" response.header.expires="Sun, 06 Feb 2022 18:35:13 GMT" response.header.last-modified="Wed, 24 Nov 2021 21:07:12 GMT" response.header.server=UploadServer response.header.x-goog-generation=1637788032100793 response.header.x-goog-hash="crc32c=aWgRjA==" response.header.x-goog-hash.1="md5=NdaIvRUyfar8201DleYWqA==" response.header.x-goog-metageneration=1 response.header.x-goog-storage-class=STANDARD response.header.x-goog-stored-content-encoding=identity response.header.x-goog-stored-content-length=1471 response.header.x-guploader-uploadid=ADPycdtR-gJYwC7yHquIkJWFFG8FovDySvtmRnZBqlO3yVDanBXh_VqKYt400yhuf0XbQ3ZMB9IZV2vlcyHezn_Pu3a1SMMtiw response.status="200 OK" spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
time="2022-02-06T17:47:48Z" level=debug msg=fetch spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
time="2022-02-06T17:47:48Z" level=debug msg=fetch spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
time="2022-02-06T17:47:48Z" level=debug msg=fetch spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
time="2022-02-06T17:47:48Z" level=debug msg=fetch spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
time="2022-02-06T17:47:48Z" level=debug msg="do request" request.header.accept="application/vnd.docker.image.rootfs.diff.tar.gzip, */*" request.header.user-agent=containerd/1.5.8+unknown request.method=GET spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
time="2022-02-06T17:47:48Z" level=debug msg="fetch response received" response.header.accept-ranges=bytes response.header.age=1356 response.header.alt-svc="h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\"" response.header.cache-control="public, max-age=3600" response.header.content-length=2818413 response.header.content-type=application/octet-stream response.header.date="Sun, 06 Feb 2022 17:25:17 GMT" response.header.etag="\"1d55e7be5a77c4a908ad11bc33ebea1c\"" response.header.expires="Sun, 06 Feb 2022 18:25:17 GMT" response.header.last-modified="Wed, 24 Nov 2021 21:07:06 GMT" response.header.server=UploadServer response.header.x-goog-generation=1637788026431708 response.header.x-goog-hash="crc32c=ZojF+g==" response.header.x-goog-hash.1="md5=HVXnvlp3xKkIrRG8M+vqHA==" response.header.x-goog-metageneration=1 response.header.x-goog-storage-class=STANDARD response.header.x-goog-stored-content-encoding=identity response.header.x-goog-stored-content-length=2818413 response.header.x-guploader-uploadid=ADPycdsebqxiTBJqZ0bv9zBigjFxgQydD2ESZSkKchpE0ILlN9Ibko3C5r4fJTJ4UR9ddp-UBd-2v_4eRpZ8Yo2llW_j4k8WhQ response.status="200 OK" spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
...
```
As you can see, the requests are served by the GCR registry mirror (note the `response.header.x-goog*` response headers).

View File

@@ -1,33 +0,0 @@
# Resource limiting
## Max parallelism
You can limit the parallelism of the BuildKit solver, which is particularly useful
for low-powered machines, using a [BuildKit daemon configuration](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md)
while creating a builder with the [`--config` flags](https://docs.docker.com/engine/reference/commandline/buildx_create/#config).
```toml
# /etc/buildkitd.toml
[worker.oci]
max-parallelism = 4
```
Now you can [create a `docker-container` builder](https://docs.docker.com/engine/reference/commandline/buildx_create/)
that will use this BuildKit configuration to limit parallelism.
```console
$ docker buildx create --use \
--name mybuilder \
--driver docker-container \
--config /etc/buildkitd.toml
```
## Limit on TCP connections
BuildKit also now limits TCP connections to **4 per registry**, with one
additional connection reserved for metadata rather than layer pulls and pushes.
This per-host connection limit keeps your build from getting stuck while
pulling images. The additional connection is used for metadata requests
(image config retrieval) to improve the overall build time.
More info: [moby/buildkit#2259](https://github.com/moby/buildkit/pull/2259)

View File

@@ -1,14 +0,0 @@
# Buildx manuals 📚
This directory contains a bunch of useful docs for how to use Buildx features.
> **Note**
>
> The markdown files in this directory (excluding this README) are reused
> downstream by the
> [Docker documentation repository](https://github.com/docker/docs).
>
> If you wish to contribute to these docs, be sure to first review the
> [documentation contribution guidelines](https://docs.docker.com/contribute/overview/).
>
> Thank you!

View File

@@ -1,3 +0,0 @@
# Defining additional build contexts and linking targets
Moved to [docs.docker.com](https://docs.docker.com/build/customize/bake/build-contexts)

View File

@@ -1,3 +0,0 @@
# Building from Compose file
Moved to [docs.docker.com](https://docs.docker.com/build/customize/bake/compose-file)

View File

@@ -1,3 +0,0 @@
# Configuring builds
Moved to [docs.docker.com](https://docs.docker.com/build/customize/bake/configuring-build)

View File

@@ -1,3 +0,0 @@
# Bake file definition
Moved to [docs.docker.com](https://docs.docker.com/build/customize/bake/file-definition)

View File

@@ -1,3 +0,0 @@
# User defined HCL functions
Moved to [docs.docker.com](https://docs.docker.com/build/customize/bake/hcl-funcs)

View File

@@ -1,3 +0,0 @@
# High-level build options with Bake
Moved to [docs.docker.com](https://docs.docker.com/build/customize/bake)

View File

@@ -1,3 +0,0 @@
# Azure Blob Storage cache storage
Moved to [docs.docker.com](https://docs.docker.com/build/building/cache/backends/azblob)

View File

@@ -1,3 +0,0 @@
# GitHub Actions cache storage
Moved to [docs.docker.com](https://docs.docker.com/build/building/cache/backends/gha)

View File

@@ -1,3 +0,0 @@
# Cache storage backends
Moved to [docs.docker.com](https://docs.docker.com/build/building/cache/backends)

View File

@@ -1,3 +0,0 @@
# Inline cache storage
Moved to [docs.docker.com](https://docs.docker.com/build/building/cache/backends/inline)

View File

@@ -1,3 +0,0 @@
# Local cache storage
Moved to [docs.docker.com](https://docs.docker.com/build/building/cache/backends/local)

View File

@@ -1,3 +0,0 @@
# Registry cache storage
Moved to [docs.docker.com](https://docs.docker.com/build/building/cache/backends/registry)

View File

@@ -1,3 +0,0 @@
# Amazon S3 cache storage
Moved to [docs.docker.com](https://docs.docker.com/build/building/cache/backends/s3)

View File

@@ -1,3 +0,0 @@
# Docker container driver
Moved to [docs.docker.com](https://docs.docker.com/build/building/drivers/docker-container)

View File

@@ -1,3 +0,0 @@
# Docker driver
Moved to [docs.docker.com](https://docs.docker.com/build/building/drivers/docker)

View File

@@ -1,3 +0,0 @@
# Buildx drivers overview
Moved to [docs.docker.com](https://docs.docker.com/build/building/drivers)

View File

@@ -1,3 +0,0 @@
# Kubernetes driver
Moved to [docs.docker.com](https://docs.docker.com/build/building/drivers/kubernetes)

View File

@@ -1,3 +0,0 @@
# Remote driver
Moved to [docs.docker.com](https://docs.docker.com/build/building/drivers/remote)

View File

@@ -1,3 +0,0 @@
# Image and registry exporters
Moved to [docs.docker.com](https://docs.docker.com/build/building/exporters/image-registry)

View File

@@ -1,3 +0,0 @@
# Exporters overview
Moved to [docs.docker.com](https://docs.docker.com/build/building/exporters)

View File

@@ -1,3 +0,0 @@
# Local and tar exporters
Moved to [docs.docker.com](https://docs.docker.com/build/building/exporters/local-tar)

View File

@@ -1,3 +0,0 @@
# OCI and Docker exporters
Moved to [docs.docker.com](https://docs.docker.com/build/building/exporters/oci-docker)

View File

@@ -29,9 +29,9 @@ Extended build capabilities with BuildKit
### Options

-| Name | Type | Default | Description |
-| --- | --- | --- | --- |
-| [`--builder`](#builder) | `string` | | Override the configured builder instance |
+| Name | Description |
+| --- | --- |
+| [`--builder string`](#builder) | Override the configured builder instance |

<!---MARKER_GEN_END-->

View File

@@ -9,24 +9,22 @@ Build from a file
### Aliases

-`docker buildx bake`, `docker buildx f`
+`bake`, `f`

### Options

-| Name | Type | Default | Description |
-| --- | --- | --- | --- |
-| [`--builder`](#builder) | `string` | | Override the configured builder instance |
-| [`-f`](#file), [`--file`](#file) | `stringArray` | | Build definition file |
-| `--load` | | | Shorthand for `--set=*.output=type=docker` |
-| `--metadata-file` | `string` | | Write build result metadata to the file |
-| [`--no-cache`](#no-cache) | | | Do not use cache when building the image |
-| [`--print`](#print) | | | Print the options without building |
-| [`--progress`](#progress) | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
-| `--provenance` | `string` | | Shorthand for `--set=*.attest=type=provenance` |
-| [`--pull`](#pull) | | | Always attempt to pull all referenced images |
-| `--push` | | | Shorthand for `--set=*.output=type=registry` |
-| `--sbom` | `string` | | Shorthand for `--set=*.attest=type=sbom` |
-| [`--set`](#set) | `stringArray` | | Override target value (e.g., `targetpattern.key=value`) |
+| Name | Description |
+| --- | --- |
+| [`--builder string`](#builder) | Override the configured builder instance |
+| [`-f`](#file), [`--file stringArray`](#file) | Build definition file |
+| `--load` | Shorthand for `--set=*.output=type=docker` |
+| `--metadata-file string` | Write build result metadata to the file |
+| [`--no-cache`](#no-cache) | Do not use cache when building the image |
+| [`--print`](#print) | Print the options without building |
+| [`--progress string`](#progress) | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
+| [`--pull`](#pull) | Always attempt to pull a newer version of the image |
+| `--push` | Shorthand for `--set=*.output=type=registry` |
+| [`--set stringArray`](#set) | Override target value (e.g., `targetpattern.key=value`) |

<!---MARKER_GEN_END-->
@@ -36,14 +34,12 @@ Build from a file
Bake is a high-level build command. Each specified target will run in parallel
as part of the build.

-Read [High-level build options with Bake](https://docs.docker.com/build/customize/bake/)
-guide for introduction to writing bake files.
+Read [High-level build options](https://github.com/docker/buildx#high-level-build-options)
+for introduction.

-> **Note**
->
-> `buildx bake` command may receive backwards incompatible features in the future
-> if needed. We are looking for feedback on improving the command and extending
-> the functionality further.
+Please note that `buildx bake` command may receive backwards incompatible
+features in the future if needed. We are looking for feedback on improving the
+command and extending the functionality further.

## Examples
@@ -53,42 +49,166 @@ Same as [`buildx --builder`](buildx.md#builder).
### <a name="file"></a> Specify a build definition file (-f, --file)

-Use the `-f` / `--file` option to specify the build definition file to use.
-The file can be an HCL, JSON or Compose file. If multiple files are specified
+By default, `buildx bake` looks for build definition files in the current
+directory, the following are parsed:
+
+- `docker-compose.yml`
+- `docker-compose.yaml`
+- `docker-bake.json`
+- `docker-bake.override.json`
+- `docker-bake.hcl`
+- `docker-bake.override.hcl`
+
+Use the `-f` / `--file` option to specify the build definition file to use. The
+file can be a Docker Compose, JSON or HCL file. If multiple files are specified
they are all read and configurations are combined.

-You can pass the names of the targets to build, to build only specific target(s).
-The following example builds the `db` and `webapp-release` targets that are
-defined in the `docker-bake.dev.hcl` file:
-
-```hcl
-# docker-bake.dev.hcl
-group "default" {
-  targets = ["db", "webapp-dev"]
-}
-
-target "webapp-dev" {
-  dockerfile = "Dockerfile.webapp"
-  tags = ["docker.io/username/webapp"]
-}
-
-target "webapp-release" {
-  inherits = ["webapp-dev"]
-  platforms = ["linux/amd64", "linux/arm64"]
-}
-
-target "db" {
-  dockerfile = "Dockerfile.db"
-  tags = ["docker.io/username/db"]
-}
-```
-
-```console
-$ docker buildx bake -f docker-bake.dev.hcl db webapp-release
-```
-
-See our [file definition](https://docs.docker.com/build/customize/bake/file-definition/)
-guide for more details.
+The following example uses a Docker Compose file named `docker-compose.dev.yaml`
+as build definition file, and builds all targets in the file:
+
+```console
+$ docker buildx bake -f docker-compose.dev.yaml
+
+[+] Building 66.3s (30/30) FINISHED
+ => [frontend internal] load build definition from Dockerfile  0.1s
+ => => transferring dockerfile: 36B                            0.0s
+ => [backend internal] load build definition from Dockerfile   0.2s
+ => => transferring dockerfile: 3.73kB                         0.0s
+ => [database internal] load build definition from Dockerfile  0.1s
+ => => transferring dockerfile: 5.77kB                         0.0s
+...
+```
+
+Pass the names of the targets to build, to build only specific target(s). The
+following example builds the `backend` and `database` targets that are defined
+in the `docker-compose.dev.yaml` file, skipping the build for the `frontend`
+target:
+
+```console
+$ docker buildx bake -f docker-compose.dev.yaml backend database
+
+[+] Building 2.4s (13/13) FINISHED
+ => [backend internal] load build definition from Dockerfile   0.1s
+ => => transferring dockerfile: 81B                            0.0s
+ => [database internal] load build definition from Dockerfile  0.2s
+ => => transferring dockerfile: 36B                            0.0s
+ => [backend internal] load .dockerignore                      0.3s
+...
+```
+
+You can also use a remote `git` bake definition:
+
+```console
+$ docker buildx bake "git://github.com/docker/cli#v20.10.11" --print
+#1 [internal] load git source git://github.com/docker/cli#v20.10.11
+#1 0.745 e8f1871b077b64bcb4a13334b7146492773769f7 refs/tags/v20.10.11
+#1 2.022 From git://github.com/docker/cli
+#1 2.022 * [new tag] v20.10.11 -> v20.10.11
+#1 DONE 2.9s
+{
+  "group": {
+    "default": {
+      "targets": [
+        "binary"
+      ]
+    }
+  },
+  "target": {
+    "binary": {
+      "context": "git://github.com/docker/cli#v20.10.11",
+      "dockerfile": "Dockerfile",
+      "args": {
+        "BASE_VARIANT": "alpine",
+        "GO_STRIP": "",
+        "VERSION": ""
+      },
+      "target": "binary",
+      "platforms": [
+        "local"
+      ],
+      "output": [
+        "build"
+      ]
+    }
+  }
+}
+```
+
+As you can see the context is fixed to `git://github.com/docker/cli` even if
+[no context is actually defined](https://github.com/docker/cli/blob/2776a6d694f988c0c1df61cad4bfac0f54e481c8/docker-bake.hcl#L17-L26)
+in the definition.
+
+If you want to access the main context for bake command from a bake file
+that has been imported remotely, you can use the `BAKE_CMD_CONTEXT` builtin var:
+
+```console
+$ cat https://raw.githubusercontent.com/tonistiigi/buildx/remote-test/docker-bake.hcl
+target "default" {
+  context = BAKE_CMD_CONTEXT
+  dockerfile-inline = <<EOT
+FROM alpine
+WORKDIR /src
+COPY . .
+RUN ls -l && stop
+EOT
+}
+```
+
+```console
+$ docker buildx bake "git://github.com/tonistiigi/buildx#remote-test" --print
+{
+  "target": {
+    "default": {
+      "context": ".",
+      "dockerfile": "Dockerfile",
+      "dockerfile-inline": "FROM alpine\nWORKDIR /src\nCOPY . .\nRUN ls -l \u0026\u0026 stop\n"
+    }
+  }
+}
+```
+
+```console
+$ touch foo bar
+$ docker buildx bake "git://github.com/tonistiigi/buildx#remote-test"
+...
+ > [4/4] RUN ls -l && stop:
+#8 0.101 total 0
+#8 0.102 -rw-r--r-- 1 root root 0 Jul 27 18:47 bar
+#8 0.102 -rw-r--r-- 1 root root 0 Jul 27 18:47 foo
+#8 0.102 /bin/sh: stop: not found
+```
+
+```console
+$ docker buildx bake "git://github.com/tonistiigi/buildx#remote-test" "git://github.com/docker/cli#v20.10.11" --print
+#1 [internal] load git source git://github.com/tonistiigi/buildx#remote-test
+#1 0.429 577303add004dd7efeb13434d69ea030d35f7888 refs/heads/remote-test
+#1 CACHED
+{
+  "target": {
+    "default": {
+      "context": "git://github.com/docker/cli#v20.10.11",
+      "dockerfile": "Dockerfile",
+      "dockerfile-inline": "FROM alpine\nWORKDIR /src\nCOPY . .\nRUN ls -l \u0026\u0026 stop\n"
+    }
+  }
+}
+```
+
+```console
+$ docker buildx bake "git://github.com/tonistiigi/buildx#remote-test" "git://github.com/docker/cli#v20.10.11"
+...
+ > [4/4] RUN ls -l && stop:
+#8 0.136 drwxrwxrwx 5 root root 4096 Jul 27 18:31 kubernetes
+#8 0.136 drwxrwxrwx 3 root root 4096 Jul 27 18:31 man
+#8 0.136 drwxrwxrwx 2 root root 4096 Jul 27 18:31 opts
+#8 0.136 -rw-rw-rw- 1 root root 1893 Jul 27 18:31 poule.yml
+#8 0.136 drwxrwxrwx 7 root root 4096 Jul 27 18:31 scripts
+#8 0.136 drwxrwxrwx 3 root root 4096 Jul 27 18:31 service
+#8 0.136 drwxrwxrwx 2 root root 4096 Jul 27 18:31 templates
+#8 0.136 drwxrwxrwx 10 root root 4096 Jul 27 18:31 vendor
+#8 0.136 -rwxrwxrwx 1 root root 9620 Jul 27 18:31 vendor.conf
+#8 0.136 /bin/sh: stop: not found
+```
### <a name="no-cache"></a> Do not use cache when building the image (--no-cache) ### <a name="no-cache"></a> Do not use cache when building the image (--no-cache)
@@ -123,7 +243,27 @@ $ docker buildx bake -f docker-bake.hcl --print db
### <a name="progress"></a> Set type of progress output (--progress) ### <a name="progress"></a> Set type of progress output (--progress)
Same as [`build --progress`](buildx_build.md#progress). Same as [`build --progress`](buildx_build.md#progress). Set type of progress
output (auto, plain, tty). Use plain to show container output (default "auto").
> You can also use the `BUILDKIT_PROGRESS` environment variable to set its value.
The following example uses `plain` output during the build:
```console
$ docker buildx bake --progress=plain
#2 [backend internal] load build definition from Dockerfile.test
#2 sha256:de70cb0bb6ed8044f7b9b1b53b67f624e2ccfb93d96bb48b70c1fba562489618
#2 ...
#1 [database internal] load build definition from Dockerfile.test
#1 sha256:453cb50abd941762900a1212657a35fc4aad107f5d180b0ee9d93d6b74481bce
#1 transferring dockerfile: 36B done
#1 DONE 0.1s
...
```
### <a name="pull"></a> Always attempt to pull a newer version of the image (--pull) ### <a name="pull"></a> Always attempt to pull a newer version of the image (--pull)
@@ -138,6 +278,9 @@ Same as `build --pull`.
Override target configurations from command line. The pattern matching syntax Override target configurations from command line. The pattern matching syntax
is defined in https://golang.org/pkg/path/#Match. is defined in https://golang.org/pkg/path/#Match.
**Examples**
```console ```console
$ docker buildx bake --set target.args.mybuildarg=value $ docker buildx bake --set target.args.mybuildarg=value
$ docker buildx bake --set target.platform=linux/arm64 $ docker buildx bake --set target.platform=linux/arm64
@@ -147,20 +290,609 @@ $ docker buildx bake --set foo*.no-cache # bypass caching only for
``` ```
Complete list of overridable fields:
-
-* `args`
-* `cache-from`
-* `cache-to`
-* `context`
-* `dockerfile`
-* `labels`
-* `no-cache`
-* `no-cache-filter`
-* `output`
-* `platform`
-* `pull`
-* `push`
-* `secrets`
-* `ssh`
-* `tags`
-* `target`
+`args`, `cache-from`, `cache-to`, `context`, `dockerfile`, `labels`, `no-cache`,
+`output`, `platform`, `pull`, `secrets`, `ssh`, `tags`, `target`
+
+### File definition
+
+In addition to compose files, bake supports a JSON and an equivalent HCL file
+format for defining build groups and targets.
+
+A target reflects a single docker build invocation with the same options that
+you would specify for `docker build`. A group is a grouping of targets.
+
+Multiple files can include the same target and final build options will be
+determined by merging them together.
+
+In the case of compose files, each service corresponds to a target.
+
+A group can specify its list of targets with the `targets` option. A target can
+inherit build options by setting the `inherits` option to the list of targets or
+groups to inherit from.
+
+Note: Design of bake command is work in progress, the user experience may change
+based on feedback.
+
+**Example HCL definition**
```hcl
group "default" {
targets = ["db", "webapp-dev"]
}
target "webapp-dev" {
dockerfile = "Dockerfile.webapp"
tags = ["docker.io/username/webapp"]
}
target "webapp-release" {
inherits = ["webapp-dev"]
platforms = ["linux/amd64", "linux/arm64"]
}
target "db" {
dockerfile = "Dockerfile.db"
tags = ["docker.io/username/db"]
}
```
Complete list of valid target fields:
`args`, `cache-from`, `cache-to`, `context`, `dockerfile`, `inherits`, `labels`,
`no-cache`, `output`, `platform`, `pull`, `secrets`, `ssh`, `tags`, `target`
### Global scope attributes
You can define global scope attributes in HCL/JSON and use them for code reuse
and setting values for variables. This means you can do a "data-only" HCL file
with the values you want to set/override and use it in the list of regular
output files.
```hcl
# docker-bake.hcl
variable "FOO" {
default = "abc"
}
target "app" {
args = {
v1 = "pre-${FOO}"
}
}
```
You can use this file directly:
```console
$ docker buildx bake --print app
{
"group": {
"default": {
"targets": [
"app"
]
}
},
"target": {
"app": {
"context": ".",
"dockerfile": "Dockerfile",
"args": {
"v1": "pre-abc"
}
}
}
}
```
Or create an override configuration file:
```hcl
# env.hcl
WHOAMI="myuser"
FOO="def-${WHOAMI}"
```
And invoke bake together with both of the files:
```console
$ docker buildx bake -f docker-bake.hcl -f env.hcl --print app
{
"group": {
"default": {
"targets": [
"app"
]
}
},
"target": {
"app": {
"context": ".",
"dockerfile": "Dockerfile",
"args": {
"v1": "pre-def-myuser"
}
}
}
}
```
### HCL variables and functions
Similar to how Terraform provides a way to [define variables](https://www.terraform.io/docs/configuration/variables.html#declaring-an-input-variable),
the HCL file format also supports variable block definitions. These can be used
to define variables with values provided by the current environment, or a
default value when unset.
A [set of generally useful functions](https://github.com/docker/buildx/blob/master/bake/hclparser/stdlib.go)
provided by [go-cty](https://github.com/zclconf/go-cty/tree/main/cty/function/stdlib)
are available for use in HCL files. In addition, [user defined functions](https://github.com/hashicorp/hcl/tree/main/ext/userfunc)
are also supported.
#### Using interpolation to tag an image with the git sha
Bake supports variable blocks which are assigned to matching environment
variables or default values.
```hcl
# docker-bake.hcl
variable "TAG" {
default = "latest"
}
group "default" {
targets = ["webapp"]
}
target "webapp" {
tags = ["docker.io/username/webapp:${TAG}"]
}
```
```console
$ docker buildx bake --print webapp
{
"group": {
"default": {
"targets": [
"webapp"
]
}
},
"target": {
"webapp": {
"context": ".",
"dockerfile": "Dockerfile",
"tags": [
"docker.io/username/webapp:latest"
]
}
}
}
```
```console
$ TAG=$(git rev-parse --short HEAD) docker buildx bake --print webapp
{
"group": {
"default": {
"targets": [
"webapp"
]
}
},
"target": {
"webapp": {
"context": ".",
"dockerfile": "Dockerfile",
"tags": [
"docker.io/username/webapp:985e9e9"
]
}
}
}
```
#### Using the `add` function
You can use [`go-cty` stdlib functions](https://github.com/zclconf/go-cty/tree/main/cty/function/stdlib).
Here we are using the `add` function.
```hcl
# docker-bake.hcl
variable "TAG" {
default = "latest"
}
group "default" {
targets = ["webapp"]
}
target "webapp" {
args = {
buildno = "${add(123, 1)}"
}
}
```
```console
$ docker buildx bake --print webapp
{
"group": {
"default": {
"targets": [
"webapp"
]
}
},
"target": {
"webapp": {
"context": ".",
"dockerfile": "Dockerfile",
"args": {
"buildno": "124"
}
}
}
}
```
#### Defining an `increment` function
It also supports [user defined functions](https://github.com/hashicorp/hcl/tree/main/ext/userfunc).
The following example defines a simple `increment` function.
```hcl
# docker-bake.hcl
function "increment" {
params = [number]
result = number + 1
}
group "default" {
targets = ["webapp"]
}
target "webapp" {
args = {
buildno = "${increment(123)}"
}
}
```
```console
$ docker buildx bake --print webapp
{
"group": {
"default": {
"targets": [
"webapp"
]
}
},
"target": {
"webapp": {
"context": ".",
"dockerfile": "Dockerfile",
"args": {
"buildno": "124"
}
}
}
}
```
#### Only adding tags if a variable is not empty, using `notequal`
Here we are using the conditional `notequal` function, which exists for
symmetry with the `equal` one.
```hcl
# docker-bake.hcl
variable "TAG" {default="" }
group "default" {
targets = [
"webapp",
]
}
target "webapp" {
context="."
dockerfile="Dockerfile"
tags = [
"my-image:latest",
notequal("",TAG) ? "my-image:${TAG}": "",
]
}
```
```console
$ docker buildx bake --print webapp
{
"group": {
"default": {
"targets": [
"webapp"
]
}
},
"target": {
"webapp": {
"context": ".",
"dockerfile": "Dockerfile",
"tags": [
"my-image:latest"
]
}
}
}
```
#### Using variables in functions
Variables can reference other variables, just like target blocks can. Stdlib
functions can also be called, but user-defined functions can't at the moment.
```hcl
# docker-bake.hcl
variable "REPO" {
default = "user/repo"
}
function "tag" {
params = [tag]
result = ["${REPO}:${tag}"]
}
target "webapp" {
tags = tag("v1")
}
```
```console
$ docker buildx bake --print webapp
{
"group": {
"default": {
"targets": [
"webapp"
]
}
},
"target": {
"webapp": {
"context": ".",
"dockerfile": "Dockerfile",
"tags": [
"user/repo:v1"
]
}
}
}
```
#### Using variables in variables across files
When multiple files are specified, one file can use variables defined in
another file.
```hcl
# docker-bake1.hcl
variable "FOO" {
default = upper("${BASE}def")
}
variable "BAR" {
default = "-${FOO}-"
}
target "app" {
args = {
v1 = "pre-${BAR}"
}
}
```
```hcl
# docker-bake2.hcl
variable "BASE" {
default = "abc"
}
target "app" {
args = {
v2 = "${FOO}-post"
}
}
```
```console
$ docker buildx bake -f docker-bake1.hcl -f docker-bake2.hcl --print app
{
"group": {
"default": {
"targets": [
"app"
]
}
},
"target": {
"app": {
"context": ".",
"dockerfile": "Dockerfile",
"args": {
"v1": "pre--ABCDEF-",
"v2": "ABCDEF-post"
}
}
}
}
```
#### Using typed variables
Non-string variables are also accepted. A value passed through the environment
is first parsed into the suitable type.
```hcl
# docker-bake.hcl
variable "FOO" {
default = 3
}
variable "IS_FOO" {
default = true
}
target "app" {
args = {
v1 = FOO > 5 ? "higher" : "lower"
v2 = IS_FOO ? "yes" : "no"
}
}
```
```console
$ docker buildx bake --print app
{
"group": {
"default": {
"targets": [
"app"
]
}
},
"target": {
"app": {
"context": ".",
"dockerfile": "Dockerfile",
"args": {
"v1": "lower",
"v2": "yes"
}
}
}
}
```
### Extension field with Compose
[Special extension](https://github.com/compose-spec/compose-spec/blob/master/spec.md#extension)
field `x-bake` can be used in your compose file to evaluate fields that are not
(yet) available in the [build definition](https://github.com/compose-spec/compose-spec/blob/master/build.md#build-definition).
```yaml
# docker-compose.yml
services:
addon:
image: ct-addon:bar
build:
context: .
dockerfile: ./Dockerfile
args:
CT_ECR: foo
CT_TAG: bar
x-bake:
tags:
- ct-addon:foo
- ct-addon:alp
platforms:
- linux/amd64
- linux/arm64
cache-from:
- user/app:cache
- type=local,src=path/to/cache
cache-to: type=local,dest=path/to/cache
pull: true
aws:
image: ct-fake-aws:bar
build:
dockerfile: ./aws.Dockerfile
args:
CT_ECR: foo
CT_TAG: bar
x-bake:
secret:
- id=mysecret,src=./secret
- id=mysecret2,src=./secret2
platforms: linux/arm64
output: type=docker
no-cache: true
```
```console
$ docker buildx bake --print
{
"group": {
"default": {
"targets": [
"aws",
"addon"
]
}
},
"target": {
"addon": {
"context": ".",
"dockerfile": "./Dockerfile",
"args": {
"CT_ECR": "foo",
"CT_TAG": "bar"
},
"tags": [
"ct-addon:foo",
"ct-addon:alp"
],
"cache-from": [
"user/app:cache",
"type=local,src=path/to/cache"
],
"cache-to": [
"type=local,dest=path/to/cache"
],
"platforms": [
"linux/amd64",
"linux/arm64"
],
"pull": true
},
"aws": {
"context": ".",
"dockerfile": "./aws.Dockerfile",
"args": {
"CT_ECR": "foo",
"CT_TAG": "bar"
},
"tags": [
"ct-fake-aws:bar"
],
"secret": [
"id=mysecret,src=./secret",
"id=mysecret2,src=./secret2"
],
"platforms": [
"linux/arm64"
],
"output": [
"type=docker"
],
"no-cache": true
}
}
}
```
Complete list of valid fields for `x-bake`:
`tags`, `cache-from`, `cache-to`, `secret`, `ssh`, `platforms`, `output`,
`pull`, `no-cache`
### Built-in variables
* `BAKE_CMD_CONTEXT` can be used to access the main `context` for bake command
from a bake file that has been [imported remotely](#file).
* `BAKE_LOCAL_PLATFORM` returns the current platform's default platform
specification (e.g. `linux/amd64`).
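For instance, `BAKE_LOCAL_PLATFORM` can be used to restrict a target to the
machine's own platform (a minimal sketch):
```hcl
target "app" {
  platforms = [BAKE_LOCAL_PLATFORM]
}
```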

View File

@@ -9,52 +9,42 @@ Start a build
### Aliases ### Aliases
`docker buildx build`, `docker buildx b` `build`, `b`
### Options ### Options
| Name | Type | Default | Description | | Name | Description |
| --- | --- | --- | --- | | --- | --- |
| [`--add-host`](https://docs.docker.com/engine/reference/commandline/build/#add-entries-to-container-hosts-file---add-host) | `stringSlice` | | Add a custom host-to-IP mapping (format: `host:ip`) | | [`--add-host stringSlice`](https://docs.docker.com/engine/reference/commandline/build/#add-entries-to-container-hosts-file---add-host) | Add a custom host-to-IP mapping (format: `host:ip`) |
| [`--allow`](#allow) | `stringSlice` | | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) | | [`--allow stringSlice`](#allow) | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
| `--attest` | `stringArray` | | Attestation parameters (format: `type=sbom,generator=image`) | | [`--build-arg stringArray`](https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables---build-arg) | Set build-time variables |
| [`--build-arg`](#build-arg) | `stringArray` | | Set build-time variables | | [`--builder string`](#builder) | Override the configured builder instance |
| [`--build-context`](#build-context) | `stringArray` | | Additional build contexts (e.g., name=path) | | [`--cache-from stringArray`](#cache-from) | External cache sources (e.g., `user/app:cache`, `type=local,src=path/to/dir`) |
| [`--builder`](#builder) | `string` | | Override the configured builder instance | | [`--cache-to stringArray`](#cache-to) | Cache export destinations (e.g., `user/app:cache`, `type=local,dest=path/to/dir`) |
| [`--cache-from`](#cache-from) | `stringArray` | | External cache sources (e.g., `user/app:cache`, `type=local,src=path/to/dir`) | | [`--cgroup-parent string`](https://docs.docker.com/engine/reference/commandline/build/#use-a-custom-parent-cgroup---cgroup-parent) | Optional parent cgroup for the container |
| [`--cache-to`](#cache-to) | `stringArray` | | Cache export destinations (e.g., `user/app:cache`, `type=local,dest=path/to/dir`) | | [`-f`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f), [`--file string`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f) | Name of the Dockerfile (default: `PATH/Dockerfile`) |
| [`--cgroup-parent`](https://docs.docker.com/engine/reference/commandline/build/#use-a-custom-parent-cgroup---cgroup-parent) | `string` | | Optional parent cgroup for the container | | `--iidfile string` | Write the image ID to the file |
| [`-f`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f), [`--file`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f) | `string` | | Name of the Dockerfile (default: `PATH/Dockerfile`) | | `--label stringArray` | Set metadata for an image |
| `--iidfile` | `string` | | Write the image ID to the file | | [`--load`](#load) | Shorthand for `--output=type=docker` |
| `--invoke` | `string` | | Invoke a command after the build [experimental] | | `--metadata-file string` | Write build result metadata to the file |
| `--label` | `stringArray` | | Set metadata for an image | | `--network string` | Set the networking mode for the RUN instructions during build |
| [`--load`](#load) | | | Shorthand for `--output=type=docker` | | `--no-cache` | Do not use cache when building the image |
| [`--metadata-file`](#metadata-file) | `string` | | Write build result metadata to the file | | [`-o`](#output), [`--output stringArray`](#output) | Output destination (format: `type=local,dest=path`) |
| `--network` | `string` | `default` | Set the networking mode for the `RUN` instructions during build | | [`--platform stringArray`](#platform) | Set target platform for build |
| `--no-cache` | | | Do not use cache when building the image | | [`--progress string`](#progress) | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
| `--no-cache-filter` | `stringArray` | | Do not cache specified stages | | `--pull` | Always attempt to pull a newer version of the image |
| [`-o`](#output), [`--output`](#output) | `stringArray` | | Output destination (format: `type=local,dest=path`) | | [`--push`](#push) | Shorthand for `--output=type=registry` |
| [`--platform`](#platform) | `stringArray` | | Set target platform for build | | `-q`, `--quiet` | Suppress the build output and print image ID on success |
| `--print` | `string` | | Print result of information request (e.g., outline, targets) [experimental] | | `--secret stringArray` | Secret file to expose to the build (format: `id=mysecret,src=/local/secret`) |
| [`--progress`](#progress) | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output | | [`--shm-size bytes`](#shm-size) | Size of `/dev/shm` |
| `--provenance` | `string` | | Shortand for `--attest=type=provenance` | | `--ssh stringArray` | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
| `--pull` | | | Always attempt to pull all referenced images | | [`-t`](https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t), [`--tag stringArray`](https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t) | Name and optionally a tag (format: `name:tag`) |
| [`--push`](#push) | | | Shorthand for `--output=type=registry` | | [`--target string`](https://docs.docker.com/engine/reference/commandline/build/#specifying-target-build-stage---target) | Set the target build stage to build. |
| `-q`, `--quiet` | | | Suppress the build output and print image ID on success | | [`--ulimit ulimit`](#ulimit) | Ulimit options |
| `--sbom` | `string` | | Shorthand for `--attest=type=sbom` |
| [`--secret`](#secret) | `stringArray` | | Secret to expose to the build (format: `id=mysecret[,src=/local/secret]`) |
| [`--shm-size`](#shm-size) | `bytes` | `0` | Size of `/dev/shm` |
| [`--ssh`](#ssh) | `stringArray` | | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
| [`-t`](https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t), [`--tag`](https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t) | `stringArray` | | Name and optionally a tag (format: `name:tag`) |
| [`--target`](https://docs.docker.com/engine/reference/commandline/build/#specifying-target-build-stage---target) | `string` | | Set the target build stage to build |
| [`--ulimit`](#ulimit) | `ulimit` | | Ulimit options |
<!---MARKER_GEN_END--> <!---MARKER_GEN_END-->
Flags marked with `[experimental]` need to be explicitly enabled by setting the
`BUILDX_EXPERIMENTAL=1` environment variable.
## Description ## Description
The `buildx build` command starts a build using BuildKit. This command is similar The `buildx build` command starts a build using BuildKit. This command is similar
@@ -62,236 +52,80 @@ to the UI of `docker build` command and takes the same flags and arguments.
For documentation on most of these flags, refer to the [`docker build` For documentation on most of these flags, refer to the [`docker build`
documentation](https://docs.docker.com/engine/reference/commandline/build/). In documentation](https://docs.docker.com/engine/reference/commandline/build/). In
here we'll document a subset of the new flags. here we'll document a subset of the new flags.
## Examples ## Examples
### <a name="allow"></a> Allow extra privileged entitlement (--allow)
```
--allow=ENTITLEMENT
```
Allow extra privileged entitlement. List of entitlements:
- `network.host` - Allows executions with host networking.
- `security.insecure` - Allows executions without sandbox. See
[related Dockerfile extensions](https://docs.docker.com/engine/reference/builder/#run---securitysandbox).
For entitlements to be enabled, the `buildkitd` daemon also needs to allow them
with `--allow-insecure-entitlement` (see [`create --buildkitd-flags`](buildx_create.md#buildkitd-flags))
**Examples**
```console
$ docker buildx create --use --name insecure-builder --buildkitd-flags '--allow-insecure-entitlement security.insecure'
$ docker buildx build --allow security.insecure .
```
### <a name="build-arg"></a> Set build-time variables (--build-arg)
Same as [`docker build` command](https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables---build-arg).
There are also useful built-in build args like:
* `BUILDKIT_CONTEXT_KEEP_GIT_DIR=<bool>` trigger git context to keep the `.git` directory
* `BUILDKIT_INLINE_BUILDINFO_ATTRS=<bool>` inline build info attributes in image config or not
* `BUILDKIT_INLINE_CACHE=<bool>` inline cache metadata to image config or not
* `BUILDKIT_MULTI_PLATFORM=<bool>` opt into deterministic output regardless of multi-platform output or not
```console
$ docker buildx build --build-arg BUILDKIT_MULTI_PLATFORM=1 .
```
> **Note**
>
> More built-in build args can be found in [Dockerfile reference docs](https://docs.docker.com/engine/reference/builder/#buildkit-built-in-build-args).
### <a name="build-context"></a> Additional build contexts (--build-context)
```
--build-context=name=VALUE
```
Define an additional build context with specified contents. In the Dockerfile the context can be accessed when `FROM name` or `--from=name` is used.
When the Dockerfile defines a stage with the same name, it is overwritten.
The value can be a local source directory, a [local OCI layout compliant directory](https://github.com/opencontainers/image-spec/blob/main/image-layout.md), a container image (with the `docker-image://` prefix), a Git URL, or an HTTP URL.
Replace `alpine:latest` with a pinned one:
```console
$ docker buildx build --build-context alpine=docker-image://alpine@sha256:0123456789 .
```
Expose a secondary local source directory:
```console
$ docker buildx build --build-context project=path/to/project/source .
# docker buildx build --build-context project=https://github.com/myuser/project.git .
```
```dockerfile
# syntax=docker/dockerfile:1
FROM alpine
COPY --from=project myfile /
```
#### <a name="source-oci-layout"></a> Source image from OCI layout directory
Source an image from a local [OCI layout compliant directory](https://github.com/opencontainers/image-spec/blob/main/image-layout.md):
```console
$ docker buildx build --build-context foo=oci-layout:///path/to/local/layout@sha256:abcd12345 .
```
```dockerfile
# syntax=docker/dockerfile:1
FROM alpine
RUN apk add git
COPY --from=foo myfile /
FROM foo
```
The OCI layout directory must be compliant with the [OCI layout specification](https://github.com/opencontainers/image-spec/blob/main/image-layout.md). Buildx looks _solely_ for hashes. It does not
do any form of `image:tag` resolution to find the hash of the manifest; that is up to you.
The format of the `--build-context` must be: `<context>=oci-layout://<path-to-local-layout>@sha256:<hash-of-manifest>`, where:
* `context` is the name of the build context as used in the `Dockerfile`.
* `path-to-local-layout` is the path on the local machine, where you are running `docker build`, to the spec-compliant OCI layout.
* `hash-of-manifest` is the hash of the manifest for the image. It can be a single-architecture manifest or a multi-architecture index.
### <a name="builder"></a> Override the configured builder instance (--builder) ### <a name="builder"></a> Override the configured builder instance (--builder)
Same as [`buildx --builder`](buildx.md#builder). Same as [`buildx --builder`](buildx.md#builder).
### <a name="cache-from"></a> Use an external cache source for a build (--cache-from) ### <a name="platform"></a> Set the target platforms for the build (--platform)
``` ```
--cache-from=[NAME|type=TYPE[,KEY=VALUE]] --platform=value[,value]
``` ```
Use an external cache source for a build. Supported types are `registry`, Set the target platform for the build. All `FROM` commands inside the Dockerfile
`local`, `gha` and `s3`. without their own `--platform` flag will pull base images for this platform and
this value will also be the platform of the resulting image. The default value
will be the current platform of the buildkit daemon.
- [`registry` source](https://github.com/moby/buildkit#registry-push-image-and-cache-separately) When using `docker-container` driver with `buildx`, this flag can accept multiple
can import cache from a cache manifest or (special) image configuration on the values as an input separated by a comma. With multiple values the result will be
registry. built for all of the specified platforms and joined together into a single manifest
- [`local` source](https://github.com/moby/buildkit#local-directory-1) can list.
import cache from local files previously exported with `--cache-to`.
- [`gha` source](https://github.com/moby/buildkit#github-actions-cache-experimental)
can import cache from a previously exported cache with `--cache-to` in your
GitHub repository
- [`s3` source](https://github.com/moby/buildkit#s3-cache-experimental)
can import cache from a previously exported cache with `--cache-to` in your
S3 bucket
If no type is specified, `registry` exporter is used with a specified reference. If the `Dockerfile` needs to invoke the `RUN` command, the builder needs runtime
support for the specified platform. In a clean setup, you can only execute `RUN`
commands for your system architecture.
If your kernel supports [`binfmt_misc`](https://en.wikipedia.org/wiki/Binfmt_misc)
launchers for secondary architectures, buildx will pick them up automatically.
Docker Desktop releases come with `binfmt_misc` automatically configured for `arm64`
and `arm` architectures. You can see what runtime platforms your current builder
instance supports by running `docker buildx inspect --bootstrap`.
`docker` driver currently only supports importing build cache from the registry. Inside a `Dockerfile`, you can access the current platform value through
`TARGETPLATFORM` build argument. Please refer to the [`docker build`
documentation](https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope)
for the full description of automatic platform argument variants.
The formatting for the platform specifier is defined in the [containerd source
code](https://github.com/containerd/containerd/blob/v1.4.3/platforms/platforms.go#L63).
**Examples**
```console ```console
$ docker buildx build --cache-from=user/app:cache . $ docker buildx build --platform=linux/arm64 .
$ docker buildx build --cache-from=user/app . $ docker buildx build --platform=linux/amd64,linux/arm64,linux/arm/v7 .
$ docker buildx build --cache-from=type=registry,ref=user/app . $ docker buildx build --platform=darwin .
$ docker buildx build --cache-from=type=local,src=path/to/cache .
$ docker buildx build --cache-from=type=gha .
$ docker buildx build --cache-from=type=s3,region=eu-west-1,bucket=mybucket .
``` ```
More info about cache exporters and available attributes: https://github.com/moby/buildkit#export-cache ### <a name="progress"></a> Set type of progress output (--progress)
### <a name="cache-to"></a> Export build cache to an external cache destination (--cache-to)
``` ```
--cache-to=[NAME|type=TYPE[,KEY=VALUE]] --progress=VALUE
``` ```
Export build cache to an external cache destination. Supported types are Set type of progress output (auto, plain, tty). Use plain to show container
`registry`, `local`, `inline`, `gha` and `s3`. output (default "auto").
- [`registry` type](https://github.com/moby/buildkit#registry-push-image-and-cache-separately) exports build cache to a cache manifest in the registry. > You can also use the `BUILDKIT_PROGRESS` environment variable to set
- [`local` type](https://github.com/moby/buildkit#local-directory-1) exports > its value.
cache to a local directory on the client.
- [`inline` type](https://github.com/moby/buildkit#inline-push-image-and-cache-together)
writes the cache metadata into the image configuration.
- [`gha` type](https://github.com/moby/buildkit#github-actions-cache-experimental)
exports cache through the [GitHub Actions Cache service API](https://github.com/tonistiigi/go-actions-cache/blob/master/api.md#authentication).
- [`s3` type](https://github.com/moby/buildkit#s3-cache-experimental) exports
cache to a S3 bucket.
`docker` driver currently only supports exporting inline cache metadata to image The following example uses `plain` output during the build:
configuration. Alternatively, `--build-arg BUILDKIT_INLINE_CACHE=1` can be used
to trigger inline cache exporter.
Attribute key:
- `mode` - Specifies how many layers are exported with the cache. `min` only
exports layers already in the final build stage, `max` exports layers for
all stages. Metadata is always exported for the whole build.
```console ```console
$ docker buildx build --cache-to=user/app:cache . $ docker buildx build --load --progress=plain .
$ docker buildx build --cache-to=type=inline .
$ docker buildx build --cache-to=type=registry,ref=user/app .
$ docker buildx build --cache-to=type=local,dest=path/to/cache .
$ docker buildx build --cache-to=type=gha .
$ docker buildx build --cache-to=type=s3,region=eu-west-1,bucket=mybucket .
```
More info about cache exporters and available attributes: https://github.com/moby/buildkit#export-cache #1 [internal] load build definition from Dockerfile
#1 transferring dockerfile: 227B 0.0s done
#1 DONE 0.1s
### <a name="load"></a> Load the single-platform build result to `docker images` (--load) #2 [internal] load .dockerignore
#2 transferring context: 129B 0.0s done
Shorthand for [`--output=type=docker`](#docker). Will automatically load the #2 DONE 0.0s
single-platform build result to `docker images`. ...
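For example, the following builds for the current platform and makes the result runnable locally (the tag is illustrative):

```console
$ docker buildx build --load -t myapp:latest .
$ docker run --rm myapp:latest
```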
### <a name="metadata-file"></a> Write build result metadata to the file (--metadata-file)
To output build metadata such as the image digest, pass the `--metadata-file` flag.
The metadata will be written as a JSON object to the specified file. The
directory of the specified file must already exist and be writable.
```console
$ docker buildx build --load --metadata-file metadata.json .
$ cat metadata.json
```
```json
{
"containerimage.buildinfo": {
"frontend": "dockerfile.v0",
"attrs": {
"context": "https://github.com/crazy-max/buildkit-buildsources-test.git#master",
"filename": "Dockerfile",
"source": "docker/dockerfile:master"
},
"sources": [
{
"type": "docker-image",
"ref": "docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0",
"pin": "sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0"
},
{
"type": "docker-image",
"ref": "docker.io/library/alpine:3.13",
"pin": "sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c"
}
]
},
"containerimage.config.digest": "sha256:2937f66a9722f7f4a2df583de2f8cb97fc9196059a410e7f00072fc918930e66",
"containerimage.descriptor": {
"annotations": {
"config.digest": "sha256:2937f66a9722f7f4a2df583de2f8cb97fc9196059a410e7f00072fc918930e66",
"org.opencontainers.image.created": "2022-02-08T21:28:03Z"
},
"digest": "sha256:19ffeab6f8bc9293ac2c3fdf94ebe28396254c993aea0b5a542cfb02e0883fa3",
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"size": 506
},
"containerimage.digest": "sha256:19ffeab6f8bc9293ac2c3fdf94ebe28396254c993aea0b5a542cfb02e0883fa3"
}
``` ```
### <a name="output"></a> Set the export action for the build result (-o, --output) ### <a name="output"></a> Set the export action for the build result (-o, --output)
@@ -312,6 +146,8 @@ If just the path is specified as a value, `buildx` will use the local exporter
with this path as the destination. If the value is "-", `buildx` will use `tar` with this path as the destination. If the value is "-", `buildx` will use `tar`
exporter and write to `stdout`. exporter and write to `stdout`.
**Examples**
```console ```console
$ docker buildx build -o . . $ docker buildx build -o . .
$ docker buildx build -o outdir . $ docker buildx build -o outdir .
@@ -365,7 +201,7 @@ The most common usecase for multi-platform images is to directly push to a regis
Attribute keys: Attribute keys:
- `dest` - destination path where the tarball will be written. If not specified, the - `dest` - destination path where the tarball will be written. If not specified, the
tar will be loaded automatically to the current docker instance. tar will be loaded automatically to the current docker instance.
- `context` - name for the docker context where to import the result - `context` - name for the docker context where to import the result
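As a sketch, setting `dest` writes the image tarball to disk instead of loading it, after which it can be imported with `docker load` (the path is illustrative):

```console
$ docker buildx build -o type=docker,dest=./myapp.tar .
$ docker load < ./myapp.tar
```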
#### `image` #### `image`
@@ -383,169 +219,118 @@ Attribute keys:
The `registry` exporter is a shortcut for `type=image,push=true`. The `registry` exporter is a shortcut for `type=image,push=true`.
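For example, the following invocations should be equivalent (the image name is illustrative):

```console
$ docker buildx build -o type=image,name=docker.io/myuser/app,push=true .
$ docker buildx build -o type=registry,name=docker.io/myuser/app .
```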
### <a name="platform"></a> Set the target platforms for the build (--platform)
```
--platform=value[,value]
```
Set the target platform for the build. All `FROM` commands inside the Dockerfile
without their own `--platform` flag will pull base images for this platform and
this value will also be the platform of the resulting image. The default value
will be the current platform of the buildkit daemon.
When using `docker-container` driver with `buildx`, this flag can accept multiple
values as an input separated by a comma. With multiple values the result will be
built for all of the specified platforms and joined together into a single manifest
list.
If the `Dockerfile` needs to invoke the `RUN` command, the builder needs runtime
support for the specified platform. In a clean setup, you can only execute `RUN`
commands for your system architecture.
If your kernel supports [`binfmt_misc`](https://en.wikipedia.org/wiki/Binfmt_misc)
launchers for secondary architectures, buildx will pick them up automatically.
Docker Desktop releases come with `binfmt_misc` automatically configured for `arm64`
and `arm` architectures. You can see what runtime platforms your current builder
instance supports by running `docker buildx inspect --bootstrap`.
Inside a `Dockerfile`, you can access the current platform value through
`TARGETPLATFORM` build argument. Please refer to the [`docker build`
documentation](https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope)
for the full description of automatic platform argument variants.
The formatting for the platform specifier is defined in the [containerd source
code](https://github.com/containerd/containerd/blob/v1.4.3/platforms/platforms.go#L63).
```console
$ docker buildx build --platform=linux/arm64 .
$ docker buildx build --platform=linux/amd64,linux/arm64,linux/arm/v7 .
$ docker buildx build --platform=darwin .
```
### <a name="progress"></a> Set type of progress output (--progress)
```
--progress=VALUE
```
Set type of progress output (auto, plain, tty). Use plain to show container
output (default "auto").
> **Note**
>
> You can also use the `BUILDKIT_PROGRESS` environment variable to set its value.
The following example uses `plain` output during the build:
```console
$ docker buildx build --load --progress=plain .
#1 [internal] load build definition from Dockerfile
#1 transferring dockerfile: 227B 0.0s done
#1 DONE 0.1s
#2 [internal] load .dockerignore
#2 transferring context: 129B 0.0s done
#2 DONE 0.0s
...
```
> **Note**
>
> Check also our [Color output controls guide](https://github.com/docker/buildx/blob/master/docs/guides/color-output.md)
> for modifying the colors that are used to output information to the terminal.
### <a name="push"></a> Push the build result to a registry (--push) ### <a name="push"></a> Push the build result to a registry (--push)
Shorthand for [`--output=type=registry`](#registry). Will automatically push the Shorthand for [`--output=type=registry`](#registry). Will automatically push the
build result to the registry. build result to the registry.
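For example (the image name is illustrative):

```console
$ docker buildx build --push -t docker.io/myuser/app:latest .
```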
### <a name="secret"></a> Secret to expose to the build (--secret) ### <a name="load"></a> Load the single-platform build result to `docker images` (--load)
Shorthand for [`--output=type=docker`](#docker). Will automatically load the
single-platform build result to `docker images`.
### <a name="cache-from"></a> Use an external cache source for a build (--cache-from)
``` ```
--secret=[type=TYPE[,KEY=VALUE]] --cache-from=[NAME|type=TYPE[,KEY=VALUE]]
``` ```
Exposes a secret to the build. The secret can be used by the build using Use an external cache source for a build. Supported types are `registry`,
[`RUN --mount=type=secret` mount](https://docs.docker.com/engine/reference/builder/#run---mounttypesecret). `local` and `gha`.
If `type` is unset it will be detected. Supported types are: - [`registry` source](https://github.com/moby/buildkit#registry-push-image-and-cache-separately)
can import cache from a cache manifest or (special) image configuration on the
registry.
- [`local` source](https://github.com/moby/buildkit#local-directory-1) can
import cache from local files previously exported with `--cache-to`.
- [`gha` source](https://github.com/moby/buildkit#github-actions-cache-experimental)
can import cache from a previously exported cache with `--cache-to` in your
GitHub repository
#### `file` If no type is specified, `registry` exporter is used with a specified reference.
Attribute keys: `docker` driver currently only supports importing build cache from the registry.
- `id` - ID of the secret. Defaults to basename of the `src` path. **Examples**
- `src`, `source` - Secret filename. `id` used if unset.
```dockerfile
# syntax=docker/dockerfile:1
FROM python:3
RUN pip install awscli
RUN --mount=type=secret,id=aws,target=/root/.aws/credentials \
aws s3 cp s3://... ...
```
```console ```console
$ docker buildx build --secret id=aws,src=$HOME/.aws/credentials . $ docker buildx build --cache-from=user/app:cache .
$ docker buildx build --cache-from=user/app .
$ docker buildx build --cache-from=type=registry,ref=user/app .
$ docker buildx build --cache-from=type=local,src=path/to/cache .
$ docker buildx build --cache-from=type=gha .
``` ```
#### `env` More info about cache exporters and available attributes: https://github.com/moby/buildkit#export-cache
Attribute keys: ### <a name="cache-to"></a> Export build cache to an external cache destination (--cache-to)
- `id` - ID of the secret. Defaults to `env` name.
- `env` - Secret environment variable. If unset, `id` is used as the variable name; `src`/`source` are used when `id` is also unset.
```dockerfile
# syntax=docker/dockerfile:1
FROM node:alpine
RUN --mount=type=bind,target=. \
--mount=type=secret,id=SECRET_TOKEN \
SECRET_TOKEN=$(cat /run/secrets/SECRET_TOKEN) yarn run test
``` ```
--cache-to=[NAME|type=TYPE[,KEY=VALUE]]
```
Export build cache to an external cache destination. Supported types are
`registry`, `local`, `inline` and `gha`.
- [`registry` type](https://github.com/moby/buildkit#registry-push-image-and-cache-separately) exports build cache to a cache manifest in the registry.
- [`local` type](https://github.com/moby/buildkit#local-directory-1)
exports cache to a local directory on the client.
- [`inline` type](https://github.com/moby/buildkit#inline-push-image-and-cache-together)
writes the cache metadata into the image configuration.
- [`gha` type](https://github.com/moby/buildkit#github-actions-cache-experimental)
exports cache through the [GitHub Actions Cache service API](https://github.com/tonistiigi/go-actions-cache/blob/master/api.md#authentication).
`docker` driver currently only supports exporting inline cache metadata to image
configuration. Alternatively, `--build-arg BUILDKIT_INLINE_CACHE=1` can be used
to trigger inline cache exporter.
Attribute key:
- `mode` - Specifies how many layers are exported with the cache. `min` only
exports layers already in the final build stage, `max` exports layers for
all stages. Metadata is always exported for the whole build.
**Examples**
```console ```console
$ SECRET_TOKEN=token docker buildx build --secret id=SECRET_TOKEN . $ docker buildx build --cache-to=user/app:cache .
$ docker buildx build --cache-to=type=inline .
$ docker buildx build --cache-to=type=registry,ref=user/app .
$ docker buildx build --cache-to=type=local,dest=path/to/cache .
$ docker buildx build --cache-to=type=gha .
``` ```
### <a name="shm-size"></a> Size of /dev/shm (--shm-size) More info about cache exporters and available attributes: https://github.com/moby/buildkit#export-cache
### <a name="allow"></a> Allow extra privileged entitlement (--allow)
```
--allow=ENTITLEMENT
```
Allow extra privileged entitlement. List of entitlements:
- `network.host` - Allows executions with host networking.
- `security.insecure` - Allows executions without sandbox. See
[related Dockerfile extensions](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/experimental.md#run---securityinsecuresandbox).
For entitlements to be enabled, the `buildkitd` daemon also needs to allow them
with `--allow-insecure-entitlement` (see [`create --buildkitd-flags`](buildx_create.md#buildkitd-flags))
**Examples**
```console
$ docker buildx create --use --name insecure-builder --buildkitd-flags '--allow-insecure-entitlement security.insecure'
$ docker buildx build --allow security.insecure .
```
### <a name="shm-size"></a> Size of `/dev/shm` (--shm-size)
The format is `<number><unit>`. `number` must be greater than `0`. Unit is The format is `<number><unit>`. `number` must be greater than `0`. Unit is
optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g`
(gigabytes). If you omit the unit, the system uses bytes. (gigabytes). If you omit the unit, the system uses bytes.
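For example, to allocate 2 gigabytes of `/dev/shm` for the build:

```console
$ docker buildx build --shm-size 2g .
```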
### <a name="ssh"></a> SSH agent socket or keys to expose to the build (--ssh)
```
--ssh=default|<id>[=<socket>|<key>[,<key>]]
```
This can be useful when some commands in your Dockerfile need specific SSH
authentication (e.g., cloning a private repository).
`--ssh` exposes SSH agent socket or keys to the build and can be used with the
[`RUN --mount=type=ssh` mount](https://docs.docker.com/engine/reference/builder/#run---mounttypessh).
Example to access Gitlab using an SSH agent socket:
```dockerfile
# syntax=docker/dockerfile:1
FROM alpine
RUN apk add --no-cache openssh-client
RUN mkdir -p -m 0700 ~/.ssh && ssh-keyscan gitlab.com >> ~/.ssh/known_hosts
RUN --mount=type=ssh ssh -q -T git@gitlab.com 2>&1 | tee /hello
# "Welcome to GitLab, @GITLAB_USERNAME_ASSOCIATED_WITH_SSHKEY" should be printed here
# when the build progress type is set to `plain`.
```
```console
$ eval $(ssh-agent)
$ ssh-add ~/.ssh/id_rsa
(Input your passphrase here)
$ docker buildx build --ssh default=$SSH_AUTH_SOCK .
```
### <a name="ulimit"></a> Set ulimits (--ulimit) ### <a name="ulimit"></a> Set ulimits (--ulimit)
`--ulimit` is specified with a soft and hard limit as such: `--ulimit` is specified with a soft and hard limit as such:
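The value follows Docker's usual ulimit syntax, `<type>=<soft limit>[:<hard limit>]`; for example:

```console
$ docker buildx build --ulimit nofile=1024:1024 .
```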
@@ -9,19 +9,19 @@ Create a new builder instance
### Options ### Options
| Name | Type | Default | Description | | Name | Description |
| --- | --- | --- | --- | | --- | --- |
| [`--append`](#append) | | | Append a node to builder instead of changing it | | [`--append`](#append) | Append a node to builder instead of changing it |
| `--bootstrap` | | | Boot builder after creation | | `--bootstrap` | Boot builder after creation |
| [`--buildkitd-flags`](#buildkitd-flags) | `string` | | Flags for buildkitd daemon | | [`--buildkitd-flags string`](#buildkitd-flags) | Flags for buildkitd daemon |
| [`--config`](#config) | `string` | | BuildKit config file | | [`--config string`](#config) | BuildKit config file |
| [`--driver`](#driver) | `string` | | Driver to use (available: `docker-container`, `kubernetes`, `remote`) | | [`--driver string`](#driver) | Driver to use (available: `docker`, `docker-container`, `kubernetes`) |
| [`--driver-opt`](#driver-opt) | `stringArray` | | Options for the driver | | [`--driver-opt stringArray`](#driver-opt) | Options for the driver |
| [`--leave`](#leave) | | | Remove a node from builder instead of changing it | | [`--leave`](#leave) | Remove a node from builder instead of changing it |
| [`--name`](#name) | `string` | | Builder instance name | | [`--name string`](#name) | Builder instance name |
| [`--node`](#node) | `string` | | Create/modify node with given name | | [`--node string`](#node) | Create/modify node with given name |
| [`--platform`](#platform) | `stringArray` | | Fixed platforms for current node | | [`--platform stringArray`](#platform) | Fixed platforms for current node |
| [`--use`](#use) | | | Set the current builder instance | | [`--use`](#use) | Set the current builder instance |
<!---MARKER_GEN_END--> <!---MARKER_GEN_END-->
@@ -47,6 +47,8 @@ The `--append` flag changes the action of the command to append a new node to an
existing builder specified by `--name`. Buildx will choose an appropriate node existing builder specified by `--name`. Buildx will choose an appropriate node
for a build based on the platforms it supports. for a build based on the platforms it supports.
**Examples**
```console ```console
$ docker buildx create mycontext1 $ docker buildx create mycontext1
eager_beaver eager_beaver
@@ -65,6 +67,8 @@ Adds flags when starting the buildkitd daemon. They take precedence over the
configuration file specified by [`--config`](#config). See `buildkitd --help` configuration file specified by [`--config`](#config). See `buildkitd --help`
for the available flags. for the available flags.
**Example**
``` ```
--buildkitd-flags '--debug --debugaddr 0.0.0.0:6666' --buildkitd-flags '--debug --debugaddr 0.0.0.0:6666'
``` ```
@@ -79,11 +83,6 @@ Specifies the configuration file for the buildkitd daemon to use. The configurat
can be overridden by [`--buildkitd-flags`](#buildkitd-flags). can be overridden by [`--buildkitd-flags`](#buildkitd-flags).
See an [example buildkitd configuration file](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md). See an [example buildkitd configuration file](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md).
If the configuration file is not specified, buildx will look for one by default in:
* `$BUILDX_CONFIG/buildkitd.default.toml`
* `$DOCKER_CONFIG/buildx/buildkitd.default.toml`
* `~/.docker/buildx/buildkitd.default.toml`
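For example, a builder can also be created from an explicit configuration file (the path is illustrative):

```console
$ docker buildx create --name mybuilder --config ./buildkitd.toml --use
```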
Note that if you create a `docker-container` builder and have specified Note that if you create a `docker-container` builder and have specified
certificates for registries in the `buildkitd.toml` configuration, the files certificates for registries in the `buildkitd.toml` configuration, the files
will be copied into the container under `/etc/buildkit/certs` and configuration will be copied into the container under `/etc/buildkit/certs` and configuration
@@ -123,59 +122,56 @@ Unlike `docker` driver, built images will not automatically appear in
`docker images` and [`build --load`](buildx_build.md#load) needs to be used `docker images` and [`build --load`](buildx_build.md#load) needs to be used
to achieve that. to achieve that.
#### `remote` driver
Uses a remote instance of buildkitd over an arbitrary connection. With this
driver, you manually create and manage instances of buildkit yourself, and
configure buildx to point at it.
Unlike `docker` driver, built images will not automatically appear in
`docker images` and [`build --load`](buildx_build.md#load) needs to be used
to achieve that.
### <a name="driver-opt"></a> Set additional driver-specific options (--driver-opt) ### <a name="driver-opt"></a> Set additional driver-specific options (--driver-opt)
``` ```
--driver-opt OPTIONS --driver-opt OPTIONS
``` ```
Passes additional driver-specific options. Passes additional driver-specific options. Details for each driver:
Note: When using quoted values, for example for the `nodeselector` or - `docker` - No driver options
`tolerations` options, ensure that quotes are escaped correctly for your shell. - `docker-container`
- `image=IMAGE` - Sets the container image to be used for running buildkit.
- `network=NETMODE` - Sets the network mode for running the buildkit container.
- `cgroup-parent=CGROUP` - Sets the cgroup parent of the buildkit container if docker is using the "cgroupfs" driver. Defaults to `/docker/buildx`.
- `kubernetes`
- `image=IMAGE` - Sets the container image to be used for running buildkit.
- `namespace=NS` - Sets the Kubernetes namespace. Defaults to the current namespace.
- `replicas=N` - Sets the number of `Pod` replicas. Defaults to 1.
- `requests.cpu` - Sets the request CPU value specified in units of Kubernetes CPU. Example `requests.cpu=100m`, `requests.cpu=2`
- `requests.memory` - Sets the request memory value specified in bytes or with a valid suffix. Example `requests.memory=500Mi`, `requests.memory=4G`
- `limits.cpu` - Sets the limit CPU value specified in units of Kubernetes CPU. Example `limits.cpu=100m`, `limits.cpu=2`
- `limits.memory` - Sets the limit memory value specified in bytes or with a valid suffix. Example `limits.memory=500Mi`, `limits.memory=4G`
- `nodeselector="label1=value1,label2=value2"` - Sets the kv of `Pod` nodeSelector. No Defaults. Example `nodeselector=kubernetes.io/arch=arm64`
- `rootless=(true|false)` - Run the container as a non-root user without `securityContext.privileged`. [Using Ubuntu host kernel is recommended](https://github.com/moby/buildkit/blob/master/docs/rootless.md). Defaults to false.
- `loadbalance=(sticky|random)` - Load-balancing strategy. If set to "sticky", the pod is chosen using the hash of the context path. Defaults to "sticky"
- `qemu.install=(true|false)` - Install QEMU emulation for multi platforms support.
- `qemu.image=IMAGE` - Sets the QEMU emulation image. Defaults to `tonistiigi/binfmt:latest`
#### `docker` driver **Examples**
No driver options. #### Use a custom network
#### `docker-container` driver ```console
$ docker network create foonet
$ docker buildx create --name builder --driver docker-container --driver-opt network=foonet --use
$ docker buildx inspect --bootstrap
$ docker inspect buildx_buildkit_builder0 --format={{.NetworkSettings.Networks}}
map[foonet:0xc00018c0c0]
```
- `image=IMAGE` - Sets the container image to be used for running buildkit. #### OpenTelemetry support
- `network=NETMODE` - Sets the network mode for running the buildkit container.
- `cgroup-parent=CGROUP` - Sets the cgroup parent of the buildkit container if docker is using the "cgroupfs" driver. Defaults to `/docker/buildx`.
#### `kubernetes` driver To capture the trace to [Jaeger](https://github.com/jaegertracing/jaeger), set
`JAEGER_TRACE` environment variable to the collection address using the `driver-opt`:
- `image=IMAGE` - Sets the container image to be used for running buildkit. ```console
- `namespace=NS` - Sets the Kubernetes namespace. Defaults to the current namespace. $ docker run -d --name jaeger -p 6831:6831/udp -p 16686:16686 jaegertracing/all-in-one
- `replicas=N` - Sets the number of `Pod` replicas. Defaults to 1. $ docker buildx create --name builder --driver docker-container --driver-opt network=host --driver-opt env.JAEGER_TRACE=localhost:6831 --use
- `requests.cpu` - Sets the request CPU value specified in units of Kubernetes CPU. Example `requests.cpu=100m`, `requests.cpu=2` $ docker buildx inspect --bootstrap
- `requests.memory` - Sets the request memory value specified in bytes or with a valid suffix. Example `requests.memory=500Mi`, `requests.memory=4G` # buildx command should be traced at http://127.0.0.1:16686/
- `limits.cpu` - Sets the limit CPU value specified in units of Kubernetes CPU. Example `limits.cpu=100m`, `limits.cpu=2` ```
- `limits.memory` - Sets the limit memory value specified in bytes or with a valid suffix. Example `limits.memory=500Mi`, `limits.memory=4G`
- `"nodeselector=label1=value1,label2=value2"` - Sets the kv of `Pod` nodeSelector. No Defaults. Example `nodeselector=kubernetes.io/arch=arm64`
- `"tolerations=key=foo,value=bar;key=foo2,operator=exists;key=foo3,effect=NoSchedule"` - Sets the `Pod` tolerations. Accepts the same values as the kube manifest tolera>tions. Key-value pairs are separated by `,`, tolerations are separated by `;`. No Defaults. Example `tolerations=operator=exists`
- `rootless=(true|false)` - Run the container as a non-root user without `securityContext.privileged`. Needs Kubernetes 1.19 or later. [Using Ubuntu host kernel is recommended](https://github.com/moby/buildkit/blob/master/docs/rootless.md). Defaults to false.
- `loadbalance=(sticky|random)` - Load-balancing strategy. If set to "sticky", the pod is chosen using the hash of the context path. Defaults to "sticky"
- `qemu.install=(true|false)` - Install QEMU emulation for multi platforms support.
- `qemu.image=IMAGE` - Sets the QEMU emulation image. Defaults to `tonistiigi/binfmt:latest`
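A minimal sketch of a Kubernetes-backed builder using some of these options (the namespace and replica count are illustrative):

```console
$ docker buildx create --name k8s-builder --driver kubernetes \
  --driver-opt namespace=buildkit --driver-opt replicas=2 --use
```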
#### `remote` driver
- `key=KEY` - Sets the TLS client key.
- `cert=CERT` - Sets the TLS client certificate to present to buildkitd.
- `cacert=CACERT` - Sets the TLS certificate authority used for validation.
- `servername=SERVER` - Sets the TLS server name to be used in requests (defaults to the endpoint hostname).
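A hypothetical setup pointing buildx at an existing buildkitd endpoint over TLS (the endpoint and certificate paths are illustrative):

```console
$ docker buildx create --name remote-builder --driver remote \
  --driver-opt cacert=/etc/certs/ca.pem \
  --driver-opt cert=/etc/certs/client.pem \
  --driver-opt key=/etc/certs/client-key.pem \
  tcp://buildkitd.example.com:1234
```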
### <a name="leave"></a> Remove a node from a builder (--leave) ### <a name="leave"></a> Remove a node from a builder (--leave)
@@ -183,6 +179,8 @@ The `--leave` flag changes the action of the command to remove a node from a
builder. The builder needs to be specified with `--name` and node that is removed builder. The builder needs to be specified with `--name` and node that is removed
is set with `--node`. is set with `--node`.
**Examples**
```console ```console
$ docker buildx create --name mybuilder --node mybuilder0 --leave $ docker buildx create --name mybuilder --node mybuilder0 --leave
``` ```
@@ -206,7 +204,7 @@ The `--node` flag specifies the name of the node to be created or modified. If
none is specified, it is the name of the builder it belongs to, with an index none is specified, it is the name of the builder it belongs to, with an index
number suffix. number suffix.
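For example, naming the node explicitly when creating a builder from an existing context (the names are illustrative):

```console
$ docker buildx create --name mybuilder --node mybuilder0 mycontext1
```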
### <a name="platform"></a> Set the platforms supported by the node (--platform) ### <a name="platform"></a> Set the platforms supported by the node
``` ```
--platform PLATFORMS --platform PLATFORMS
@@ -218,12 +216,14 @@ will also automatically detect the platforms it supports, but manual values take
priority over the detected ones and can be used when multiple nodes support priority over the detected ones and can be used when multiple nodes support
building for the same platform. building for the same platform.
**Examples**
```console ```console
$ docker buildx create --platform linux/amd64 $ docker buildx create --platform linux/amd64
$ docker buildx create --platform linux/arm64,linux/arm/v8 $ docker buildx create --platform linux/arm64,linux/arm/v8
``` ```
### <a name="use"></a> Automatically switch to the newly created builder (--use) ### <a name="use"></a> Automatically switch to the newly created builder
The `--use` flag automatically switches the current builder to the newly created The `--use` flag automatically switches the current builder to the newly created
one. Equivalent to running `docker buildx use $(docker buildx create ...)`. one. Equivalent to running `docker buildx use $(docker buildx create ...)`.
@@ -9,11 +9,11 @@ Disk usage
### Options ### Options
| Name | Type | Default | Description | | Name | Description |
| --- | --- | --- | --- | | --- | --- |
| [`--builder`](#builder) | `string` | | Override the configured builder instance | | [`--builder string`](#builder) | Override the configured builder instance |
| `--filter` | `filter` | | Provide filter values | | `--filter filter` | Provide filter values |
| `--verbose` | | | Provide a more verbose output | | `--verbose` | Provide a more verbose output |
<!---MARKER_GEN_END--> <!---MARKER_GEN_END-->

| Name | Description | | Name | Description |
| --- | --- | | --- | --- |
| [`create`](buildx_imagetools_create.md) | Create a new image based on source images | | [`create`](buildx_imagetools_create.md) | Create a new image based on source images |
| [`inspect`](buildx_imagetools_inspect.md) | Show details of an image in the registry | | [`inspect`](buildx_imagetools_inspect.md) | Show details of image in the registry |
### Options ### Options
| Name | Type | Default | Description | | Name | Description |
| --- | --- | --- | --- | | --- | --- |
| [`--builder`](#builder) | `string` | | Override the configured builder instance | | [`--builder string`](#builder) | Override the configured builder instance |
<!---MARKER_GEN_END--> <!---MARKER_GEN_END-->
@@ -9,20 +9,22 @@ Create a new image based on source images
### Options ### Options
| Name | Type | Default | Description | | Name | Description |
| --- | --- | --- | --- | | --- | --- |
| [`--append`](#append) | | | Append to existing manifest | | [`--append`](#append) | Append to existing manifest |
| [`--builder`](#builder) | `string` | | Override the configured builder instance | | [`--builder string`](#builder) | Override the configured builder instance |
| [`--dry-run`](#dry-run) | | | Show final image instead of pushing | | [`--dry-run`](#dry-run) | Show final image instead of pushing |
| [`-f`](#file), [`--file`](#file) | `stringArray` | | Read source descriptor from file | | [`-f`](#file), [`--file stringArray`](#file) | Read source descriptor from file |
| `--progress` | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output | | [`-t`](#tag), [`--tag stringArray`](#tag) | Set reference for new image |
| [`-t`](#tag), [`--tag`](#tag) | `stringArray` | | Set reference for new image |
<!---MARKER_GEN_END--> <!---MARKER_GEN_END-->
## Description ## Description
Imagetools contains commands for working with manifest lists in the registry.
These commands are useful for inspecting multi-platform build results.
Create a new manifest list based on source manifests. The source manifests can Create a new manifest list based on source manifests. The source manifests can
be manifest lists or single platform distribution manifests and must already be manifest lists or single platform distribution manifests and must already
exist in the registry where the new manifest is created. If only one source is exist in the registry where the new manifest is created. If only one source is
@@ -55,15 +57,16 @@ or a JSON of OCI descriptor object.
In order to define annotations or additional platform properties like `os.version` and In order to define annotations or additional platform properties like `os.version` and
`os.features` you need to add them in the OCI descriptor object encoded in JSON. `os.features` you need to add them in the OCI descriptor object encoded in JSON.
```console ```
$ docker buildx imagetools inspect --raw alpine | jq '.manifests[0] | .platform."os.version"="10.1"' > descr.json docker buildx imagetools inspect --raw alpine | jq '.manifests[0] | .platform."os.version"="10.1"' > descr.json
$ docker buildx imagetools create -f descr.json myuser/image docker buildx imagetools create -f descr.json myuser/image
``` ```
The descriptor in the file is merged with the existing descriptor in the registry if it exists. The descriptor in the file is merged with the existing descriptor in the registry if it exists.
The supported fields for the descriptor are defined in the [OCI spec](https://github.com/opencontainers/image-spec/blob/master/descriptor.md#properties). The supported fields for the descriptor are defined in the [OCI spec](https://github.com/opencontainers/image-spec/blob/master/descriptor.md#properties).
### <a name="tag"></a> Set reference for new image (-t, --tag) ### <a name="tag"></a> Set reference for new image (-t, --tag)
``` ```
@@ -72,7 +75,10 @@ The supported fields for the descriptor are defined in [OCI spec](https://github
Use the `-t` or `--tag` flag to set the name of the image to be created. Use the `-t` or `--tag` flag to set the name of the image to be created.
**Examples**
```console ```console
$ docker buildx imagetools create --dry-run alpine@sha256:5c40b3c27b9f13c873fefb2139765c56ce97fd50230f1f2d5c91e55dec171907 sha256:c4ba6347b0e4258ce6a6de2401619316f982b7bcc529f73d2a410d0097730204 $ docker buildx imagetools create --dry-run alpine@sha256:5c40b3c27b9f13c873fefb2139765c56ce97fd50230f1f2d5c91e55dec171907 sha256:c4ba6347b0e4258ce6a6de2401619316f982b7bcc529f73d2a410d0097730204
$ docker buildx imagetools create -t tonistiigi/myapp -f image1 -f image2 $ docker buildx imagetools create -t tonistiigi/myapp -f image1 -f image2
``` ```
@@ -5,57 +5,40 @@ docker buildx imagetools inspect [OPTIONS] NAME
``` ```
<!---MARKER_GEN_START--> <!---MARKER_GEN_START-->
Show details of an image in the registry Show details of image in the registry
### Options ### Options
| Name | Type | Default | Description | | Name | Description |
| --- | --- | --- | --- | | --- | --- |
| [`--builder`](#builder) | `string` | | Override the configured builder instance | | [`--builder string`](#builder) | Override the configured builder instance |
| [`--format`](#format) | `string` | `{{.Manifest}}` | Format the output using the given Go template | | [`--raw`](#raw) | Show original JSON manifest |
| [`--raw`](#raw) | | | Show original, unformatted JSON manifest |
<!---MARKER_GEN_END--> <!---MARKER_GEN_END-->
## Description ## Description
Show details of an image in the registry. Show details of image in the registry.
Example:
```console ```console
$ docker buildx imagetools inspect alpine $ docker buildx imagetools inspect alpine
Name: docker.io/library/alpine:latest Name: docker.io/library/alpine:latest
MediaType: application/vnd.docker.distribution.manifest.list.v2+json MediaType: application/vnd.docker.distribution.manifest.list.v2+json
Digest: sha256:21a3deaa0d32a8057914f36584b5288d2e5ecc984380bc0118285c70fa8c9300 Digest: sha256:28ef97b8686a0b5399129e9b763d5b7e5ff03576aa5580d6f4182a49c5fe1913
Manifests: Manifests:
Name: docker.io/library/alpine:latest@sha256:e7d88de73db3d3fd9b2d63aa7f447a10fd0220b7cbf39803c803f2af9ba256b3 Name: docker.io/library/alpine:latest@sha256:5c40b3c27b9f13c873fefb2139765c56ce97fd50230f1f2d5c91e55dec171907
MediaType: application/vnd.docker.distribution.manifest.v2+json MediaType: application/vnd.docker.distribution.manifest.v2+json
Platform: linux/amd64 Platform: linux/amd64
Name: docker.io/library/alpine:latest@sha256:e047bc2af17934d38c5a7fa9f46d443f1de3a7675546402592ef805cfa929f9d Name: docker.io/library/alpine:latest@sha256:c4ba6347b0e4258ce6a6de2401619316f982b7bcc529f73d2a410d0097730204
MediaType: application/vnd.docker.distribution.manifest.v2+json MediaType: application/vnd.docker.distribution.manifest.v2+json
Platform: linux/arm/v6 Platform: linux/arm/v6
...
Name: docker.io/library/alpine:latest@sha256:8483ecd016885d8dba70426fda133c30466f661bb041490d525658f1aac73822
MediaType: application/vnd.docker.distribution.manifest.v2+json
Platform: linux/arm/v7
Name: docker.io/library/alpine:latest@sha256:c74f1b1166784193ea6c8f9440263b9be6cae07dfe35e32a5df7a31358ac2060
MediaType: application/vnd.docker.distribution.manifest.v2+json
Platform: linux/arm64/v8
Name: docker.io/library/alpine:latest@sha256:2689e157117d2da668ad4699549e55eba1ceb79cb7862368b30919f0488213f4
MediaType: application/vnd.docker.distribution.manifest.v2+json
Platform: linux/386
Name: docker.io/library/alpine:latest@sha256:2042a492bcdd847a01cd7f119cd48caa180da696ed2aedd085001a78664407d6
MediaType: application/vnd.docker.distribution.manifest.v2+json
Platform: linux/ppc64le
Name: docker.io/library/alpine:latest@sha256:49e322ab6690e73a4909f787bcbdb873631264ff4a108cddfd9f9c249ba1d58e
MediaType: application/vnd.docker.distribution.manifest.v2+json
Platform: linux/s390x
``` ```
## Examples ## Examples
@@ -64,569 +47,7 @@ Manifests:
Same as [`buildx --builder`](buildx.md#builder). Same as [`buildx --builder`](buildx.md#builder).
### <a name="format"></a> Format the output (--format)
Format the output using the given Go template. Defaults to `{{.Manifest}}` if
unset. The following fields are available:
* `.Name`: provides the reference of the image
* `.Manifest`: provides the manifest or manifest list
* `.Image`: provides the image config
* `.BuildInfo`: provides [build info from image config](https://github.com/moby/buildkit/blob/master/docs/build-repro.md#image-config)
#### `.Name`
```console
$ docker buildx imagetools inspect alpine --format "{{.Name}}"
Name: docker.io/library/alpine:latest
```
#### `.Manifest`
```console
$ docker buildx imagetools inspect crazymax/loop --format "{{.Manifest}}"
Name: docker.io/crazymax/loop:latest
MediaType: application/vnd.docker.distribution.manifest.v2+json
Digest: sha256:08602e7340970e92bde5e0a2e887c1fde4d9ae753d1e05efb4c8ef3b609f97f1
```
```console
$ docker buildx imagetools inspect moby/buildkit:master --format "{{.Manifest}}"
Name: docker.io/moby/buildkit:master
MediaType: application/vnd.docker.distribution.manifest.list.v2+json
Digest: sha256:3183f7ce54d1efb44c34b84f428ae10aaf141e553c6b52a7ff44cc7083a05a66
Manifests:
Name: docker.io/moby/buildkit:master@sha256:667d28c9fb33820ce686887a717a148e89fa77f9097f9352996bbcce99d352b1
MediaType: application/vnd.docker.distribution.manifest.v2+json
Platform: linux/amd64
Name: docker.io/moby/buildkit:master@sha256:71789527b64ab3d7b3de01d364b449cd7f7a3da758218fbf73b9c9aae05a6775
MediaType: application/vnd.docker.distribution.manifest.v2+json
Platform: linux/arm/v7
Name: docker.io/moby/buildkit:master@sha256:fb64667e1ce6ab0d05478f3a8402af07b27737598dcf9a510fb1d792b13a66be
MediaType: application/vnd.docker.distribution.manifest.v2+json
Platform: linux/arm64
Name: docker.io/moby/buildkit:master@sha256:1c3ddf95a0788e23f72f25800c05abc4458946685e2b66788c3d978cde6da92b
MediaType: application/vnd.docker.distribution.manifest.v2+json
Platform: linux/s390x
Name: docker.io/moby/buildkit:master@sha256:05bcde6d460a284e5bc88026cd070277e8380355de3126cbc8fe8a452708c6b1
MediaType: application/vnd.docker.distribution.manifest.v2+json
Platform: linux/ppc64le
Name: docker.io/moby/buildkit:master@sha256:c04c57765304ab84f4f9807fff3e11605c3a60e16435c734b02c723680f6bd6e
MediaType: application/vnd.docker.distribution.manifest.v2+json
Platform: linux/riscv64
```
#### `.BuildInfo`
```console
$ docker buildx imagetools inspect crazymax/buildx:buildinfo --format "{{.BuildInfo}}"
Name: docker.io/crazymax/buildx:buildinfo
Frontend: dockerfile.v0
Attrs:
filename: Dockerfile
source: docker/dockerfile-upstream:master-labs
build-arg:bar: foo
build-arg:foo: bar
Sources:
Type: docker-image
Ref: docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0
Pin: sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0
Type: docker-image
Ref: docker.io/library/alpine:3.13
Pin: sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c
Type: docker-image
Ref: docker.io/moby/buildkit:v0.9.0
Pin: sha256:8dc668e7f66db1c044aadbed306020743516a94848793e0f81f94a087ee78cab
Type: docker-image
Ref: docker.io/tonistiigi/xx@sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04
Pin: sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04
Type: http
Ref: https://raw.githubusercontent.com/moby/moby/master/README.md
Pin: sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c
```
#### JSON output
A `json` go template func is also available if you want to render fields as
JSON bytes:
```console
$ docker buildx imagetools inspect crazymax/loop --format "{{json .Manifest}}"
```
```json
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"digest": "sha256:08602e7340970e92bde5e0a2e887c1fde4d9ae753d1e05efb4c8ef3b609f97f1",
"size": 949
}
```
```console
$ docker buildx imagetools inspect moby/buildkit:master --format "{{json .Manifest}}"
```
```json
{
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
"digest": "sha256:79d97f205e2799d99a3a8ae2a1ef17acb331e11784262c3faada847dc6972c52",
"size": 2010,
"manifests": [
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"digest": "sha256:bd1e78f06de26610fadf4eb9d04b1a45a545799d6342701726e952cc0c11c912",
"size": 1158,
"platform": {
"architecture": "amd64",
"os": "linux"
}
},
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"digest": "sha256:d37dcced63ec0965824fca644f0ac9efad8569434ec15b4c83adfcb3dcfc743b",
"size": 1158,
"platform": {
"architecture": "arm",
"os": "linux",
"variant": "v7"
}
},
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"digest": "sha256:ce142eb2255e6af46f2809e159fd03081697c7605a3de03b9cbe9a52ddb244bf",
"size": 1158,
"platform": {
"architecture": "arm64",
"os": "linux"
}
},
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"digest": "sha256:f59bfb5062fff76ce464bfa4e25ebaaaac887d6818238e119d68613c456d360c",
"size": 1158,
"platform": {
"architecture": "s390x",
"os": "linux"
}
},
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"digest": "sha256:cc96426e0c50a78105d5637d31356db5dd6ec594f21b24276e534a32da09645c",
"size": 1159,
"platform": {
"architecture": "ppc64le",
"os": "linux"
}
},
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"digest": "sha256:39f9c1e2878e6c333acb23187d6b205ce82ed934c60da326cb2c698192631478",
"size": 1158,
"platform": {
"architecture": "riscv64",
"os": "linux"
}
}
]
}
```
```console
$ docker buildx imagetools inspect crazymax/buildx:buildinfo --format "{{json .BuildInfo}}"
```
```json
{
"frontend": "dockerfile.v0",
"attrs": {
"build-arg:bar": "foo",
"build-arg:foo": "bar",
"filename": "Dockerfile",
"source": "crazymax/dockerfile:buildattrs"
},
"sources": [
{
"type": "docker-image",
"ref": "docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0",
"pin": "sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0"
},
{
"type": "docker-image",
"ref": "docker.io/library/alpine:3.13@sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c",
"pin": "sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c"
},
{
"type": "docker-image",
"ref": "docker.io/moby/buildkit:v0.9.0@sha256:8dc668e7f66db1c044aadbed306020743516a94848793e0f81f94a087ee78cab",
"pin": "sha256:8dc668e7f66db1c044aadbed306020743516a94848793e0f81f94a087ee78cab"
},
{
"type": "docker-image",
"ref": "docker.io/tonistiigi/xx@sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04",
"pin": "sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04"
},
{
"type": "http",
"ref": "https://raw.githubusercontent.com/moby/moby/master/README.md",
"pin": "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c"
}
]
}
```
```console
$ docker buildx imagetools inspect crazymax/buildx:buildinfo --format "{{json .}}"
```
```json
{
"name": "crazymax/buildx:buildinfo",
"manifest": {
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"digest": "sha256:899d2c7acbc124d406820857bb51d9089717bbe4e22b97eb4bc5789e99f09f83",
"size": 2628
},
"image": {
"created": "2022-02-24T12:27:43.627154558Z",
"architecture": "amd64",
"os": "linux",
"config": {
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"DOCKER_TLS_CERTDIR=/certs",
"DOCKER_CLI_EXPERIMENTAL=enabled"
],
"Entrypoint": [
"docker-entrypoint.sh"
],
"Cmd": [
"sh"
]
},
"rootfs": {
"type": "layers",
"diff_ids": [
"sha256:7fcb75871b2101082203959c83514ac8a9f4ecfee77a0fe9aa73bbe56afdf1b4",
"sha256:d3c0b963ff5684160641f936d6a4aa14efc8ff27b6edac255c07f2d03ff92e82",
"sha256:3f8d78f13fa9b1f35d3bc3f1351d03a027c38018c37baca73f93eecdea17f244",
"sha256:8e6eb1137b182ae0c3f5d40ca46341fda2eaeeeb5fa516a9a2bf96171238e2e0",
"sha256:fde4c869a56b54dd76d7352ddaa813fd96202bda30b9dceb2c2f2ad22fa2e6ce",
"sha256:52025823edb284321af7846419899234b3c66219bf06061692b709875ed0760f",
"sha256:50adb5982dbf6126c7cf279ac3181d1e39fc9116b610b947a3dadae6f7e7c5bc",
"sha256:9801c319e1c66c5d295e78b2d3e80547e73c7e3c63a4b71e97c8ca357224af24",
"sha256:dfbfac44d5d228c49b42194c8a2f470abd6916d072f612a6fb14318e94fde8ae",
"sha256:3dfb74e19dedf61568b917c19b0fd3ee4580870027ca0b6054baf239855d1322",
"sha256:b182e707c23e4f19be73f9022a99d2d1ca7bf1ca8f280d40e4d1c10a6f51550e"
]
},
"history": [
{
"created": "2021-11-12T17:19:58.698676655Z",
"created_by": "/bin/sh -c #(nop) ADD file:5a707b9d6cb5fff532e4c2141bc35707593f21da5528c9e71ae2ddb6ba4a4eb6 in / "
},
{
"created": "2021-11-12T17:19:58.948920855Z",
"created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\"]",
"empty_layer": true
},
{
"created": "2022-02-24T12:27:38.285594601Z",
"created_by": "RUN /bin/sh -c apk --update --no-cache add bash ca-certificates openssh-client \u0026\u0026 rm -rf /tmp/* /var/cache/apk/* # buildkit",
"comment": "buildkit.dockerfile.v0"
},
{
"created": "2022-02-24T12:27:41.061874167Z",
"created_by": "COPY /opt/docker/ /usr/local/bin/ # buildkit",
"comment": "buildkit.dockerfile.v0"
},
{
"created": "2022-02-24T12:27:41.174098947Z",
"created_by": "COPY /usr/bin/buildctl /usr/local/bin/buildctl # buildkit",
"comment": "buildkit.dockerfile.v0"
},
{
"created": "2022-02-24T12:27:41.320343683Z",
"created_by": "COPY /usr/bin/buildkit* /usr/local/bin/ # buildkit",
"comment": "buildkit.dockerfile.v0"
},
{
"created": "2022-02-24T12:27:41.447149933Z",
"created_by": "COPY /buildx /usr/libexec/docker/cli-plugins/docker-buildx # buildkit",
"comment": "buildkit.dockerfile.v0"
},
{
"created": "2022-02-24T12:27:43.057722191Z",
"created_by": "COPY /opt/docker-compose /usr/libexec/docker/cli-plugins/docker-compose # buildkit",
"comment": "buildkit.dockerfile.v0"
},
{
"created": "2022-02-24T12:27:43.145224134Z",
"created_by": "ADD https://raw.githubusercontent.com/moby/moby/master/README.md / # buildkit",
"comment": "buildkit.dockerfile.v0"
},
{
"created": "2022-02-24T12:27:43.422212427Z",
"created_by": "ENV DOCKER_TLS_CERTDIR=/certs",
"comment": "buildkit.dockerfile.v0",
"empty_layer": true
},
{
"created": "2022-02-24T12:27:43.422212427Z",
"created_by": "ENV DOCKER_CLI_EXPERIMENTAL=enabled",
"comment": "buildkit.dockerfile.v0",
"empty_layer": true
},
{
"created": "2022-02-24T12:27:43.422212427Z",
"created_by": "RUN /bin/sh -c docker --version \u0026\u0026 buildkitd --version \u0026\u0026 buildctl --version \u0026\u0026 docker buildx version \u0026\u0026 docker compose version \u0026\u0026 mkdir /certs /certs/client \u0026\u0026 chmod 1777 /certs /certs/client # buildkit",
"comment": "buildkit.dockerfile.v0"
},
{
"created": "2022-02-24T12:27:43.514320155Z",
"created_by": "COPY rootfs/modprobe.sh /usr/local/bin/modprobe # buildkit",
"comment": "buildkit.dockerfile.v0"
},
{
"created": "2022-02-24T12:27:43.627154558Z",
"created_by": "COPY rootfs/docker-entrypoint.sh /usr/local/bin/ # buildkit",
"comment": "buildkit.dockerfile.v0"
},
{
"created": "2022-02-24T12:27:43.627154558Z",
"created_by": "ENTRYPOINT [\"docker-entrypoint.sh\"]",
"comment": "buildkit.dockerfile.v0",
"empty_layer": true
},
{
"created": "2022-02-24T12:27:43.627154558Z",
"created_by": "CMD [\"sh\"]",
"comment": "buildkit.dockerfile.v0",
"empty_layer": true
}
]
},
"buildinfo": {
"frontend": "dockerfile.v0",
"attrs": {
"build-arg:bar": "foo",
"build-arg:foo": "bar",
"filename": "Dockerfile",
"source": "docker/dockerfile-upstream:master-labs"
},
"sources": [
{
"type": "docker-image",
"ref": "docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0",
"pin": "sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0"
},
{
"type": "docker-image",
"ref": "docker.io/library/alpine:3.13",
"pin": "sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c"
},
{
"type": "docker-image",
"ref": "docker.io/moby/buildkit:v0.9.0",
"pin": "sha256:8dc668e7f66db1c044aadbed306020743516a94848793e0f81f94a087ee78cab"
},
{
"type": "docker-image",
"ref": "docker.io/tonistiigi/xx@sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04",
"pin": "sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04"
},
{
"type": "http",
"ref": "https://raw.githubusercontent.com/moby/moby/master/README.md",
"pin": "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c"
}
]
}
}
```
#### Multi-platform
Multi-platform images are supported for the `.Image` and `.BuildInfo` fields. To
inspect a specific platform, select it with the `index` Go template function:
```console
$ docker buildx imagetools inspect --format '{{json (index .Image "linux/s390x")}}' moby/buildkit:master
```
```json
{
"created": "2022-02-25T17:13:27.89891722Z",
"architecture": "s390x",
"os": "linux",
"config": {
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Entrypoint": [
"buildkitd"
],
"Volumes": {
"/var/lib/buildkit": {}
}
},
"rootfs": {
"type": "layers",
"diff_ids": [
"sha256:41048e32d0684349141cf05f629c5fc3c5915d1f3426b66dbb8953a540e01e1e",
"sha256:2651209b9208fff6c053bc3c17353cb07874e50f1a9bc96d6afd03aef63de76a",
"sha256:6741ed7e73039d853fa8902246a4c7e8bf9dd09652fd1b08251bc5f9e8876a7f",
"sha256:92ac046adeeb65c86ae3f0b458dee04ad4a462e417661c04d77642c66494f69b"
]
},
"history": [
{
"created": "2021-11-24T20:41:23.709681315Z",
"created_by": "/bin/sh -c #(nop) ADD file:cd24c711a2ef431b3ff94f9a02bfc42f159bc60de1d0eceecafea4e8af02441d in / "
},
{
"created": "2021-11-24T20:41:23.94211262Z",
"created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\"]",
"empty_layer": true
},
{
"created": "2022-01-26T18:15:21.449825391Z",
"created_by": "RUN /bin/sh -c apk add --no-cache fuse3 git openssh pigz xz \u0026\u0026 ln -s fusermount3 /usr/bin/fusermount # buildkit",
"comment": "buildkit.dockerfile.v0"
},
{
"created": "2022-02-24T00:34:00.924540012Z",
"created_by": "COPY examples/buildctl-daemonless/buildctl-daemonless.sh /usr/bin/ # buildkit",
"comment": "buildkit.dockerfile.v0"
},
{
"created": "2022-02-25T17:13:27.89891722Z",
"created_by": "VOLUME [/var/lib/buildkit]",
"comment": "buildkit.dockerfile.v0",
"empty_layer": true
},
{
"created": "2022-02-25T17:13:27.89891722Z",
"created_by": "COPY / /usr/bin/ # buildkit",
"comment": "buildkit.dockerfile.v0"
},
{
"created": "2022-02-25T17:13:27.89891722Z",
"created_by": "ENTRYPOINT [\"buildkitd\"]",
"comment": "buildkit.dockerfile.v0",
"empty_layer": true
}
]
}
```
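The same `index` selection works for the `.BuildInfo` field. A minimal sketch, assuming the inspected image was built with build info attached for the requested platform:

```console
$ docker buildx imagetools inspect --format '{{json (index .BuildInfo "linux/amd64")}}' moby/buildkit:master
```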
### <a name="raw"></a> Show original, unformatted JSON manifest (--raw)

Use the `--raw` option to print the unformatted JSON manifest bytes.
> `jq` is used here to get a better rendering of the output.
```console
$ docker buildx imagetools inspect --raw crazymax/loop | jq
```
```json
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"schemaVersion": 2,
"config": {
"mediaType": "application/vnd.docker.container.image.v1+json",
"digest": "sha256:7ace7d324e79b360b2db8b820d83081863d96d22e734cdf297a8e7fd83f6ceb3",
"size": 2298
},
"layers": [
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"digest": "sha256:5843afab387455b37944e709ee8c78d7520df80f8d01cf7f861aae63beeddb6b",
"size": 2811478
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"digest": "sha256:726d3732a87e1c430d67e8969de6b222a889d45e045ebae1a008a37ba38f3b1f",
"size": 1776812
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"digest": "sha256:5d7cf9b33148a8f220c84f27dd2cfae46aca019a3ea3fbf7274f6d6dbfae8f3b",
"size": 382855
}
]
}
```
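Because `--raw` prints plain JSON, the output composes well with further `jq` filters. For example, to extract only the config digest from the manifest above (an illustrative `jq` expression, not a buildx flag):

```console
$ docker buildx imagetools inspect --raw crazymax/loop | jq -r '.config.digest'
sha256:7ace7d324e79b360b2db8b820d83081863d96d22e734cdf297a8e7fd83f6ceb3
```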
```console
$ docker buildx imagetools inspect --raw moby/buildkit:master | jq
```
```json
{
"mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
"schemaVersion": 2,
"manifests": [
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"digest": "sha256:667d28c9fb33820ce686887a717a148e89fa77f9097f9352996bbcce99d352b1",
"size": 1158,
"platform": {
"architecture": "amd64",
"os": "linux"
}
},
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"digest": "sha256:71789527b64ab3d7b3de01d364b449cd7f7a3da758218fbf73b9c9aae05a6775",
"size": 1158,
"platform": {
"architecture": "arm",
"os": "linux",
"variant": "v7"
}
},
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"digest": "sha256:fb64667e1ce6ab0d05478f3a8402af07b27737598dcf9a510fb1d792b13a66be",
"size": 1158,
"platform": {
"architecture": "arm64",
"os": "linux"
}
},
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"digest": "sha256:1c3ddf95a0788e23f72f25800c05abc4458946685e2b66788c3d978cde6da92b",
"size": 1158,
"platform": {
"architecture": "s390x",
"os": "linux"
}
},
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"digest": "sha256:05bcde6d460a284e5bc88026cd070277e8380355de3126cbc8fe8a452708c6b1",
"size": 1159,
"platform": {
"architecture": "ppc64le",
"os": "linux"
}
},
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"digest": "sha256:c04c57765304ab84f4f9807fff3e11605c3a60e16435c734b02c723680f6bd6e",
"size": 1158,
"platform": {
"architecture": "riscv64",
"os": "linux"
}
}
]
}
```
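Similarly, a short `jq` filter can resolve the digest for a single platform from the manifest list above (again just a sketch of a possible pipeline; the digest shown is taken from the snapshot above and will change as the tag moves):

```console
$ docker buildx imagetools inspect --raw moby/buildkit:master \
  | jq -r '.manifests[] | select(.platform.architecture == "s390x") | .digest'
sha256:1c3ddf95a0788e23f72f25800c05abc4458946685e2b66788c3d978cde6da92b
```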
View File
@@ -9,10 +9,10 @@ Inspect current builder instance

 ### Options

-| Name | Type | Default | Description |
-| --- | --- | --- | --- |
-| [`--bootstrap`](#bootstrap) | | | Ensure builder has booted before inspecting |
-| [`--builder`](#builder) | `string` | | Override the configured builder instance |
+| Name | Description |
+| --- | --- |
+| [`--bootstrap`](#bootstrap) | Ensure builder has booted before inspecting |
+| [`--builder string`](#builder) | Override the configured builder instance |

 <!---MARKER_GEN_END-->
@@ -43,10 +43,6 @@ name of the builder to inspect to get information about that builder.

 The following example shows information about a builder instance named
 `elated_tesla`:

-> **Note**
->
-> Asterisk `*` next to node build platform(s) indicate they had been set manually during `buildx create`. Otherwise, it had been autodetected.
-
 ```console
 $ docker buildx inspect elated_tesla
@@ -57,12 +53,10 @@
 Name:      elated_tesla0
 Endpoint:  unix:///var/run/docker.sock
 Status:    running
-Buildkit:  v0.10.3
 Platforms: linux/amd64

 Name:      elated_tesla1
 Endpoint:  ssh://ubuntu@1.2.3.4
 Status:    running
-Buildkit:  v0.10.3
-Platforms: linux/arm64*, linux/arm/v7, linux/arm/v6
+Platforms: linux/arm64, linux/arm/v7, linux/arm/v6
 ```
View File
@@ -14,16 +14,18 @@ List builder instances

 Lists all builder instances and the nodes for each instance

+**Example**
+
 ```console
 $ docker buildx ls
-NAME/NODE       DRIVER/ENDPOINT             STATUS   BUILDKIT  PLATFORMS
+NAME/NODE       DRIVER/ENDPOINT             STATUS   PLATFORMS
 elated_tesla *  docker-container
-  elated_tesla0 unix:///var/run/docker.sock running  v0.10.3   linux/amd64
-  elated_tesla1 ssh://ubuntu@1.2.3.4        running  v0.10.3   linux/arm64*, linux/arm/v7, linux/arm/v6
+  elated_tesla0 unix:///var/run/docker.sock running  linux/amd64
+  elated_tesla1 ssh://ubuntu@1.2.3.4        running  linux/arm64, linux/arm/v7, linux/arm/v6
 default         docker
-  default       default                     running  20.10.14  linux/amd64
+  default       default                     running  linux/amd64
 ```

 Each builder has one or more nodes associated with it. The current builder's
-name is marked with a `*` in `NAME/NODE` and explicit node to build against for
-the target platform marked with a `*` in the `PLATFORMS` column.
+name is marked with a `*`.
View File
@@ -9,38 +9,18 @@ Remove build cache

 ### Options

-| Name | Type | Default | Description |
-| --- | --- | --- | --- |
-| `-a`, `--all` | | | Include internal/frontend images |
-| [`--builder`](#builder) | `string` | | Override the configured builder instance |
-| `--filter` | `filter` | | Provide filter values (e.g., `until=24h`) |
-| `-f`, `--force` | | | Do not prompt for confirmation |
-| `--keep-storage` | `bytes` | `0` | Amount of disk space to keep for cache |
-| `--verbose` | | | Provide a more verbose output |
+| Name | Description |
+| --- | --- |
+| `-a`, `--all` | Remove all unused images, not just dangling ones |
+| [`--builder string`](#builder) | Override the configured builder instance |
+| `--filter filter` | Provide filter values (e.g., `until=24h`) |
+| `-f`, `--force` | Do not prompt for confirmation |
+| `--keep-storage bytes` | Amount of disk space to keep for cache |
+| `--verbose` | Provide a more verbose output |

 <!---MARKER_GEN_END-->

-## Description
-
-Clears the build cache of the selected builder.
-
-You can finely control what cache data is kept using:
-
-- The `--filter=until=<duration>` flag to keep images that have been used in
-  the last `<duration>` time.
-
-  `<duration>` is a duration string, e.g. `24h` or `2h30m`, with allowable
-  units of `(h)ours`, `(m)inutes` and `(s)econds`.
-
-- The `--keep-storage=<size>` flag to keep `<size>` bytes of data in the cache.
-
-  `<size>` is a human-readable memory string, e.g. `128mb`, `2gb`, etc. Units
-  are case-insensitive.
-
-- The `--all` flag to allow clearing internal helper images and frontend images
-  set using the `#syntax=` directive or the `BUILDKIT_SYNTAX` build argument.
-
 ## Examples

 ### <a name="builder"></a> Override the configured builder instance (--builder)
View File
@@ -9,13 +9,10 @@ Remove a builder instance

 ### Options

-| Name | Type | Default | Description |
-| --- | --- | --- | --- |
-| [`--all-inactive`](#all-inactive) | | | Remove all inactive builders |
-| [`--builder`](#builder) | `string` | | Override the configured builder instance |
-| [`-f`](#force), [`--force`](#force) | | | Do not prompt for confirmation |
-| [`--keep-daemon`](#keep-daemon) | | | Keep the buildkitd daemon running |
-| [`--keep-state`](#keep-state) | | | Keep BuildKit state |
+| Name | Description |
+| --- | --- |
+| [`--builder string`](#builder) | Override the configured builder instance |
+| [`--keep-state`](#keep-state) | Keep BuildKit state |

 <!---MARKER_GEN_END-->
@@ -27,32 +24,10 @@ default builder.

 ## Examples

-### <a name="all-inactive"></a> Remove all inactive builders (--all-inactive)
-
-Remove builders that are not in running state.
-
-```console
-$ docker buildx rm --all-inactive
-WARNING! This will remove all builders that are not in running state. Are you sure you want to continue? [y/N] y
-```
-
 ### <a name="builder"></a> Override the configured builder instance (--builder)

 Same as [`buildx --builder`](buildx.md#builder).

-### <a name="force"></a> Do not prompt for confirmation (--force)
-
-Do not prompt for confirmation before removing inactive builders.
-
-```console
-$ docker buildx rm --all-inactive --force
-```
-
-### <a name="keep-daemon"></a> Keep the buildkitd daemon running (--keep-daemon)
-
-Keep the buildkitd daemon running after the buildx context is removed. This is useful when you manage buildkitd daemons and buildx contexts independently.
-Currently, only supported by the [`docker-container` and `kubernetes` drivers](buildx_create.md#driver).
-
 ### <a name="keep-state"></a> Keep BuildKit state (--keep-state)

 Keep BuildKit state, so it can be reused by a new builder with the same name.
View File
@@ -9,9 +9,9 @@ Stop builder instance

 ### Options

-| Name | Type | Default | Description |
-| --- | --- | --- | --- |
-| [`--builder`](#builder) | `string` | | Override the configured builder instance |
+| Name | Description |
+| --- | --- |
+| [`--builder string`](#builder) | Override the configured builder instance |

 <!---MARKER_GEN_END-->
View File
@@ -9,11 +9,11 @@ Set the current builder instance

 ### Options

-| Name | Type | Default | Description |
-| --- | --- | --- | --- |
-| [`--builder`](#builder) | `string` | | Override the configured builder instance |
-| `--default` | | | Set builder as default for current context |
-| `--global` | | | Builder persists context changes |
+| Name | Description |
+| --- | --- |
+| [`--builder string`](#builder) | Override the configured builder instance |
+| `--default` | Set builder as default for current context |
+| `--global` | Builder persists context changes |

 <!---MARKER_GEN_END-->
View File
@@ -10,9 +10,10 @@ Show buildx version information

 <!---MARKER_GEN_END-->

-## Description
+## Examples

-View version information
+### View version information
+
+View version information

 ```console
 $ docker buildx version
Some files were not shown because too many files have changed in this diff