Mirror of https://gitea.com/Lydanne/buildx.git (synced 2025-08-15 00:05:57 +08:00)

Compare commits: v0.15.0-rc...v0.17.0-rc (254 commits)
Commit SHA1s (abbreviated):

40ef3446f5
7213b2a814
9cfa25ab40
6db3444a25
15e930b691
abc5eaed88
ad9a5196b3
db117855da
ecfe98df6f
479177eaf9
194f523fe1
29d367bdd4
ed341bafd0
c887c2c62a
7c481aae20
f0f8876902
fa1d19bb1e
7bea00f3dd
83d5c0c61b
e58a1d35d1
b920b08ad3
f369377d74
b7486e5cd5
5ecff53e0c
48faab5890
f77866f5b4
203fd8aee5
806ccd3545
d6e030eda7
96eb69aea4
d1d8d6e19c
dc7f679ab1
e403ab2d63
b6a2c96926
7a7a9c8e01
fa8f859159
8411a763d9
6c5279da54
0e64eb4f8b
adbcc2225e
e00efeb399
d03c13b947
4787b5c046
1c66f293c7
246a36d463
a4adae3d6b
36cd88f8ca
07a85a544b
f64b85afe6
4b27fb3022
38a8261f05
a3e6f4be15
6467a86427
58571ff6d6
71174c3041
16860e6dd2
8e02b1a2f7
531c6d4ff1
238a3e03dd
9a0c320588
acf0216292
5a50d13641
2810f20f3a
e2f6808457
39bbb9e478
771f0139ac
6034c58285
199890ff51
d391b1d3e6
f4da6b8f69
386d599309
d130f8ef0a
b691a10379
e628f9ea14
0fb0b6db0d
6efb1d7cdc
bc2748da59
d4c4632cf6
cdd46af015
b62d64b2b5
64171cb13e
f28dff7598
3d542f3d31
30dbdcfa3e
16518091cd
897fc91802
c4d3011a98
a47f761c55
aa35c954f3
56df4e98a0
9f00a9eafa
56cb197c0a
466006849a
738f5ee9db
9b49cf3ae6
bd0b425734
7823a2dc01
cedbc5d68d
12d431d1b4
ca452c47d8
d8f26f79ed
4304d388ef
96509847b9
52bb668085
85cf3bace9
b92bfb53d2
6c929a45c7
d296d5d46a
6e433da23f
3005743f7c
d64d3a4caf
0d37d68efd
03a691a0a5
fa392a2dca
470e45e599
2a2648b1db
ac930bda69
6791ecb628
d717237e4f
ee642ecc4c
06d96d665e
dc83501a5b
0f74f9a794
6d6adc11a1
68076909b9
7957b73a30
1dceb49a27
b96ad59f64
50aa895477
74374ea418
6bbe59697a
c51004e2e4
8535c6b455
153e5ed274
cc097db675
35313e865f
233b869c63
7460f049f2
8f4c8b094a
8da28574b0
7e49141c4e
5ec703ba10
1ffc6f1d58
f65631546d
6fc19c4024
5656c98133
263a9ddaee
1774aa0cf0
7b80ad7069
c0c4d7172b
e498ba9c27
2e7e7abe42
048ef1fbf8
cbe7901667
f374f64d2f
4be2259719
6627f315cb
19d838a3f4
17878d641e
63eb73d9cf
59a0ffcf83
2b17f277a1
ea7c8e83d2
9358c45b46
cfb7fc4fb5
d4b112ab05
f7a32361ea
af902caeaa
04000db8da
b8da14166c
c1f680df14
b6482ab6bb
6f45b0ea06
3971361ed2
818045482e
f8e1746d0d
92a6799514
9358f84668
dbdd3601eb
a3c8a72b54
4c3af9becf
d8c9ebde1f
01a50aac42
f7bcafed21
e5ded4b2de
6ef443de41
076e19d0ce
5599699d29
d155747029
9cebd0c80f
7b1ec7211d
689fd74104
0dfd315daa
9b100c2552
92aaaa8f67
6111d9a00d
310aaf1891
6c7e65c789
66b0abf078
6efa26c2de
5b726afa5e
009f318bbd
9f7c8ea3fb
be12199eb9
94355517c4
cb1be7214a
f42a4a1e94
4d7365018c
3d0951b800
bcd04d5a64
b00001d8ac
31187735de
3373a27f1f
56698805a9
4c2e0c4307
fb6a3178c9
8ca18dee2d
917d2f4a0a
366328ba6a
5f822b36d3
e423d096a6
927fb6731c
314ca32446
3b25e3fa5c
41d369120b
56ffe55f81
6d5823beb1
c116af7b82
fb130243f8
29c8107b85
ee3baa54f7
9de95d81eb
d3a53189f7
0496dae9d5
40fcf992b1
85c25f719c
875e4cd52e
24cedc6c0f
59f52c9505
1e916ae6c6
d342cb9d03
9fdc99dc76
ab835fd904
87efbd43b5
39db6159f9
922328cbaf
aa0f90fdd6
82b6826cd7
1e3aec1ae2
cfef22ddf0
9e5ba66553
9ceda78057
747b75a217
945e774a02
.github/dependabot.yml | 2 (vendored)

@@ -11,5 +11,5 @@ updates:
# trigger a new version: https://github.com/docker/buildx/pull/2222#issuecomment-1919092153
- dependency-name: "docker/docs"
labels:
- "dependencies"
- "area/dependencies"
- "bot"
.github/labeler.yml | 104 (vendored, new file)

@@ -0,0 +1,104 @@
# Add 'area/project' label to changes in basic project documentation and .github folder, excluding .github/workflows
area/project:
- all:
- changed-files:
- any-glob-to-any-file:
- .github/**
- LICENSE
- AUTHORS
- MAINTAINERS
- PROJECT.md
- README.md
- .gitignore
- codecov.yml
- all-globs-to-all-files: '!.github/workflows/*'

# Add 'area/github-actions' label to changes in the .github/workflows folder
area/ci:
- changed-files:
- any-glob-to-any-file: '.github/workflows/**'

# Add 'area/bake' label to changes in the bake
area/bake:
- changed-files:
- any-glob-to-any-file: 'bake/**'

# Add 'area/bake/compose' label to changes in the bake+compose
area/bake/compose:
- changed-files:
- any-glob-to-any-file:
- bake/compose.go
- bake/compose_test.go

# Add 'area/build' label to changes in build files
area/build:
- changed-files:
- any-glob-to-any-file: 'build/**'

# Add 'area/builder' label to changes in builder files
area/builder:
- changed-files:
- any-glob-to-any-file: 'builder/**'

# Add 'area/cli' label to changes in the CLI
area/cli:
- changed-files:
- any-glob-to-any-file:
- cmd/**
- commands/**

# Add 'area/controller' label to changes in the controller
area/controller:
- changed-files:
- any-glob-to-any-file: 'controller/**'

# Add 'area/docs' label to markdown files in the docs folder
area/docs:
- changed-files:
- any-glob-to-any-file: 'docs/**/*.md'

# Add 'area/dependencies' label to changes in go dependency files
area/dependencies:
- changed-files:
- any-glob-to-any-file:
- go.mod
- go.sum
- vendor/**

# Add 'area/driver' label to changes in the driver folder
area/driver:
- changed-files:
- any-glob-to-any-file: 'driver/**'

# Add 'area/driver/docker' label to changes in the docker driver
area/driver/docker:
- changed-files:
- any-glob-to-any-file: 'driver/docker/**'

# Add 'area/driver/docker-container' label to changes in the docker-container driver
area/driver/docker-container:
- changed-files:
- any-glob-to-any-file: 'driver/docker-container/**'

# Add 'area/driver/kubernetes' label to changes in the kubernetes driver
area/driver/kubernetes:
- changed-files:
- any-glob-to-any-file: 'driver/kubernetes/**'

# Add 'area/driver/remote' label to changes in the remote driver
area/driver/remote:
- changed-files:
- any-glob-to-any-file: 'driver/remote/**'

# Add 'area/hack' label to changes in the hack folder
area/hack:
- changed-files:
- any-glob-to-any-file: 'hack/**'

# Add 'area/tests' label to changes in test files
area/tests:
- changed-files:
- any-glob-to-any-file:
- tests/**
- '**/*_test.go'
.github/workflows/build.yml | 90 (vendored)

@@ -21,21 +21,23 @@ on:
env:
BUILDX_VERSION: "latest"
BUILDKIT_IMAGE: "moby/buildkit:latest"
SCOUT_VERSION: "1.11.0"
REPO_SLUG: "docker/buildx-bin"
DESTDIR: "./bin"
TEST_CACHE_SCOPE: "test"
TESTFLAGS: "-v --parallel=6 --timeout=30m"
GOTESTSUM_FORMAT: "standard-verbose"
GO_VERSION: "1.21"
GO_VERSION: "1.22"
GOTESTSUM_VERSION: "v1.9.0" # same as one in Dockerfile

jobs:
test-integration:
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
env:
TESTFLAGS_DOCKER: "-v --parallel=1 --timeout=30m"
TEST_IMAGE_BUILD: "0"
TEST_IMAGE_ID: "buildx-tests"
TEST_COVERAGE: "1"
strategy:
fail-fast: false
matrix:
@@ -43,9 +45,9 @@ jobs:
- master
- latest
- buildx-stable-1
- v0.13.1
- v0.14.1
- v0.13.2
- v0.12.5
- v0.11.6
worker:
- docker-container
- remote
@@ -105,7 +107,7 @@ jobs:
buildkitd-flags: --debug
-
name: Build test image
uses: docker/bake-action@v4
uses: docker/bake-action@v5
with:
targets: integration-test
set: |
@@ -125,6 +127,7 @@ jobs:
directory: ./bin/testreports
flags: integration
token: ${{ secrets.CODECOV_TOKEN }}
disable_file_fixes: true
-
name: Generate annotations
if: always()
@@ -145,7 +148,7 @@ jobs:
fail-fast: false
matrix:
os:
- ubuntu-22.04
- ubuntu-24.04
- macos-12
- windows-2022
env:
@@ -197,6 +200,7 @@ jobs:
env_vars: RUNNER_OS
flags: unit
token: ${{ secrets.CODECOV_TOKEN }}
disable_file_fixes: true
-
name: Generate annotations
if: always()
@@ -211,8 +215,38 @@ jobs:
name: test-reports-${{ env.TESTREPORTS_NAME }}
path: ${{ env.TESTREPORTS_BASEDIR }}

govulncheck:
runs-on: ubuntu-24.04
permissions:
# required to write sarif report
security-events: write
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.BUILDX_VERSION }}
driver-opts: image=${{ env.BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Run
uses: docker/bake-action@v5
with:
targets: govulncheck
env:
GOVULNCHECK_FORMAT: sarif
-
name: Upload SARIF report
if: ${{ github.ref == 'refs/heads/master' && github.repository == 'docker/buildx' }}
uses: github/codeql-action/upload-sarif@v3
with:
sarif_file: ${{ env.DESTDIR }}/govulncheck.out

prepare-binaries:
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
outputs:
matrix: ${{ steps.platforms.outputs.matrix }}
steps:
@@ -230,7 +264,7 @@ jobs:
echo ${{ steps.platforms.outputs.matrix }}

binaries:
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
needs:
- prepare-binaries
strategy:
@@ -273,7 +307,7 @@ jobs:
if-no-files-found: error

bin-image:
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
needs:
- test-integration
- test-unit
@@ -313,7 +347,7 @@ jobs:
password: ${{ secrets.DOCKERPUBLICBOT_WRITE_PAT }}
-
name: Build and push image
uses: docker/bake-action@v4
uses: docker/bake-action@v5
with:
files: |
./docker-bake.hcl
@@ -325,8 +359,40 @@ jobs:
*.cache-from=type=gha,scope=bin-image
*.cache-to=type=gha,scope=bin-image,mode=max

scout:
runs-on: ubuntu-24.04
if: ${{ github.ref == 'refs/heads/master' && github.repository == 'docker/buildx' }}
permissions:
# required to write sarif report
security-events: write
needs:
- bin-image
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ vars.DOCKERPUBLICBOT_USERNAME }}
password: ${{ secrets.DOCKERPUBLICBOT_WRITE_PAT }}
-
name: Scout
id: scout
uses: crazy-max/.github/.github/actions/docker-scout@ccae1c98f1237b5c19e4ef77ace44fa68b3bc7e4
with:
version: ${{ env.SCOUT_VERSION }}
format: sarif
image: registry://${{ env.REPO_SLUG }}:master
-
name: Upload SARIF report
uses: github/codeql-action/upload-sarif@v3
with:
sarif_file: ${{ steps.scout.outputs.result-file }}

release:
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
needs:
- test-integration
- test-unit
@@ -356,7 +422,7 @@ jobs:
-
name: GitHub Release
if: startsWith(github.ref, 'refs/tags/v')
uses: softprops/action-gh-release@69320dbe05506a9a39fc8ae11030b214ec2d1f87 # v2.0.5
uses: softprops/action-gh-release@c062e08bd532815e2082a85e87e3ef29c3e6d191 # v2.0.8
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
.github/workflows/codeql.yml | 4 (vendored)

@@ -13,11 +13,11 @@ permissions:
security-events: write

env:
GO_VERSION: "1.21"
GO_VERSION: "1.22"

jobs:
codeql:
runs-on: ubuntu-latest
runs-on: ubuntu-24.04
steps:
-
name: Checkout
.github/workflows/docs-release.yml | 6 (vendored)

@@ -12,7 +12,7 @@ on:

jobs:
open-pr:
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
if: ${{ (github.event.release.prerelease != true || github.event.inputs.tag != '') && github.repository == 'docker/buildx' }}
steps:
-
@@ -36,7 +36,7 @@ jobs:
uses: docker/setup-buildx-action@v3
-
name: Generate yaml
uses: docker/bake-action@v4
uses: docker/bake-action@v5
with:
source: ${{ github.server_url }}/${{ github.repository }}.git#${{ env.RELEASE_NAME }}
targets: update-docs
@@ -57,7 +57,7 @@ jobs:
VENDOR_MODULE: github.com/docker/buildx@${{ env.RELEASE_NAME }}
-
name: Create PR on docs repo
uses: peter-evans/create-pull-request@6d6857d36972b65feb161a90e484f2984215f83e # v6.0.5
uses: peter-evans/create-pull-request@8867c4aba1b742c39f8d0ba35429c2dfa4b6cb20 # v7.0.1
with:
token: ${{ secrets.GHPAT_DOCS_DISPATCH }}
push-to-fork: docker-tools-robot/docker.github.io
.github/workflows/docs-upstream.yml | 4 (vendored)

@@ -22,7 +22,7 @@ on:

jobs:
docs-yaml:
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
steps:
-
name: Checkout
@@ -34,7 +34,7 @@ jobs:
version: latest
-
name: Build reference YAML docs
uses: docker/bake-action@v4
uses: docker/bake-action@v5
with:
targets: update-docs
provenance: false
.github/workflows/e2e.yml | 9 (vendored)

@@ -22,7 +22,7 @@ env:

jobs:
build:
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -33,7 +33,7 @@ jobs:
version: latest
-
name: Build
uses: docker/bake-action@v4
uses: docker/bake-action@v5
with:
targets: binaries
set: |
@@ -84,6 +84,8 @@ jobs:
endpoint: tcp://localhost:1234
- driver: docker-container
metadata-provenance: max
- driver: docker-container
metadata-warnings: true
exclude:
- driver: docker
multi-node: mnode-true
@@ -134,6 +136,9 @@ jobs:
if [ -n "${{ matrix.metadata-provenance }}" ]; then
echo "BUILDX_METADATA_PROVENANCE=${{ matrix.metadata-provenance }}" >> $GITHUB_ENV
fi
if [ -n "${{ matrix.metadata-warnings }}" ]; then
echo "BUILDX_METADATA_WARNINGS=${{ matrix.metadata-warnings }}" >> $GITHUB_ENV
fi
-
name: Install k3s
if: matrix.driver == 'kubernetes'
.github/workflows/labeler.yml | 21 (vendored, new file)

@@ -0,0 +1,21 @@
name: labeler

concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true

on:
pull_request_target:

jobs:
labeler:
permissions:
contents: read
pull-requests: write
runs-on: ubuntu-latest
steps:
-
name: Run
uses: actions/labeler@v5
with:
sync-labels: true
.github/workflows/validate.yml | 6 (vendored)

@@ -18,7 +18,7 @@ on:

jobs:
prepare:
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
outputs:
includes: ${{ steps.matrix.outputs.includes }}
steps:
@@ -67,7 +67,7 @@ jobs:
});

validate:
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
needs:
- prepare
strategy:
@@ -91,7 +91,7 @@ jobs:
version: latest
-
name: Validate
uses: docker/bake-action@v4
uses: docker/bake-action@v5
with:
targets: ${{ matrix.target }}
set: |
.golangci.yml

@@ -1,12 +1,8 @@
run:
timeout: 30m
skip-files:
- ".*\\.pb\\.go$"

modules-download-mode: vendor

build-tags:

linters:
enable:
- gofmt
@@ -37,13 +33,18 @@ linters-settings:
rules:
main:
deny:
# The io/ioutil package has been deprecated.
# https://go.dev/doc/go1.16#ioutil
- pkg: "github.com/containerd/containerd/errdefs"
desc: The containerd errdefs package was migrated to a separate module. Use github.com/containerd/errdefs instead.
- pkg: "github.com/containerd/containerd/log"
desc: The containerd log package was migrated to a separate module. Use github.com/containerd/log instead.
- pkg: "github.com/containerd/containerd/platforms"
desc: The containerd platforms package was migrated to a separate module. Use github.com/containerd/platforms instead.
- pkg: "io/ioutil"
desc: The io/ioutil package has been deprecated.
forbidigo:
forbid:
- '^fmt\.Errorf(# use errors\.Errorf instead)?$'
- '^platforms\.DefaultString(# use platforms\.Format(platforms\.DefaultSpec()) instead\.)?$'
gosec:
excludes:
- G204 # Audit use of command execution
@@ -52,6 +53,8 @@ linters-settings:
G306: "0644"

issues:
exclude-files:
- ".*\\.pb\\.go$"
exclude-rules:
- linters:
- revive
@@ -72,6 +75,6 @@ issues:
- revive
text: "if-return"

# show all
max-issues-per-linter: 0
max-same-issues: 0
# show all
max-issues-per-linter: 0
max-same-issues: 0
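For context on the forbidigo rule above: it steers code away from `fmt.Errorf` toward `errors.Errorf` from `github.com/pkg/errors`, a dependency this changeset already uses (see the `bake/bake.go` imports below). A minimal illustrative sketch, not taken from the diff; the helper name and message are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors" // already imported by bake/bake.go in this changeset
)

// loadConfig is a hypothetical helper used only to illustrate the lint rule.
func loadConfig(name string) error {
	// fmt.Errorf would match the forbidigo pattern above; errors.Errorf is the
	// suggested replacement and records a stack trace at the call site.
	return errors.Errorf("failed to load config %q", name)
}

func main() {
	if err := loadConfig("docker-bake.hcl"); err != nil {
		fmt.Println(err)
	}
}
```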
.mailmap | 14

@@ -1,11 +1,25 @@
# This file lists all individuals having contributed content to the repository.
# For how it is generated, see hack/dockerfiles/authors.Dockerfile.

Batuhan Apaydın <batuhan.apaydin@trendyol.com>
Batuhan Apaydın <batuhan.apaydin@trendyol.com> <developerguy2@gmail.com>
CrazyMax <github@crazymax.dev>
CrazyMax <github@crazymax.dev> <1951866+crazy-max@users.noreply.github.com>
CrazyMax <github@crazymax.dev> <crazy-max@users.noreply.github.com>
David Karlsson <david.karlsson@docker.com>
David Karlsson <david.karlsson@docker.com> <35727626+dvdksn@users.noreply.github.com>
jaihwan104 <jaihwan104@woowahan.com>
jaihwan104 <jaihwan104@woowahan.com> <42341126+jaihwan104@users.noreply.github.com>
Kenyon Ralph <kenyon@kenyonralph.com>
Kenyon Ralph <kenyon@kenyonralph.com> <quic_kralph@quicinc.com>
Sebastiaan van Stijn <github@gone.nl>
Sebastiaan van Stijn <github@gone.nl> <thaJeztah@users.noreply.github.com>
Shaun Thompson <shaun.thompson@docker.com>
Shaun Thompson <shaun.thompson@docker.com> <shaun.b.thompson@gmail.com>
Silvin Lubecki <silvin.lubecki@docker.com>
Silvin Lubecki <silvin.lubecki@docker.com> <31478878+silvin-lubecki@users.noreply.github.com>
Talon Bowler <talon.bowler@docker.com>
Talon Bowler <talon.bowler@docker.com> <nolat301@gmail.com>
Tibor Vass <tibor@docker.com>
Tibor Vass <tibor@docker.com> <tiborvass@users.noreply.github.com>
Tõnis Tiigi <tonistiigi@gmail.com>
AUTHORS | 69

@@ -1,45 +1,112 @@
# This file lists all individuals having contributed content to the repository.
# For how it is generated, see hack/dockerfiles/authors.Dockerfile.

accetto <34798830+accetto@users.noreply.github.com>
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
Aleksa Sarai <cyphar@cyphar.com>
Alex Couture-Beil <alex@earthly.dev>
Andrew Haines <andrew.haines@zencargo.com>
Andy Caldwell <andrew.caldwell@metaswitch.com>
Andy MacKinlay <admackin@users.noreply.github.com>
Anthony Poschen <zanven42@gmail.com>
Arnold Sobanski <arnold@l4g.dev>
Artur Klauser <Artur.Klauser@computer.org>
Batuhan Apaydın <developerguy2@gmail.com>
Avi Deitcher <avi@deitcher.net>
Batuhan Apaydın <batuhan.apaydin@trendyol.com>
Ben Peachey <potherca@gmail.com>
Bertrand Paquet <bertrand.paquet@gmail.com>
Bin Du <bindu@microsoft.com>
Brandon Philips <brandon@ifup.org>
Brian Goff <cpuguy83@gmail.com>
Bryce Lampe <bryce@pulumi.com>
Cameron Adams <pnzreba@gmail.com>
Christian Dupuis <cd@atomist.com>
Cory Snider <csnider@mirantis.com>
CrazyMax <github@crazymax.dev>
David Gageot <david.gageot@docker.com>
David Karlsson <david.karlsson@docker.com>
David Scott <dave@recoil.org>
dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Devin Bayer <dev@doubly.so>
Djordje Lukic <djordje.lukic@docker.com>
Dmitry Makovey <dmakovey@gitlab.com>
Dmytro Makovey <dmytro.makovey@docker.com>
Donghui Wang <977675308@qq.com>
Doug Borg <dougborg@apple.com>
Edgar Lee <edgarl@netflix.com>
Eli Treuherz <et@arenko.group>
Eliott Wiener <eliottwiener@gmail.com>
Elran Shefer <elran.shefer@velocity.tech>
faust <faustin@fala.red>
Felipe Santos <felipecassiors@gmail.com>
Felix de Souza <fdesouza@palantir.com>
Fernando Miguel <github@FernandoMiguel.net>
gfrancesco <gfrancesco@users.noreply.github.com>
gracenoah <gracenoahgh@gmail.com>
Guillaume Lours <705411+glours@users.noreply.github.com>
guoguangwu <guoguangwu@magic-shield.com>
Hollow Man <hollowman@hollowman.ml>
Ian King'ori <kingorim.ian@gmail.com>
idnandre <andre@idntimes.com>
Ilya Dmitrichenko <errordeveloper@gmail.com>
Isaac Gaskin <isaac.gaskin@circle.com>
Jack Laxson <jackjrabbit@gmail.com>
jaihwan104 <jaihwan104@woowahan.com>
Jean-Yves Gastaud <jygastaud@gmail.com>
Jhan S. Álvarez <51450231+yastanotheruser@users.noreply.github.com>
Jonathan A. Sternberg <jonathan.sternberg@docker.com>
Jonathan Piché <jpiche@coveo.com>
Justin Chadwell <me@jedevc.com>
Kenyon Ralph <kenyon@kenyonralph.com>
khs1994 <khs1994@khs1994.com>
Kijima Daigo <norimaking777@gmail.com>
Kohei Tokunaga <ktokunaga.mail@gmail.com>
Kotaro Adachi <k33asby@gmail.com>
Kushagra Mansingh <12158241+kushmansingh@users.noreply.github.com>
l00397676 <lujingxiao@huawei.com>
Laura Brehm <laurabrehm@hey.com>
Laurent Goderre <laurent.goderre@docker.com>
Mark Hildreth <113933455+markhildreth-gravity@users.noreply.github.com>
Mayeul Blanzat <mayeul.blanzat@datadoghq.com>
Michal Augustyn <michal.augustyn@mail.com>
Milas Bowman <milas.bowman@docker.com>
Mitsuru Kariya <mitsuru.kariya@nttdata.com>
Moleus <fafufuburr@gmail.com>
Nick Santos <nick.santos@docker.com>
Nick Sieger <nick@nicksieger.com>
Nicolas De Loof <nicolas.deloof@gmail.com>
Niklas Gehlen <niklas@namespacelabs.com>
Patrick Van Stee <patrick@vanstee.me>
Paweł Gronowski <pawel.gronowski@docker.com>
Phong Tran <tran.pho@northeastern.edu>
Qasim Sarfraz <qasimsarfraz@microsoft.com>
Rob Murray <rob.murray@docker.com>
robertlestak <robert.lestak@umusic.com>
Saul Shanabrook <s.shanabrook@gmail.com>
Sean P. Kane <spkane00@gmail.com>
Sebastiaan van Stijn <github@gone.nl>
Shaun Thompson <shaun.thompson@docker.com>
SHIMA Tatsuya <ts1s1andn@gmail.com>
Silvin Lubecki <silvin.lubecki@docker.com>
Simon A. Eugster <simon.eu@gmail.com>
Solomon Hykes <sh.github.6811@hykes.org>
Sumner Warren <sumner.warren@gmail.com>
Sune Keller <absukl@almbrand.dk>
Talon Bowler <talon.bowler@docker.com>
Tianon Gravi <admwiggin@gmail.com>
Tibor Vass <tibor@docker.com>
Tim Smith <tismith@rvohealth.com>
Timofey Kirillov <timofey.kirillov@flant.com>
Tyler Smith <tylerlwsmith@gmail.com>
Tõnis Tiigi <tonistiigi@gmail.com>
Ulysses Souza <ulyssessouza@gmail.com>
Usual Coder <34403413+Usual-Coder@users.noreply.github.com>
Wang Jinglei <morlay.null@gmail.com>
Wei <daviseago@gmail.com>
Wojciech M <wmiedzybrodzki@outlook.com>
Xiang Dai <764524258@qq.com>
Zachary Povey <zachary.povey@autotrader.co.uk>
zelahi <elahi.zuhayr@gmail.com>
Zero <tobewhatwewant@gmail.com>
zhyon404 <zhyong4@gmail.com>
Zsolt <zsolt.szeberenyi@figured.com>
Dockerfile | 46

@@ -1,19 +1,20 @@
# syntax=docker/dockerfile:1

ARG GO_VERSION=1.21
ARG GO_VERSION=1.22
ARG XX_VERSION=1.4.0

# for testing
ARG DOCKER_VERSION=26.0.0
ARG DOCKER_VERSION=27.1.1
ARG DOCKER_CLI_VERSION=${DOCKER_VERSION}
ARG GOTESTSUM_VERSION=v1.9.0
ARG REGISTRY_VERSION=2.8.0
ARG BUILDKIT_VERSION=v0.13.1
ARG BUILDKIT_VERSION=v0.14.1
ARG UNDOCK_VERSION=0.7.0

FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS golatest
FROM moby/moby-bin:$DOCKER_VERSION AS docker-engine
FROM dockereng/cli-bin:$DOCKER_VERSION AS docker-cli
FROM dockereng/cli-bin:$DOCKER_CLI_VERSION AS docker-cli
FROM registry:$REGISTRY_VERSION AS registry
FROM moby/buildkit:$BUILDKIT_VERSION AS buildkit
FROM crazymax/undock:$UNDOCK_VERSION AS undock
@@ -27,10 +28,36 @@ WORKDIR /src

FROM gobase AS gotestsum
ARG GOTESTSUM_VERSION
ENV GOFLAGS=
RUN --mount=target=/root/.cache,type=cache \
GOBIN=/out/ go install "gotest.tools/gotestsum@${GOTESTSUM_VERSION}" && \
/out/gotestsum --version
ENV GOFLAGS=""
RUN --mount=target=/root/.cache,type=cache <<EOT
set -ex
go install "gotest.tools/gotestsum@${GOTESTSUM_VERSION}"
go install "github.com/wadey/gocovmerge@latest"
mkdir /out
/go/bin/gotestsum --version
mv /go/bin/gotestsum /out
mv /go/bin/gocovmerge /out
EOT
COPY --chmod=755 <<"EOF" /out/gotestsumandcover
#!/bin/sh
set -x
if [ -z "$GO_TEST_COVERPROFILE" ]; then
exec gotestsum "$@"
fi
coverdir="$(dirname "$GO_TEST_COVERPROFILE")"
mkdir -p "$coverdir/helpers"
gotestsum "$@" "-coverprofile=$GO_TEST_COVERPROFILE"
ecode=$?
go tool covdata textfmt -i=$coverdir/helpers -o=$coverdir/helpers-report.txt
gocovmerge "$coverdir/helpers-report.txt" "$GO_TEST_COVERPROFILE" > "$coverdir/merged-report.txt"
mv "$coverdir/merged-report.txt" "$GO_TEST_COVERPROFILE"
rm "$coverdir/helpers-report.txt"
for f in "$coverdir/helpers"/*; do
rm "$f"
done
rmdir "$coverdir/helpers"
exit $ecode
EOF

FROM gobase AS buildx-version
RUN --mount=type=bind,target=. <<EOT
@@ -42,6 +69,7 @@ EOT

FROM gobase AS buildx-build
ARG TARGETPLATFORM
ARG GO_EXTRA_FLAGS
RUN --mount=type=bind,target=. \
--mount=type=cache,target=/root/.cache \
--mount=type=cache,target=/go/pkg/mod \
@@ -88,7 +116,7 @@ RUN apk add --no-cache \
shadow-uidmap \
xfsprogs \
xz
COPY --link --from=gotestsum /out/gotestsum /usr/bin/
COPY --link --from=gotestsum /out /usr/bin/
COPY --link --from=registry /bin/registry /usr/bin/
COPY --link --from=docker-engine / /usr/bin/
COPY --link --from=docker-cli / /usr/bin/
PROJECT.md | 453 (new file)

@@ -0,0 +1,453 @@
# Project processing guide <!-- omit from toc -->

- [Project scope](#project-scope)
- [Labels](#labels)
- [Global](#global)
- [`area/`](#area)
- [`exp/`](#exp)
- [`impact/`](#impact)
- [`kind/`](#kind)
- [`needs/`](#needs)
- [`priority/`](#priority)
- [`status/`](#status)
- [Types of releases](#types-of-releases)
- [Feature releases](#feature-releases)
- [Release Candidates](#release-candidates)
- [Support Policy](#support-policy)
- [Contributing to Releases](#contributing-to-releases)
- [Patch releases](#patch-releases)
- [Milestones](#milestones)
- [Triage process](#triage-process)
- [Verify essential information](#verify-essential-information)
- [Classify the issue](#classify-the-issue)
- [Prioritization guidelines for `kind/bug`](#prioritization-guidelines-for-kindbug)
- [Issue lifecyle](#issue-lifecyle)
- [Examples](#examples)
- [Submitting a bug](#submitting-a-bug)
- [Pull request review process](#pull-request-review-process)
- [Handling stalled issues and pull requests](#handling-stalled-issues-and-pull-requests)
- [Moving to a discussion](#moving-to-a-discussion)
- [Workflow automation](#workflow-automation)
- [Exempting an issue/PR from stale bot processing](#exempting-an-issuepr-from-stale-bot-processing)
- [Updating dependencies](#updating-dependencies)

---

## Project scope

**Docker Buildx** is a Docker CLI plugin designed to extend build capabilities using BuildKit. It provides advanced features for building container images, supporting multiple builder instances, multi-node builds, and high-level build constructs. Buildx enhances the Docker build process, making it more efficient and flexible, and is compatible with both Docker and Kubernetes environments. Key features include:

- **Familiar user experience:** Buildx offers a user experience similar to legacy docker build, ensuring a smooth transition from legacy commands
- **Full BuildKit capabilities:** Leverage the full feature set of [`moby/buildkit`](https://github.com/moby/buildkit) when using the container driver
- **Multiple builder instances:** Supports the use of multiple builder instances, allowing concurrent builds and effective management and monitoring of these builders.
- **Multi-node builds:** Use multiple nodes to build cross-platform images
- **Compose integration:** Build complex, multi-services files as defined in compose
- **High-level build constructs via `bake`:** Introduces high-level build constructs for more complex build workflows
- **In-container driver support:** Support in-container drivers for both Docker and Kubernetes environments to support isolation/security.

## Labels

Below are common groups, labels, and their intended usage to support issues, pull requests, and discussion processing.

### Global

General attributes that can apply to nearly any issue or pull request.

| Label | Applies to | Description |
| ------------------- | ----------- | ------------------------------------------------------------------------- |
| `bot` | Issues, PRs | Created by a bot |
| `good first issue ` | Issues | Suitable for first-time contributors |
| `help wanted` | Issues, PRs | Assistance requested |
| `lgtm` | PRs | “Looks good to me” approval |
| `stale` | Issues, PRs | The issue/PR has not had activity for a while |
| `rotten` | Issues, PRs | The issue/PR has not had activity since being marked stale and was closed |
| `frozen` | Issues, PRs | The issue/PR should be skipped by the stale-bot |
| `dco/no` | PRs | The PR is missing a developer certificate of origin sign-off |

### `area/`

Area or component of the project affected. Please note that the table below may not be inclusive of all current options.

| Label | Applies to | Description |
| ------------------------------ | ---------- | -------------------------- |
| `area/bake` | Any | `bake` |
| `area/bake/compose` | Any | `bake/compose` |
| `area/build` | Any | `build` |
| `area/builder` | Any | `builder` |
| `area/buildkit` | Any | Relates to `moby/buildkit` |
| `area/cache` | Any | `cache` |
| `area/checks` | Any | `checks` |
| `area/ci` | Any | Project CI |
| `area/cli` | Any | `cli` |
| `area/controller` | Any | `controller` |
| `area/debug` | Any | `debug` |
| `area/dependencies` | Any | Project dependencies |
| `area/dockerfile` | Any | `dockerfile` |
| `area/docs` | Any | `docs` |
| `area/driver` | Any | `driver` |
| `area/driver/docker` | Any | `driver/docker` |
| `area/driver/docker-container` | Any | `driver/docker-container` |
| `area/driver/kubernetes` | Any | `driver/kubernetes` |
| `area/driver/remote` | Any | `driver/remote` |
| `area/feature-parity` | Any | `feature-parity` |
| `area/github-actions` | Any | `github-actions` |
| `area/hack` | Any | Project hack/support |
| `area/imagetools` | Any | `imagetools` |
| `area/metrics` | Any | `metrics` |
| `area/moby` | Any | Relates to `moby/moby` |
| `area/project` | Any | Project support |
| `area/qemu` | Any | `qemu` |
| `area/tests` | Any | Project testing |
| `area/windows` | Any | `windows` |

### `exp/`

Estimated experience level to complete the item

| Label | Applies to | Description |
| ------------------ | ---------- | ------------------------------------------------------------------------------- |
| `exp/beginner` | Issue | Suitable for contributors new to the project or technology stack |
| `exp/intermediate` | Issue | Requires some familiarity with the project and technology |
| `exp/expert` | Issue | Requires deep understanding and advanced skills with the project and technology |

### `impact/`

Potential impact areas of the issue or pull request.

| Label | Applies to | Description |
| -------------------- | ---------- | -------------------------------------------------- |
| `impact/breaking` | PR | Change is API-breaking |
| `impact/changelog` | PR | When complete, the item should be in the changelog |
| `impact/deprecation` | PR | Change is a deprecation of a feature |

### `kind/`

The type of issue, pull request or discussion

| Label | Applies to | Description |
| ------------------ | ----------------- | ------------------------------------------------------- |
| `kind/bug` | Issue, PR | Confirmed bug |
| `kind/chore` | Issue, PR | Project support tasks |
| `kind/docs` | Issue, PR | Additions or modifications to the documentation |
| `kind/duplicate` | Any | Duplicate of another item |
| `kind/enhancement` | Any | Enhancement of an existing feature |
| `kind/feature` | Any | A brand new feature |
| `kind/maybe-bug` | Issue, PR | Unconfirmed bug, turns into kind/bug when confirmed |
| `kind/proposal` | Issue, Discussion | A proposed major change |
| `kind/refactor` | Issue, PR | Refactor of existing code |
| `kind/support` | Any | A question, discussion, or other user support item |
| `kind/tests` | Issue, PR | Additions or modifications to the project testing suite |

### `needs/`

Actions or missing requirements needed by the issue or pull request.

| Label | Applies to | Description |
| --------------------------- | ---------- | ----------------------------------------------------- |
| `needs/assignee` | Issue, PR | Needs an assignee |
| `needs/code-review` | PR | Needs review of code |
| `needs/design-review` | Issue, PR | Needs review of design |
| `needs/docs-review` | Issue, PR | Needs review by the documentation team |
| `needs/docs-update` | Issue, PR | Needs an update to the docs |
| `needs/follow-on-work` | Issue, PR | Needs follow-on work/PR |
| `needs/issue` | PR | Needs an issue |
| `needs/maintainer-decision` | Issue, PR | Needs maintainer discussion/decision before advancing |
| `needs/milestone` | Issue, PR | Needs milestone assignment |
| `needs/more-info` | Any | Needs more information from the author |
| `needs/more-investigation` | Issue, PR | Needs further investigation |
| `needs/priority` | Issue, PR | Needs priority assignment |
| `needs/pull-request` | Issue | Needs a pull request |
| `needs/rebase` | PR | Needs rebase to target branch |
| `needs/reproduction` | Issue, PR | Needs reproduction steps |

### `priority/`

Level of urgency of a `kind/bug` issue or pull request.

| Label | Applies to | Description |
| ------------- | ---------- | ----------------------------------------------------------------------- |
| `priority/P0` | Issue, PR | Urgent: Security, critical bugs, blocking issues. |
| `priority/P1` | Issue, PR | Important: This is a top priority and a must-have for the next release. |
| `priority/P2` | Issue, PR | Normal: Default priority |

### `status/`

Current lifecycle state of the issue or pull request.

| Label | Applies to | Description |
| --------------------- | ---------- | ---------------------------------------------------------------------- |
| `status/accepted` | Issue, PR | The issue has been reviewed and accepted for implementation |
| `status/active` | PR | The PR is actively being worked on by a maintainer or community member |
| `status/blocked` | Issue, PR | The issue/PR is blocked from advancing to another status |
| `status/do-not-merge` | PR | Should not be merged pending further review or changes |
| `status/transfer` | Any | Transferred to another project |
| `status/triage` | Any | The item needs to be sorted by maintainers |
| `status/wontfix` | Issue, PR | The issue/PR will not be fixed or addressed as described |

## Types of releases

This project has feature releases, patch releases, and security releases.

### Feature releases

Feature releases are made from the development branch, followed by cutting a release branch for future patch releases, which may also occur during the code freeze period.

#### Release Candidates

Users can expect 2-3 release candidate (RC) test releases prior to a feature release. The first RC is typically released about one to two weeks before the final release.

#### Support Policy

Once a new feature release is cut, support for the previous feature release is discontinued. An exception may be made for urgent security releases that occur shortly after a new feature release. Buildx does not offer LTS (Long-Term Support) releases.

#### Contributing to Releases

Anyone can request that an issue or PR be included in the next feature or patch release milestone, provided it meets the necessary requirements.

### Patch releases

Patch releases should only include the most critical patches. Stability is vital, so everyone should always use the latest patch release.

If a fix is needed but does not qualify for a patch release because of its code size or other criteria that make it too unpredictable, we will prioritize cutting a new feature release sooner rather than making an exception for backporting.

Following PRs are included in patch releases

- `priority/P0` fixes
- `priority/P1` fixes, assuming maintainers don’t object because of the patch size
- `priority/P2` fixes, only if (both required)
- proposed by maintainer
- the patch is trivial and self-contained
- Documentation-only patches
- Vendored dependency updates, only if:
- Fixing (qualifying) bug or security issue in Buildx
- The patch is small, else a forked version of the dependency with only the patches required

New features do not qualify for patch release.

## Milestones

Milestones are used to help identify what releases a contribution will be in.

- The `v0.next` milestone collects unblocked items planned for the next 2-3 feature releases but not yet assigned to a specific version milestone.
- The `v0.backlog` milestone gathers all triaged items considered for the long-term (beyond the next 3 feature releases) or currently unfit for a future release due to certain conditions. These items may be blocked and need to be unblocked before progressing.

## Triage process

Triage provides an important way to contribute to an open-source project. When submitted without an issue this process applies to Pull Requests as well. Triage helps ensure work items are resolved quickly by:

- Ensuring the issue's intent and purpose are described precisely. This is necessary because it can be difficult for an issue to explain how an end user experiences a problem and what actions they took to arrive at the problem.
- Giving a contributor the information they need before they commit to resolving an issue.
- Lowering the issue count by preventing duplicate issues.
- Streamlining the development process by preventing duplicate discussions.

If you don't have time to code, consider helping with triage. The community will thank you for saving them time by spending some of yours. The same basic process should be applied upon receipt of a new issue.

1. Verify essential information
2. Classify the issue
3. Prioritizing the issue

### Verify essential information

Before advancing the triage process, ensure the issue contains all necessary information to be properly understood and assessed. The required information may vary by issue type, but typically includes the system environment, version numbers, reproduction steps, expected outcomes, and actual results.

- **Exercising Judgment**: Use your best judgment to assess the issue description’s completeness.
- **Communicating Needs**: If the information provided is insufficient, kindly request additional details from the author. Explain that this information is crucial for clarity and resolution of the issue, and apply the `needs/more-information` label to indicate a response from the author is required.

### Classify the issue

An issue will typically have multiple labels. These are used to help communicate key information about context, requirements, and status. At a minimum, a properly classified issue should have:

- (Required) One or more [`area/*`](#area) labels
- (Required) One [`kind/*`](#kind) label to indicate the type of issue
- (Required if `kind/bug`) A [`priority/*`](#priority) label

When assigning a decision the following labels should be present:

- (Required) One [`status/*`](#status) label to indicate lifecycle status

Additional labels can provide more clarity:

- Zero or more [`needs/*`](#needs) labels to indicate missing items
- Zero or more [`impact/*`](#impact) labels
- One [`exp/*`](#exp) label

## Prioritization guidelines for `kind/bug`

When an issue or pull request of `kind/bug` is correctly categorized and attached to a milestone, the labels indicate the urgency with which it should be completed.

**priority/P0**

Fixing this item is the highest priority. A patch release will follow as soon as a patch is available and verified. This level is used exclusively for bugs.

Examples:

- Regression in a critical code path
- Panic in a critical code path
- Corruption in critical code path or rest of the system
- Leaked zero-day critical security

**priority/P1**

Items with this label should be fixed with high priority and almost always included in a patch release. Unless waiting for another issue, patch releases should happen within a week. This level is not used for features or enhancements.

Examples:

- Any regression, panic
- Measurable performance regression
- A major bug in a new feature in the latest release
- Incompatibility with upgraded external dependency

**priority/P2**

This is the default priority and is implied in the absence of a `priority/` label. Bugs with this priority should be included in the next feature release but may land in a patch release if they are ready and unlikely to impact other functionality adversely. Non-bug issues with this priority should also be included in the next feature release if they are available and ready.

Examples:

- Confirmed bugs
- Bugs in non-default configurations
- Most enhancements

## Issue lifecyle

```mermaid
flowchart LR
create([New issue]) --> triage
subgraph triage[Triage Loop]
review[Review]
end
subgraph decision[Decision]
accept[Accept]
close[Close]
end
triage -- if accepted --> accept[Assign status, milestone]
triage -- if rejected --> close[Assign status, close issue]
```

### Examples

#### Submitting a bug

To help illustrate the issue life cycle let’s walk through submitting an issue as a potential bug in CI that enters a feedback loop and is eventually accepted as P2 priority and placed on the backlog.

```mermaid
flowchart LR

new([New issue])

subgraph triage[Triage]
direction LR

create["Action: Submit issue via Bug form\nLabels: kind/maybe-bug, status/triage"]
style create text-align:left

subgraph review[Review]
direction TB
classify["Action: Maintainer reviews issue, requests more info\nLabels: kind/maybe-bug, status/triage, needs/more-info, area/*"]
style classify text-align:left

update["Action: Author updates issue\nLabels: kind/maybe-bug, status/triage, needs/more-info, area/*"]
style update text-align:left

classify --> update
update --> classify
end

create --> review
end

subgraph decision[Decision]
accept["Action: Maintainer reviews updates, accepts, assigns milestone\nLabels: kind/bug, priority/P2, status/accepted, area/*, impact/*"]
style accept text-align: left
end

new --> triage
triage --> decision
```

## Pull request review process

A thorough and timely review process for pull requests (PRs) is crucial for maintaining the integrity and quality of the project while fostering a collaborative environment.

- **Labeling**: Most labels should be inherited from a linked issue. If no issue is linked an extended review process may be required.
- **Continuous Integration**: With few exceptions, it is crucial that all Continuous Integration (CI) workflows pass successfully.
- **Draft Status**: Incomplete or long-running PRs should be placed in "Draft" status. They may revert to "Draft" status upon initial review if significant rework is required.

```mermaid
flowchart LR
triage([Triage])
draft[Draft PR]
review[PR Review]
closed{{Close PR}}
merge{{Merge PR}}

subgraph feedback1[Feedback Loop]
draft
end
subgraph feedback2[Feedback Loop]
review
end

triage --> draft
draft --> review
review --> closed
review --> draft
review --> merge
```

## Handling stalled issues and pull requests

Unfortunately, some issues or pull requests can remain inactive for extended periods. To mitigate this, automation is employed to prompt both the author and maintainers, ensuring that all contributions receive appropriate attention.

**For Authors:**

- **Closure of Inactive Items**: If your issue or PR becomes irrelevant or is no longer needed, please close it to help keep the project clean.
- **Prompt Responses**: If additional information is requested, please respond promptly to facilitate progress.

**For Maintainers:**

- **Timely Responses**: Endeavor to address issues and PRs within a reasonable timeframe to keep the community actively engaged.
- **Engagement with Stale Issues**: If an issue becomes stale due to maintainer inaction, re-engage with the author to reassess and revitalize the discussion.

**Stale and Rotten Policy:**

- An issue or PR will be labeled as **`stale`** after 14 calendar days of inactivity. If it remains inactive for another 30 days, it will be labeled as **`rotten`** and closed.
- Authors whose issues or PRs have been closed are welcome to re-open them or create new ones and link to the original.

**Skipping Stale Processing:**

- To prevent an issue or PR from being marked as stale, label it as **`frozen`**.

**Exceptions to Stale Processing:**

- Issues or PRs marked as **`frozen`**.
- Issues or PRs assigned to a milestone.

## Moving to a discussion

Sometimes, an issue or pull request may not be the appropriate medium for what is essentially a discussion. In such cases, the issue or PR will either be converted to a discussion or a new discussion will be created. The original item will then be labeled appropriately (**`kind/discussion`** or **`kind/question`**) and closed.

If you believe this conversion was made in error, please express your concerns in the new discussion thread. If necessary, a reversal to the original issue or PR format can be facilitated.

## Workflow automation

To help expedite common operations, avoid errors and reduce toil some workflow automation is used by the project. This can include:

- Stale issue or pull request processing
- Auto-labeling actions
- Auto-response actions
- Label carry over from issue to pull request

### Exempting an issue/PR from stale bot processing

The stale item handling is configured in the [repository](link-to-config-file). To exempt an issue or PR from stale processing you can:

- Add the item to a milestone
- Add the `frozen` label to the item

## Updating dependencies

- **Runtime Dependencies**: Use the latest stable release available when the first Release Candidate (RC) of a new feature release is cut. For patch releases, update to the latest corresponding patch release of the dependency.
- **Other Dependencies**: Always permitted to update to the latest patch release in the development branch. Updates to a new feature release require justification, unless the dependency is outdated. Prefer tagged versions of dependencies unless a specific untagged commit is needed. Go modules should specify the lowest compatible version; there is no requirement to update all dependencies to their latest versions before cutting a new Buildx feature release.
- **Patch Releases**: Vendored dependency updates are considered for patch releases, except in the rare cases specified previously.
- **Security Considerations**: A security scanner report indicating a non-exploitable issue via Buildx does not justify backports.
README.md

@@ -56,8 +56,7 @@ For more information on how to use Buildx, see

Using `buildx` with Docker requires Docker engine 19.03 or newer.

> **Warning**
>
> [!WARNING]
> Using an incompatible version of Docker may result in unexpected behavior,
> and will likely cause issues, especially when using Buildx builders with more
> recent versions of BuildKit.
@@ -75,8 +74,7 @@ Docker Engine package repositories contain Docker Buildx packages when installed

## Manual download

> **Important**
>
> [!IMPORTANT]
> This section is for unattended installation of the buildx component. These
> instructions are mostly suitable for testing purposes. We do not recommend
> installing buildx using manual download in production environments as they
@@ -107,8 +105,7 @@ On Windows:
* `C:\ProgramData\Docker\cli-plugins`
* `C:\Program Files\Docker\cli-plugins`

> **Note**
>
> [!NOTE]
> On Unix environments, it may also be necessary to make it executable with `chmod +x`:
> ```shell
> $ chmod +x ~/.docker/cli-plugins/docker-buildx
84
bake/bake.go
@@ -2,7 +2,6 @@ package bake
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/csv"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
@@ -26,7 +25,9 @@ import (
|
||||
"github.com/moby/buildkit/client"
|
||||
"github.com/moby/buildkit/client/llb"
|
||||
"github.com/moby/buildkit/session/auth/authprovider"
|
||||
"github.com/moby/buildkit/util/entitlements"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/tonistiigi/go-csvvalue"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
"github.com/zclconf/go-cty/cty/convert"
|
||||
)
|
||||
@@ -177,7 +178,7 @@ func readWithProgress(r io.Reader, setStatus func(st *client.VertexStatus)) (dt
|
||||
}
|
||||
|
||||
func ListTargets(files []File) ([]string, error) {
|
||||
c, err := ParseFiles(files, nil)
|
||||
c, _, err := ParseFiles(files, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -192,7 +193,7 @@ func ListTargets(files []File) ([]string, error) {
|
||||
}
|
||||
|
||||
func ReadTargets(ctx context.Context, files []File, targets, overrides []string, defaults map[string]string) (map[string]*Target, map[string]*Group, error) {
|
||||
c, err := ParseFiles(files, defaults)
|
||||
c, _, err := ParseFiles(files, defaults)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -298,7 +299,7 @@ func sliceToMap(env []string) (res map[string]string) {
|
||||
return
|
||||
}
|
||||
|
||||
func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error) {
|
||||
func ParseFiles(files []File, defaults map[string]string) (_ *Config, _ *hclparser.ParseMeta, err error) {
|
||||
defer func() {
|
||||
err = formatHCLError(err, files)
|
||||
}()
|
||||
@@ -310,7 +311,7 @@ func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error)
|
||||
isCompose, composeErr := validateComposeFile(f.Data, f.Name)
|
||||
if isCompose {
|
||||
if composeErr != nil {
|
||||
return nil, composeErr
|
||||
return nil, nil, composeErr
|
||||
}
|
||||
composeFiles = append(composeFiles, f)
|
||||
}
|
||||
@@ -318,13 +319,13 @@ func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error)
|
||||
hf, isHCL, err := ParseHCLFile(f.Data, f.Name)
|
||||
if isHCL {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
hclFiles = append(hclFiles, hf)
|
||||
} else if composeErr != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse %s: parsing yaml: %v, parsing hcl", f.Name, composeErr)
|
||||
return nil, nil, errors.Wrapf(err, "failed to parse %s: parsing yaml: %v, parsing hcl", f.Name, composeErr)
|
||||
} else {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -332,23 +333,24 @@ func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error)
|
||||
if len(composeFiles) > 0 {
|
||||
cfg, cmperr := ParseComposeFiles(composeFiles)
|
||||
if cmperr != nil {
|
||||
return nil, errors.Wrap(cmperr, "failed to parse compose file")
|
||||
return nil, nil, errors.Wrap(cmperr, "failed to parse compose file")
|
||||
}
|
||||
c = mergeConfig(c, *cfg)
|
||||
c = dedupeConfig(c)
|
||||
}
|
||||
|
||||
var pm hclparser.ParseMeta
|
||||
if len(hclFiles) > 0 {
|
||||
renamed, err := hclparser.Parse(hclparser.MergeFiles(hclFiles), hclparser.Opt{
|
||||
res, err := hclparser.Parse(hclparser.MergeFiles(hclFiles), hclparser.Opt{
|
||||
LookupVar: os.LookupEnv,
|
||||
Vars: defaults,
|
||||
ValidateLabel: validateTargetName,
|
||||
}, &c)
|
||||
if err.HasErrors() {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
for _, renamed := range renamed {
|
||||
for _, renamed := range res.Renamed {
|
||||
for oldName, newNames := range renamed {
|
||||
newNames = dedupSlice(newNames)
|
||||
if len(newNames) == 1 && oldName == newNames[0] {
|
||||
@@ -361,9 +363,10 @@ func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error)
|
||||
}
|
||||
}
|
||||
c = dedupeConfig(c)
|
||||
pm = *res
|
||||
}
|
||||
|
||||
return &c, nil
|
||||
return &c, &pm, nil
|
||||
}
|
||||
|
||||
func dedupeConfig(c Config) Config {
|
||||
@@ -388,7 +391,8 @@ func dedupeConfig(c Config) Config {
|
||||
}
|
||||
|
||||
func ParseFile(dt []byte, fn string) (*Config, error) {
|
||||
return ParseFiles([]File{{Data: dt, Name: fn}}, nil)
|
||||
c, _, err := ParseFiles([]File{{Data: dt, Name: fn}}, nil)
|
||||
return c, err
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
@@ -491,7 +495,7 @@ func (c Config) loadLinks(name string, t *Target, m map[string]*Target, o map[st
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
t2.Outputs = nil
|
||||
t2.Outputs = []string{"type=cacheonly"}
|
||||
t2.linked = true
|
||||
m[target] = t2
|
||||
}
|
||||
@@ -539,7 +543,7 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
|
||||
o := t[kk[1]]
|
||||
|
||||
switch keys[1] {
|
||||
case "output", "cache-to", "cache-from", "tags", "platform", "secrets", "ssh", "attest":
|
||||
case "output", "cache-to", "cache-from", "tags", "platform", "secrets", "ssh", "attest", "entitlements", "network":
|
||||
if len(parts) == 2 {
|
||||
o.ArrValue = append(o.ArrValue, parts[1])
|
||||
}
|
||||
@@ -669,13 +673,15 @@ func (c Config) target(name string, visited map[string]*Target, overrides map[st
|
||||
}
|
||||
|
||||
type Group struct {
|
||||
Name string `json:"-" hcl:"name,label" cty:"name"`
|
||||
Targets []string `json:"targets" hcl:"targets" cty:"targets"`
|
||||
Name string `json:"-" hcl:"name,label" cty:"name"`
|
||||
Description string `json:"description,omitempty" hcl:"description,optional" cty:"description"`
|
||||
Targets []string `json:"targets" hcl:"targets" cty:"targets"`
|
||||
// Target // TODO?
|
||||
}
|
||||
|
||||
type Target struct {
|
||||
Name string `json:"-" hcl:"name,label" cty:"name"`
|
||||
Name string `json:"-" hcl:"name,label" cty:"name"`
|
||||
Description string `json:"description,omitempty" hcl:"description,optional" cty:"description"`
|
||||
|
||||
// Inherits is the only field that cannot be overridden with --set
|
||||
Inherits []string `json:"inherits,omitempty" hcl:"inherits,optional" cty:"inherits"`
|
||||
@@ -698,11 +704,13 @@ type Target struct {
|
||||
Outputs []string `json:"output,omitempty" hcl:"output,optional" cty:"output"`
|
||||
Pull *bool `json:"pull,omitempty" hcl:"pull,optional" cty:"pull"`
|
||||
NoCache *bool `json:"no-cache,omitempty" hcl:"no-cache,optional" cty:"no-cache"`
|
||||
NetworkMode *string `json:"-" hcl:"-" cty:"-"`
|
||||
NetworkMode *string `json:"network" hcl:"network" cty:"network"`
|
||||
NoCacheFilter []string `json:"no-cache-filter,omitempty" hcl:"no-cache-filter,optional" cty:"no-cache-filter"`
|
||||
ShmSize *string `json:"shm-size,omitempty" hcl:"shm-size,optional"`
|
||||
Ulimits []string `json:"ulimits,omitempty" hcl:"ulimits,optional"`
|
||||
// IMPORTANT: if you add more fields here, do not forget to update newOverrides and docs/bake-reference.md.
|
||||
Call *string `json:"call,omitempty" hcl:"call,optional" cty:"call"`
|
||||
Entitlements []string `json:"entitlements,omitempty" hcl:"entitlements,optional" cty:"entitlements"`
|
||||
// IMPORTANT: if you add more fields here, do not forget to update newOverrides/AddOverrides and docs/bake-reference.md.
|
||||
|
||||
// linked is a private field to mark a target used as a linked one
|
||||
linked bool
|
||||
@@ -726,6 +734,12 @@ func (t *Target) normalize() {
|
||||
t.NoCacheFilter = removeDupes(t.NoCacheFilter)
|
||||
t.Ulimits = removeDupes(t.Ulimits)
|
||||
|
||||
if t.NetworkMode != nil && *t.NetworkMode == "host" {
|
||||
t.Entitlements = append(t.Entitlements, "network.host")
|
||||
}
|
||||
|
||||
t.Entitlements = removeDupes(t.Entitlements)
|
||||
|
||||
for k, v := range t.Contexts {
|
||||
if v == "" {
|
||||
delete(t.Contexts, k)
|
||||
@@ -776,6 +790,9 @@ func (t *Target) Merge(t2 *Target) {
|
||||
if t2.Target != nil {
|
||||
t.Target = t2.Target
|
||||
}
|
||||
if t2.Call != nil {
|
||||
t.Call = t2.Call
|
||||
}
|
||||
if t2.Annotations != nil { // merge
|
||||
t.Annotations = append(t.Annotations, t2.Annotations...)
|
||||
}
|
||||
@@ -819,6 +836,12 @@ func (t *Target) Merge(t2 *Target) {
|
||||
if t2.Ulimits != nil { // merge
|
||||
t.Ulimits = append(t.Ulimits, t2.Ulimits...)
|
||||
}
|
||||
if t2.Description != "" {
|
||||
t.Description = t2.Description
|
||||
}
|
||||
if t2.Entitlements != nil { // merge
|
||||
t.Entitlements = append(t.Entitlements, t2.Entitlements...)
|
||||
}
|
||||
t.Inherits = append(t.Inherits, t2.Inherits...)
|
||||
}
|
||||
|
||||
@@ -863,6 +886,8 @@ func (t *Target) AddOverrides(overrides map[string]Override) error {
|
||||
t.CacheTo = o.ArrValue
|
||||
case "target":
|
||||
t.Target = &value
|
||||
case "call":
|
||||
t.Call = &value
|
||||
case "secrets":
|
||||
t.Secrets = o.ArrValue
|
||||
case "ssh":
|
||||
@@ -871,6 +896,8 @@ func (t *Target) AddOverrides(overrides map[string]Override) error {
|
||||
t.Platforms = o.ArrValue
|
||||
case "output":
|
||||
t.Outputs = o.ArrValue
|
||||
case "entitlements":
|
||||
t.Entitlements = append(t.Entitlements, o.ArrValue...)
|
||||
case "annotations":
|
||||
t.Annotations = append(t.Annotations, o.ArrValue...)
|
||||
case "attest":
|
||||
@@ -887,6 +914,8 @@ func (t *Target) AddOverrides(overrides map[string]Override) error {
|
||||
t.ShmSize = &value
|
||||
case "ulimits":
|
||||
t.Ulimits = o.ArrValue
|
||||
case "network":
|
||||
t.NetworkMode = &value
|
||||
case "pull":
|
||||
pull, err := strconv.ParseBool(value)
|
||||
if err != nil {
|
||||
@@ -1298,6 +1327,12 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
||||
bo.Target = *t.Target
|
||||
}
|
||||
|
||||
if t.Call != nil {
|
||||
bo.CallFunc = &build.CallFunc{
|
||||
Name: *t.Call,
|
||||
}
|
||||
}
|
||||
|
||||
cacheImports, err := buildflags.ParseCacheEntry(t.CacheFrom)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -1348,6 +1383,10 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
||||
}
|
||||
bo.Ulimits = ulimits
|
||||
|
||||
for _, ent := range t.Entitlements {
|
||||
bo.Allow = append(bo.Allow, entitlements.Entitlement(ent))
|
||||
}
|
||||
|
||||
return bo, nil
|
||||
}
|
||||
|
||||
@@ -1393,8 +1432,7 @@ func removeAttestDupes(s []string) []string {
|
||||
}
|
||||
|
||||
func parseOutput(str string) map[string]string {
|
||||
csvReader := csv.NewReader(strings.NewReader(str))
|
||||
fields, err := csvReader.Read()
|
||||
fields, err := csvvalue.Fields(str, nil)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
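The hunks above change `ParseFiles` to return an additional `*hclparser.ParseMeta` alongside the config. A minimal, hypothetical caller sketch follows (package layout, variable names, and the example HCL are assumptions, not taken from the diff):

```go
// Sketch: adapting to the three-value ParseFiles signature and reading ParseMeta.
package main

import (
	"fmt"
	"log"

	"github.com/docker/buildx/bake"
)

func main() {
	dt := []byte(`
variable "TAG" {
  description = "image tag"
  default     = "latest"
}
target "app" {
  tags = ["example/app:${TAG}"]
}
`)
	cfg, meta, err := bake.ParseFiles([]bake.File{{Data: dt, Name: "docker-bake.hcl"}}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("targets:", len(cfg.Targets))
	// ParseMeta now also exposes the variables declared across the parsed files.
	for _, v := range meta.AllVariables {
		fmt.Printf("variable %s (%s)\n", v.Name, v.Description)
	}
}
```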
@@ -8,6 +8,7 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/moby/buildkit/util/entitlements"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
@@ -838,7 +839,8 @@ func TestReadContextFromTargetChain(t *testing.T) {
|
||||
|
||||
mid, ok := m["mid"]
|
||||
require.True(t, ok)
|
||||
require.Equal(t, 0, len(mid.Outputs))
|
||||
require.Equal(t, 1, len(mid.Outputs))
|
||||
require.Equal(t, "type=cacheonly", mid.Outputs[0])
|
||||
require.Equal(t, 1, len(mid.Contexts))
|
||||
|
||||
base, ok := m["base"]
|
||||
@@ -1528,7 +1530,7 @@ services:
|
||||
v2: "bar"
|
||||
`)
|
||||
|
||||
c, err := ParseFiles([]File{
|
||||
c, _, err := ParseFiles([]File{
|
||||
{Data: dt, Name: "c1.foo"},
|
||||
{Data: dt2, Name: "c2.bar"},
|
||||
}, nil)
|
||||
@@ -1725,3 +1727,132 @@ func TestAnnotations(t *testing.T) {
|
||||
require.Len(t, bo["app"].Exports, 1)
|
||||
require.Equal(t, "bar", bo["app"].Exports[0].Attrs["annotation-manifest[linux/amd64].foo"])
|
||||
}
|
||||
|
||||
func TestHCLEntitlements(t *testing.T) {
|
||||
fp := File{
|
||||
Name: "docker-bake.hcl",
|
||||
Data: []byte(
|
||||
`target "app" {
|
||||
entitlements = ["security.insecure", "network.host"]
|
||||
}`),
|
||||
}
|
||||
ctx := context.TODO()
|
||||
m, g, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
bo, err := TargetsToBuildOpt(m, &Input{})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, 1, len(g))
|
||||
require.Equal(t, []string{"app"}, g["default"].Targets)
|
||||
|
||||
require.Equal(t, 1, len(m))
|
||||
require.Contains(t, m, "app")
|
||||
require.Len(t, m["app"].Entitlements, 2)
|
||||
require.Equal(t, "security.insecure", m["app"].Entitlements[0])
|
||||
require.Equal(t, "network.host", m["app"].Entitlements[1])
|
||||
|
||||
require.Len(t, bo["app"].Allow, 2)
|
||||
require.Equal(t, entitlements.EntitlementSecurityInsecure, bo["app"].Allow[0])
|
||||
require.Equal(t, entitlements.EntitlementNetworkHost, bo["app"].Allow[1])
|
||||
}
|
||||
|
||||
func TestEntitlementsForNetHostCompose(t *testing.T) {
|
||||
fp := File{
|
||||
Name: "docker-bake.hcl",
|
||||
Data: []byte(
|
||||
`target "app" {
|
||||
dockerfile = "app.Dockerfile"
|
||||
}`),
|
||||
}
|
||||
|
||||
fp2 := File{
|
||||
Name: "docker-compose.yml",
|
||||
Data: []byte(
|
||||
`services:
|
||||
app:
|
||||
build:
|
||||
network: "host"
|
||||
`),
|
||||
}
|
||||
|
||||
ctx := context.TODO()
|
||||
m, g, err := ReadTargets(ctx, []File{fp, fp2}, []string{"app"}, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
bo, err := TargetsToBuildOpt(m, &Input{})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, 1, len(g))
|
||||
require.Equal(t, []string{"app"}, g["default"].Targets)
|
||||
|
||||
require.Equal(t, 1, len(m))
|
||||
require.Contains(t, m, "app")
|
||||
require.Len(t, m["app"].Entitlements, 1)
|
||||
require.Equal(t, "network.host", m["app"].Entitlements[0])
|
||||
require.Equal(t, "host", *m["app"].NetworkMode)
|
||||
|
||||
require.Len(t, bo["app"].Allow, 1)
|
||||
require.Equal(t, entitlements.EntitlementNetworkHost, bo["app"].Allow[0])
|
||||
require.Equal(t, "host", bo["app"].NetworkMode)
|
||||
}
|
||||
|
||||
func TestEntitlementsForNetHost(t *testing.T) {
|
||||
fp := File{
|
||||
Name: "docker-bake.hcl",
|
||||
Data: []byte(
|
||||
`target "app" {
|
||||
dockerfile = "app.Dockerfile"
|
||||
network = "host"
|
||||
}`),
|
||||
}
|
||||
|
||||
ctx := context.TODO()
|
||||
m, g, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
bo, err := TargetsToBuildOpt(m, &Input{})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, 1, len(g))
|
||||
require.Equal(t, []string{"app"}, g["default"].Targets)
|
||||
|
||||
require.Equal(t, 1, len(m))
|
||||
require.Contains(t, m, "app")
|
||||
require.Len(t, m["app"].Entitlements, 1)
|
||||
require.Equal(t, "network.host", m["app"].Entitlements[0])
|
||||
require.Equal(t, "host", *m["app"].NetworkMode)
|
||||
|
||||
require.Len(t, bo["app"].Allow, 1)
|
||||
require.Equal(t, entitlements.EntitlementNetworkHost, bo["app"].Allow[0])
|
||||
require.Equal(t, "host", bo["app"].NetworkMode)
|
||||
}
|
||||
|
||||
func TestNetNone(t *testing.T) {
|
||||
fp := File{
|
||||
Name: "docker-bake.hcl",
|
||||
Data: []byte(
|
||||
`target "app" {
|
||||
dockerfile = "app.Dockerfile"
|
||||
network = "none"
|
||||
}`),
|
||||
}
|
||||
|
||||
ctx := context.TODO()
|
||||
m, g, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
bo, err := TargetsToBuildOpt(m, &Input{})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, 1, len(g))
|
||||
require.Equal(t, []string{"app"}, g["default"].Targets)
|
||||
|
||||
require.Equal(t, 1, len(m))
|
||||
require.Contains(t, m, "app")
|
||||
require.Len(t, m["app"].Entitlements, 0)
|
||||
require.Equal(t, "none", *m["app"].NetworkMode)
|
||||
|
||||
require.Len(t, bo["app"].Allow, 0)
|
||||
require.Equal(t, "none", bo["app"].NetworkMode)
|
||||
}
|
||||
|
@@ -8,6 +8,7 @@ import (
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/compose-spec/compose-go/v2/consts"
|
||||
"github.com/compose-spec/compose-go/v2/dotenv"
|
||||
"github.com/compose-spec/compose-go/v2/loader"
|
||||
composetypes "github.com/compose-spec/compose-go/v2/types"
|
||||
@@ -40,7 +41,11 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
|
||||
ConfigFiles: cfgs,
|
||||
Environment: envs,
|
||||
}, func(options *loader.Options) {
|
||||
options.SetProjectName("bake", false)
|
||||
projectName := "bake"
|
||||
if v, ok := envs[consts.ComposeProjectName]; ok && v != "" {
|
||||
projectName = v
|
||||
}
|
||||
options.SetProjectName(projectName, false)
|
||||
options.SkipNormalization = true
|
||||
options.Profiles = []string{"*"}
|
||||
})
|
||||
|
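The `ParseCompose` change above makes the compose project name overridable through `COMPOSE_PROJECT_NAME`; the tests that follow exercise the same behavior. A rough usage sketch (caller code assumed, not part of the diff):

```go
// Sketch: overriding the compose project name via the env map.
package main

import (
	"fmt"
	"log"

	composetypes "github.com/compose-spec/compose-go/v2/types"
	"github.com/docker/buildx/bake"
)

func main() {
	dt := []byte(`
services:
  app:
    build:
      context: .
      args:
        PROJECT_NAME: ${COMPOSE_PROJECT_NAME}
`)
	cfg, err := bake.ParseCompose(
		[]composetypes.ConfigFile{{Content: dt}},
		map[string]string{"COMPOSE_PROJECT_NAME": "myproj"}, // defaults to "bake" when unset
	)
	if err != nil {
		log.Fatal(err)
	}
	// PROJECT_NAME resolves to "myproj" instead of the default "bake".
	fmt.Println(*cfg.Targets[0].Args["PROJECT_NAME"])
}
```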
@@ -758,6 +758,46 @@ services:
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestCgroup(t *testing.T) {
|
||||
var dt = []byte(`
|
||||
services:
|
||||
scratch:
|
||||
build:
|
||||
context: ./webapp
|
||||
cgroup: private
|
||||
`)
|
||||
|
||||
_, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestProjectName(t *testing.T) {
|
||||
var dt = []byte(`
|
||||
services:
|
||||
scratch:
|
||||
build:
|
||||
context: ./webapp
|
||||
args:
|
||||
PROJECT_NAME: ${COMPOSE_PROJECT_NAME}
|
||||
`)
|
||||
|
||||
t.Run("default", func(t *testing.T) {
|
||||
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, c.Targets, 1)
|
||||
require.Len(t, c.Targets[0].Args, 1)
|
||||
require.Equal(t, map[string]*string{"PROJECT_NAME": ptrstr("bake")}, c.Targets[0].Args)
|
||||
})
|
||||
|
||||
t.Run("env", func(t *testing.T) {
|
||||
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, map[string]string{"COMPOSE_PROJECT_NAME": "foo"})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, c.Targets, 1)
|
||||
require.Len(t, c.Targets[0].Args, 1)
|
||||
require.Equal(t, map[string]*string{"PROJECT_NAME": ptrstr("foo")}, c.Targets[0].Args)
|
||||
})
|
||||
}
|
||||
|
||||
// chdir changes the current working directory to the named directory,
|
||||
// and then restore the original working directory at the end of the test.
|
||||
func chdir(t *testing.T, dir string) {
|
||||
|
175
bake/entitlements.go
Normal file
@@ -0,0 +1,175 @@
|
||||
package bake
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/console"
|
||||
"github.com/docker/buildx/build"
|
||||
"github.com/moby/buildkit/util/entitlements"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type EntitlementKey string
|
||||
|
||||
const (
|
||||
EntitlementKeyNetworkHost EntitlementKey = "network.host"
|
||||
EntitlementKeySecurityInsecure EntitlementKey = "security.insecure"
|
||||
EntitlementKeyFSRead EntitlementKey = "fs.read"
|
||||
EntitlementKeyFSWrite EntitlementKey = "fs.write"
|
||||
EntitlementKeyFS EntitlementKey = "fs"
|
||||
EntitlementKeyImagePush EntitlementKey = "image.push"
|
||||
EntitlementKeyImageLoad EntitlementKey = "image.load"
|
||||
EntitlementKeyImage EntitlementKey = "image"
|
||||
EntitlementKeySSH EntitlementKey = "ssh"
|
||||
)
|
||||
|
||||
type EntitlementConf struct {
|
||||
NetworkHost bool
|
||||
SecurityInsecure bool
|
||||
FSRead []string
|
||||
FSWrite []string
|
||||
ImagePush []string
|
||||
ImageLoad []string
|
||||
SSH bool
|
||||
}
|
||||
|
||||
func ParseEntitlements(in []string) (EntitlementConf, error) {
|
||||
var conf EntitlementConf
|
||||
for _, e := range in {
|
||||
switch e {
|
||||
case string(EntitlementKeyNetworkHost):
|
||||
conf.NetworkHost = true
|
||||
case string(EntitlementKeySecurityInsecure):
|
||||
conf.SecurityInsecure = true
|
||||
case string(EntitlementKeySSH):
|
||||
conf.SSH = true
|
||||
default:
|
||||
k, v, _ := strings.Cut(e, "=")
|
||||
switch k {
|
||||
case string(EntitlementKeyFSRead):
|
||||
conf.FSRead = append(conf.FSRead, v)
|
||||
case string(EntitlementKeyFSWrite):
|
||||
conf.FSWrite = append(conf.FSWrite, v)
|
||||
case string(EntitlementKeyFS):
|
||||
conf.FSRead = append(conf.FSRead, v)
|
||||
conf.FSWrite = append(conf.FSWrite, v)
|
||||
case string(EntitlementKeyImagePush):
|
||||
conf.ImagePush = append(conf.ImagePush, v)
|
||||
case string(EntitlementKeyImageLoad):
|
||||
conf.ImageLoad = append(conf.ImageLoad, v)
|
||||
case string(EntitlementKeyImage):
|
||||
conf.ImagePush = append(conf.ImagePush, v)
|
||||
conf.ImageLoad = append(conf.ImageLoad, v)
|
||||
default:
|
||||
return conf, errors.Errorf("unknown entitlement key %q", k)
|
||||
}
|
||||
|
||||
// TODO: dedupe slices and parent paths
|
||||
}
|
||||
}
|
||||
return conf, nil
|
||||
}
|
||||
|
||||
func (c EntitlementConf) Validate(m map[string]build.Options) (EntitlementConf, error) {
|
||||
var expected EntitlementConf
|
||||
|
||||
for _, v := range m {
|
||||
if err := c.check(v, &expected); err != nil {
|
||||
return EntitlementConf{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return expected, nil
|
||||
}
|
||||
|
||||
func (c EntitlementConf) check(bo build.Options, expected *EntitlementConf) error {
|
||||
for _, e := range bo.Allow {
|
||||
switch e {
|
||||
case entitlements.EntitlementNetworkHost:
|
||||
if !c.NetworkHost {
|
||||
expected.NetworkHost = true
|
||||
}
|
||||
case entitlements.EntitlementSecurityInsecure:
|
||||
if !c.SecurityInsecure {
|
||||
expected.SecurityInsecure = true
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c EntitlementConf) Prompt(ctx context.Context, out io.Writer) error {
|
||||
var term bool
|
||||
if _, err := console.ConsoleFromFile(os.Stdin); err == nil {
|
||||
term = true
|
||||
}
|
||||
|
||||
var msgs []string
|
||||
var flags []string
|
||||
|
||||
if c.NetworkHost {
|
||||
msgs = append(msgs, " - Running build containers that can access host network")
|
||||
flags = append(flags, "network.host")
|
||||
}
|
||||
if c.SecurityInsecure {
|
||||
msgs = append(msgs, " - Running privileged containers that can make system changes")
|
||||
flags = append(flags, "security.insecure")
|
||||
}
|
||||
|
||||
if len(msgs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
fmt.Fprintf(out, "Your build is requesting privileges for following possibly insecure capabilities:\n\n")
|
||||
for _, m := range msgs {
|
||||
fmt.Fprintf(out, "%s\n", m)
|
||||
}
|
||||
|
||||
for i, f := range flags {
|
||||
flags[i] = "--allow=" + f
|
||||
}
|
||||
|
||||
if term {
|
||||
fmt.Fprintf(out, "\nIn order to not see this message in the future pass %q to grant requested privileges.\n", strings.Join(flags, " "))
|
||||
} else {
|
||||
fmt.Fprintf(out, "\nPass %q to grant requested privileges.\n", strings.Join(flags, " "))
|
||||
}
|
||||
|
||||
args := append([]string(nil), os.Args...)
|
||||
if v, ok := os.LookupEnv("DOCKER_CLI_PLUGIN_ORIGINAL_CLI_COMMAND"); ok && v != "" {
|
||||
args[0] = v
|
||||
}
|
||||
idx := slices.Index(args, "bake")
|
||||
|
||||
if idx != -1 {
|
||||
fmt.Fprintf(out, "\nYour full command with requested privileges:\n\n")
|
||||
fmt.Fprintf(out, "%s %s %s\n\n", strings.Join(args[:idx+1], " "), strings.Join(flags, " "), strings.Join(args[idx+1:], " "))
|
||||
}
|
||||
|
||||
if term {
|
||||
fmt.Fprintf(out, "Do you want to grant requested privileges and continue? [y/N] ")
|
||||
reader := bufio.NewReader(os.Stdin)
|
||||
answerCh := make(chan string, 1)
|
||||
go func() {
|
||||
answer, _, _ := reader.ReadLine()
|
||||
answerCh <- string(answer)
|
||||
close(answerCh)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case answer := <-answerCh:
|
||||
if strings.ToLower(string(answer)) == "y" {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return errors.Errorf("additional privileges requested")
|
||||
}
|
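`bake/entitlements.go` above introduces `ParseEntitlements`, `Validate`, and `Prompt`. The following is a hedged sketch of how a caller might wire them together; the flag values and the empty target map are placeholders, not code from the diff:

```go
// Sketch: gating a bake invocation on the requested entitlements.
package main

import (
	"context"
	"log"
	"os"

	"github.com/docker/buildx/bake"
	"github.com/docker/buildx/build"
)

func main() {
	// e.g. values collected from repeated --allow flags
	allowed, err := bake.ParseEntitlements([]string{"network.host"})
	if err != nil {
		log.Fatal(err)
	}

	// build options resolved from the bake targets (placeholder map)
	opts := map[string]build.Options{}

	// Validate returns the entitlements the builds request but that were not
	// granted; Prompt asks the user, or fails when it cannot get confirmation.
	missing, err := allowed.Validate(opts)
	if err != nil {
		log.Fatal(err)
	}
	if err := missing.Prompt(context.TODO(), os.Stderr); err != nil {
		log.Fatal(err)
	}
}
```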
@@ -273,7 +273,7 @@ func TestHCLMultiFileSharedVariables(t *testing.T) {
|
||||
}
|
||||
`)
|
||||
|
||||
c, err := ParseFiles([]File{
|
||||
c, _, err := ParseFiles([]File{
|
||||
{Data: dt, Name: "c1.hcl"},
|
||||
{Data: dt2, Name: "c2.hcl"},
|
||||
}, nil)
|
||||
@@ -285,7 +285,7 @@ func TestHCLMultiFileSharedVariables(t *testing.T) {
|
||||
|
||||
t.Setenv("FOO", "def")
|
||||
|
||||
c, err = ParseFiles([]File{
|
||||
c, _, err = ParseFiles([]File{
|
||||
{Data: dt, Name: "c1.hcl"},
|
||||
{Data: dt2, Name: "c2.hcl"},
|
||||
}, nil)
|
||||
@@ -322,7 +322,7 @@ func TestHCLVarsWithVars(t *testing.T) {
|
||||
}
|
||||
`)
|
||||
|
||||
c, err := ParseFiles([]File{
|
||||
c, _, err := ParseFiles([]File{
|
||||
{Data: dt, Name: "c1.hcl"},
|
||||
{Data: dt2, Name: "c2.hcl"},
|
||||
}, nil)
|
||||
@@ -334,7 +334,7 @@ func TestHCLVarsWithVars(t *testing.T) {
|
||||
|
||||
t.Setenv("BASE", "new")
|
||||
|
||||
c, err = ParseFiles([]File{
|
||||
c, _, err = ParseFiles([]File{
|
||||
{Data: dt, Name: "c1.hcl"},
|
||||
{Data: dt2, Name: "c2.hcl"},
|
||||
}, nil)
|
||||
@@ -612,7 +612,7 @@ func TestHCLMultiFileAttrs(t *testing.T) {
|
||||
FOO="def"
|
||||
`)
|
||||
|
||||
c, err := ParseFiles([]File{
|
||||
c, _, err := ParseFiles([]File{
|
||||
{Data: dt, Name: "c1.hcl"},
|
||||
{Data: dt2, Name: "c2.hcl"},
|
||||
}, nil)
|
||||
@@ -623,7 +623,7 @@ func TestHCLMultiFileAttrs(t *testing.T) {
|
||||
|
||||
t.Setenv("FOO", "ghi")
|
||||
|
||||
c, err = ParseFiles([]File{
|
||||
c, _, err = ParseFiles([]File{
|
||||
{Data: dt, Name: "c1.hcl"},
|
||||
{Data: dt2, Name: "c2.hcl"},
|
||||
}, nil)
|
||||
@@ -647,7 +647,7 @@ func TestHCLMultiFileGlobalAttrs(t *testing.T) {
|
||||
FOO = "def"
|
||||
`)
|
||||
|
||||
c, err := ParseFiles([]File{
|
||||
c, _, err := ParseFiles([]File{
|
||||
{Data: dt, Name: "c1.hcl"},
|
||||
{Data: dt2, Name: "c2.hcl"},
|
||||
}, nil)
|
||||
@@ -830,7 +830,7 @@ func TestHCLRenameMultiFile(t *testing.T) {
|
||||
}
|
||||
`)
|
||||
|
||||
c, err := ParseFiles([]File{
|
||||
c, _, err := ParseFiles([]File{
|
||||
{Data: dt, Name: "c1.hcl"},
|
||||
{Data: dt2, Name: "c2.hcl"},
|
||||
{Data: dt3, Name: "c3.hcl"},
|
||||
@@ -1050,7 +1050,7 @@ func TestHCLMatrixArgsOverride(t *testing.T) {
|
||||
}
|
||||
`)
|
||||
|
||||
c, err := ParseFiles([]File{
|
||||
c, _, err := ParseFiles([]File{
|
||||
{Data: dt, Name: "docker-bake.hcl"},
|
||||
}, map[string]string{"ABC": "11,22,33"})
|
||||
require.NoError(t, err)
|
||||
@@ -1236,7 +1236,7 @@ services:
|
||||
v2: "bar"
|
||||
`)
|
||||
|
||||
c, err := ParseFiles([]File{
|
||||
c, _, err := ParseFiles([]File{
|
||||
{Data: dt, Name: "c1.hcl"},
|
||||
{Data: dt2, Name: "c2.yml"},
|
||||
}, nil)
|
||||
@@ -1258,7 +1258,7 @@ func TestHCLBuiltinVars(t *testing.T) {
|
||||
}
|
||||
`)
|
||||
|
||||
c, err := ParseFiles([]File{
|
||||
c, _, err := ParseFiles([]File{
|
||||
{Data: dt, Name: "c1.hcl"},
|
||||
}, map[string]string{
|
||||
"BAKE_CMD_CONTEXT": "foo",
|
||||
@@ -1272,7 +1272,7 @@ func TestHCLBuiltinVars(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCombineHCLAndJSONTargets(t *testing.T) {
|
||||
c, err := ParseFiles([]File{
|
||||
c, _, err := ParseFiles([]File{
|
||||
{
|
||||
Name: "docker-bake.hcl",
|
||||
Data: []byte(`
|
||||
@@ -1348,7 +1348,7 @@ target "b" {
|
||||
}
|
||||
|
||||
func TestCombineHCLAndJSONVars(t *testing.T) {
|
||||
c, err := ParseFiles([]File{
|
||||
c, _, err := ParseFiles([]File{
|
||||
{
|
||||
Name: "docker-bake.hcl",
|
||||
Data: []byte(`
|
||||
|
@@ -25,9 +25,11 @@ type Opt struct {
|
||||
}
|
||||
|
||||
type variable struct {
|
||||
Name string `json:"-" hcl:"name,label"`
|
||||
Default *hcl.Attribute `json:"default,omitempty" hcl:"default,optional"`
|
||||
Body hcl.Body `json:"-" hcl:",body"`
|
||||
Name string `json:"-" hcl:"name,label"`
|
||||
Default *hcl.Attribute `json:"default,omitempty" hcl:"default,optional"`
|
||||
Description string `json:"description,omitempty" hcl:"description,optional"`
|
||||
Body hcl.Body `json:"-" hcl:",body"`
|
||||
Remain hcl.Body `json:"-" hcl:",remain"`
|
||||
}
|
||||
|
||||
type functionDef struct {
|
||||
@@ -73,7 +75,12 @@ type WithGetName interface {
|
||||
GetName(ectx *hcl.EvalContext, block *hcl.Block, loadDeps func(hcl.Expression) hcl.Diagnostics) (string, error)
|
||||
}
|
||||
|
||||
var errUndefined = errors.New("undefined")
|
||||
// errUndefined is returned when a variable or function is not defined.
|
||||
type errUndefined struct{}
|
||||
|
||||
func (errUndefined) Error() string {
|
||||
return "undefined"
|
||||
}
|
||||
|
||||
func (p *parser) loadDeps(ectx *hcl.EvalContext, exp hcl.Expression, exclude map[string]struct{}, allowMissing bool) hcl.Diagnostics {
|
||||
fns, hcldiags := funcCalls(exp)
|
||||
@@ -83,7 +90,7 @@ func (p *parser) loadDeps(ectx *hcl.EvalContext, exp hcl.Expression, exclude map
|
||||
|
||||
for _, fn := range fns {
|
||||
if err := p.resolveFunction(ectx, fn); err != nil {
|
||||
if allowMissing && errors.Is(err, errUndefined) {
|
||||
if allowMissing && errors.Is(err, errUndefined{}) {
|
||||
continue
|
||||
}
|
||||
return wrapErrorDiagnostic("Invalid expression", err, exp.Range().Ptr(), exp.Range().Ptr())
|
||||
@@ -137,7 +144,7 @@ func (p *parser) loadDeps(ectx *hcl.EvalContext, exp hcl.Expression, exclude map
|
||||
}
|
||||
for _, block := range blocks {
|
||||
if err := p.resolveBlock(block, target); err != nil {
|
||||
if allowMissing && errors.Is(err, errUndefined) {
|
||||
if allowMissing && errors.Is(err, errUndefined{}) {
|
||||
continue
|
||||
}
|
||||
return wrapErrorDiagnostic("Invalid expression", err, exp.Range().Ptr(), exp.Range().Ptr())
|
||||
@@ -145,7 +152,7 @@ func (p *parser) loadDeps(ectx *hcl.EvalContext, exp hcl.Expression, exclude map
|
||||
}
|
||||
} else {
|
||||
if err := p.resolveValue(ectx, v.RootName()); err != nil {
|
||||
if allowMissing && errors.Is(err, errUndefined) {
|
||||
if allowMissing && errors.Is(err, errUndefined{}) {
|
||||
continue
|
||||
}
|
||||
return wrapErrorDiagnostic("Invalid expression", err, exp.Range().Ptr(), exp.Range().Ptr())
|
||||
@@ -167,7 +174,7 @@ func (p *parser) resolveFunction(ectx *hcl.EvalContext, name string) error {
|
||||
}
|
||||
f, ok := p.funcs[name]
|
||||
if !ok {
|
||||
return errors.Wrapf(errUndefined, "function %q does not exist", name)
|
||||
return errors.Wrapf(errUndefined{}, "function %q does not exist", name)
|
||||
}
|
||||
if _, ok := p.progressF[key(ectx, name)]; ok {
|
||||
return errors.Errorf("function cycle not allowed for %s", name)
|
||||
@@ -257,7 +264,7 @@ func (p *parser) resolveValue(ectx *hcl.EvalContext, name string) (err error) {
|
||||
if _, builtin := p.opt.Vars[name]; !ok && !builtin {
|
||||
vr, ok := p.vars[name]
|
||||
if !ok {
|
||||
return errors.Wrapf(errUndefined, "variable %q does not exist", name)
|
||||
return errors.Wrapf(errUndefined{}, "variable %q does not exist", name)
|
||||
}
|
||||
def = vr.Default
|
||||
ectx = p.ectx
|
||||
@@ -534,7 +541,18 @@ func (p *parser) resolveBlockNames(block *hcl.Block) ([]string, error) {
|
||||
return names, nil
|
||||
}
|
||||
|
||||
func Parse(b hcl.Body, opt Opt, val interface{}) (map[string]map[string][]string, hcl.Diagnostics) {
|
||||
type Variable struct {
|
||||
Name string
|
||||
Description string
|
||||
Value *string
|
||||
}
|
||||
|
||||
type ParseMeta struct {
|
||||
Renamed map[string]map[string][]string
|
||||
AllVariables []*Variable
|
||||
}
|
||||
|
||||
func Parse(b hcl.Body, opt Opt, val interface{}) (*ParseMeta, hcl.Diagnostics) {
|
||||
reserved := map[string]struct{}{}
|
||||
schema, _ := gohcl.ImpliedBodySchema(val)
|
||||
|
||||
@@ -643,6 +661,7 @@ func Parse(b hcl.Body, opt Opt, val interface{}) (map[string]map[string][]string
|
||||
}
|
||||
}
|
||||
|
||||
vars := make([]*Variable, 0, len(p.vars))
|
||||
for k := range p.vars {
|
||||
if err := p.resolveValue(p.ectx, k); err != nil {
|
||||
if diags, ok := err.(hcl.Diagnostics); ok {
|
||||
@@ -651,6 +670,21 @@ func Parse(b hcl.Body, opt Opt, val interface{}) (map[string]map[string][]string
|
||||
r := p.vars[k].Body.MissingItemRange()
|
||||
return nil, wrapErrorDiagnostic("Invalid value", err, &r, &r)
|
||||
}
|
||||
v := &Variable{
|
||||
Name: p.vars[k].Name,
|
||||
Description: p.vars[k].Description,
|
||||
}
|
||||
if vv := p.ectx.Variables[k]; !vv.IsNull() {
|
||||
var s string
|
||||
switch vv.Type() {
|
||||
case cty.String:
|
||||
s = vv.AsString()
|
||||
case cty.Bool:
|
||||
s = strconv.FormatBool(vv.True())
|
||||
}
|
||||
v.Value = &s
|
||||
}
|
||||
vars = append(vars, v)
|
||||
}
|
||||
|
||||
for k := range p.funcs {
|
||||
@@ -795,7 +829,10 @@ func Parse(b hcl.Body, opt Opt, val interface{}) (map[string]map[string][]string
|
||||
}
|
||||
}
|
||||
|
||||
return renamed, nil
|
||||
return &ParseMeta{
|
||||
Renamed: renamed,
|
||||
AllVariables: vars,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// wrapErrorDiagnostic wraps an error into a hcl.Diagnostics object.
|
||||
|
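The hunks above replace the `errUndefined` sentinel variable with a struct type; `errors.Is` keeps matching because two zero values of a comparable struct type compare equal. A standalone illustration (not code from the repository):

```go
// Illustration: errors.Is with a comparable struct error type.
package main

import (
	"errors"
	"fmt"
)

type errUndefined struct{}

func (errUndefined) Error() string { return "undefined" }

func main() {
	err := fmt.Errorf("variable %q does not exist: %w", "FOO", errUndefined{})
	fmt.Println(errors.Is(err, errUndefined{})) // true
}
```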
@@ -111,21 +111,19 @@ func (mb mergedBodies) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
|
||||
diags = append(diags, thisDiags...)
|
||||
}
|
||||
|
||||
if thisAttrs != nil {
|
||||
for name, attr := range thisAttrs {
|
||||
if existing := attrs[name]; existing != nil {
|
||||
diags = diags.Append(&hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Duplicate argument",
|
||||
Detail: fmt.Sprintf(
|
||||
"Argument %q was already set at %s",
|
||||
name, existing.NameRange.String(),
|
||||
),
|
||||
Subject: thisAttrs[name].NameRange.Ptr(),
|
||||
})
|
||||
}
|
||||
attrs[name] = attr
|
||||
for name, attr := range thisAttrs {
|
||||
if existing := attrs[name]; existing != nil {
|
||||
diags = diags.Append(&hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Duplicate argument",
|
||||
Detail: fmt.Sprintf(
|
||||
"Argument %q was already set at %s",
|
||||
name, existing.NameRange.String(),
|
||||
),
|
||||
Subject: thisAttrs[name].NameRange.Ptr(),
|
||||
})
|
||||
}
|
||||
attrs[name] = attr
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -1,6 +1,9 @@
|
||||
package hclparser
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-cty-funcs/cidr"
|
||||
@@ -9,174 +12,251 @@ import (
|
||||
"github.com/hashicorp/go-cty-funcs/uuid"
|
||||
"github.com/hashicorp/hcl/v2/ext/tryfunc"
|
||||
"github.com/hashicorp/hcl/v2/ext/typeexpr"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
"github.com/zclconf/go-cty/cty/function"
|
||||
"github.com/zclconf/go-cty/cty/function/stdlib"
|
||||
)
|
||||
|
||||
var stdlibFunctions = map[string]function.Function{
|
||||
"absolute": stdlib.AbsoluteFunc,
|
||||
"add": stdlib.AddFunc,
|
||||
"and": stdlib.AndFunc,
|
||||
"base64decode": encoding.Base64DecodeFunc,
|
||||
"base64encode": encoding.Base64EncodeFunc,
|
||||
"bcrypt": crypto.BcryptFunc,
|
||||
"byteslen": stdlib.BytesLenFunc,
|
||||
"bytesslice": stdlib.BytesSliceFunc,
|
||||
"can": tryfunc.CanFunc,
|
||||
"ceil": stdlib.CeilFunc,
|
||||
"chomp": stdlib.ChompFunc,
|
||||
"chunklist": stdlib.ChunklistFunc,
|
||||
"cidrhost": cidr.HostFunc,
|
||||
"cidrnetmask": cidr.NetmaskFunc,
|
||||
"cidrsubnet": cidr.SubnetFunc,
|
||||
"cidrsubnets": cidr.SubnetsFunc,
|
||||
"coalesce": stdlib.CoalesceFunc,
|
||||
"coalescelist": stdlib.CoalesceListFunc,
|
||||
"compact": stdlib.CompactFunc,
|
||||
"concat": stdlib.ConcatFunc,
|
||||
"contains": stdlib.ContainsFunc,
|
||||
"convert": typeexpr.ConvertFunc,
|
||||
"csvdecode": stdlib.CSVDecodeFunc,
|
||||
"distinct": stdlib.DistinctFunc,
|
||||
"divide": stdlib.DivideFunc,
|
||||
"element": stdlib.ElementFunc,
|
||||
"equal": stdlib.EqualFunc,
|
||||
"flatten": stdlib.FlattenFunc,
|
||||
"floor": stdlib.FloorFunc,
|
||||
"format": stdlib.FormatFunc,
|
||||
"formatdate": stdlib.FormatDateFunc,
|
||||
"formatlist": stdlib.FormatListFunc,
|
||||
"greaterthan": stdlib.GreaterThanFunc,
|
||||
"greaterthanorequalto": stdlib.GreaterThanOrEqualToFunc,
|
||||
"hasindex": stdlib.HasIndexFunc,
|
||||
"indent": stdlib.IndentFunc,
|
||||
"index": stdlib.IndexFunc,
|
||||
"indexof": indexOfFunc,
|
||||
"int": stdlib.IntFunc,
|
||||
"join": stdlib.JoinFunc,
|
||||
"jsondecode": stdlib.JSONDecodeFunc,
|
||||
"jsonencode": stdlib.JSONEncodeFunc,
|
||||
"keys": stdlib.KeysFunc,
|
||||
"length": stdlib.LengthFunc,
|
||||
"lessthan": stdlib.LessThanFunc,
|
||||
"lessthanorequalto": stdlib.LessThanOrEqualToFunc,
|
||||
"log": stdlib.LogFunc,
|
||||
"lookup": stdlib.LookupFunc,
|
||||
"lower": stdlib.LowerFunc,
|
||||
"max": stdlib.MaxFunc,
|
||||
"md5": crypto.Md5Func,
|
||||
"merge": stdlib.MergeFunc,
|
||||
"min": stdlib.MinFunc,
|
||||
"modulo": stdlib.ModuloFunc,
|
||||
"multiply": stdlib.MultiplyFunc,
|
||||
"negate": stdlib.NegateFunc,
|
||||
"not": stdlib.NotFunc,
|
||||
"notequal": stdlib.NotEqualFunc,
|
||||
"or": stdlib.OrFunc,
|
||||
"parseint": stdlib.ParseIntFunc,
|
||||
"pow": stdlib.PowFunc,
|
||||
"range": stdlib.RangeFunc,
|
||||
"regex_replace": stdlib.RegexReplaceFunc,
|
||||
"regex": stdlib.RegexFunc,
|
||||
"regexall": stdlib.RegexAllFunc,
|
||||
"replace": stdlib.ReplaceFunc,
|
||||
"reverse": stdlib.ReverseFunc,
|
||||
"reverselist": stdlib.ReverseListFunc,
|
||||
"rsadecrypt": crypto.RsaDecryptFunc,
|
||||
"sethaselement": stdlib.SetHasElementFunc,
|
||||
"setintersection": stdlib.SetIntersectionFunc,
|
||||
"setproduct": stdlib.SetProductFunc,
|
||||
"setsubtract": stdlib.SetSubtractFunc,
|
||||
"setsymmetricdifference": stdlib.SetSymmetricDifferenceFunc,
|
||||
"setunion": stdlib.SetUnionFunc,
|
||||
"sha1": crypto.Sha1Func,
|
||||
"sha256": crypto.Sha256Func,
|
||||
"sha512": crypto.Sha512Func,
|
||||
"signum": stdlib.SignumFunc,
|
||||
"slice": stdlib.SliceFunc,
|
||||
"sort": stdlib.SortFunc,
|
||||
"split": stdlib.SplitFunc,
|
||||
"strlen": stdlib.StrlenFunc,
|
||||
"substr": stdlib.SubstrFunc,
|
||||
"subtract": stdlib.SubtractFunc,
|
||||
"timeadd": stdlib.TimeAddFunc,
|
||||
"timestamp": timestampFunc,
|
||||
"title": stdlib.TitleFunc,
|
||||
"trim": stdlib.TrimFunc,
|
||||
"trimprefix": stdlib.TrimPrefixFunc,
|
||||
"trimspace": stdlib.TrimSpaceFunc,
|
||||
"trimsuffix": stdlib.TrimSuffixFunc,
|
||||
"try": tryfunc.TryFunc,
|
||||
"upper": stdlib.UpperFunc,
|
||||
"urlencode": encoding.URLEncodeFunc,
|
||||
"uuidv4": uuid.V4Func,
|
||||
"uuidv5": uuid.V5Func,
|
||||
"values": stdlib.ValuesFunc,
|
||||
"zipmap": stdlib.ZipmapFunc,
|
||||
type funcDef struct {
|
||||
name string
|
||||
fn function.Function
|
||||
factory func() function.Function
|
||||
}
|
||||
|
||||
var stdlibFunctions = []funcDef{
|
||||
{name: "absolute", fn: stdlib.AbsoluteFunc},
|
||||
{name: "add", fn: stdlib.AddFunc},
|
||||
{name: "and", fn: stdlib.AndFunc},
|
||||
{name: "base64decode", fn: encoding.Base64DecodeFunc},
|
||||
{name: "base64encode", fn: encoding.Base64EncodeFunc},
|
||||
{name: "basename", factory: basenameFunc},
|
||||
{name: "bcrypt", fn: crypto.BcryptFunc},
|
||||
{name: "byteslen", fn: stdlib.BytesLenFunc},
|
||||
{name: "bytesslice", fn: stdlib.BytesSliceFunc},
|
||||
{name: "can", fn: tryfunc.CanFunc},
|
||||
{name: "ceil", fn: stdlib.CeilFunc},
|
||||
{name: "chomp", fn: stdlib.ChompFunc},
|
||||
{name: "chunklist", fn: stdlib.ChunklistFunc},
|
||||
{name: "cidrhost", fn: cidr.HostFunc},
|
||||
{name: "cidrnetmask", fn: cidr.NetmaskFunc},
|
||||
{name: "cidrsubnet", fn: cidr.SubnetFunc},
|
||||
{name: "cidrsubnets", fn: cidr.SubnetsFunc},
|
||||
{name: "coalesce", fn: stdlib.CoalesceFunc},
|
||||
{name: "coalescelist", fn: stdlib.CoalesceListFunc},
|
||||
{name: "compact", fn: stdlib.CompactFunc},
|
||||
{name: "concat", fn: stdlib.ConcatFunc},
|
||||
{name: "contains", fn: stdlib.ContainsFunc},
|
||||
{name: "convert", fn: typeexpr.ConvertFunc},
|
||||
{name: "csvdecode", fn: stdlib.CSVDecodeFunc},
|
||||
{name: "dirname", factory: dirnameFunc},
|
||||
{name: "distinct", fn: stdlib.DistinctFunc},
|
||||
{name: "divide", fn: stdlib.DivideFunc},
|
||||
{name: "element", fn: stdlib.ElementFunc},
|
||||
{name: "equal", fn: stdlib.EqualFunc},
|
||||
{name: "flatten", fn: stdlib.FlattenFunc},
|
||||
{name: "floor", fn: stdlib.FloorFunc},
|
||||
{name: "format", fn: stdlib.FormatFunc},
|
||||
{name: "formatdate", fn: stdlib.FormatDateFunc},
|
||||
{name: "formatlist", fn: stdlib.FormatListFunc},
|
||||
{name: "greaterthan", fn: stdlib.GreaterThanFunc},
|
||||
{name: "greaterthanorequalto", fn: stdlib.GreaterThanOrEqualToFunc},
|
||||
{name: "hasindex", fn: stdlib.HasIndexFunc},
|
||||
{name: "indent", fn: stdlib.IndentFunc},
|
||||
{name: "index", fn: stdlib.IndexFunc},
|
||||
{name: "indexof", factory: indexOfFunc},
|
||||
{name: "int", fn: stdlib.IntFunc},
|
||||
{name: "join", fn: stdlib.JoinFunc},
|
||||
{name: "jsondecode", fn: stdlib.JSONDecodeFunc},
|
||||
{name: "jsonencode", fn: stdlib.JSONEncodeFunc},
|
||||
{name: "keys", fn: stdlib.KeysFunc},
|
||||
{name: "length", fn: stdlib.LengthFunc},
|
||||
{name: "lessthan", fn: stdlib.LessThanFunc},
|
||||
{name: "lessthanorequalto", fn: stdlib.LessThanOrEqualToFunc},
|
||||
{name: "log", fn: stdlib.LogFunc},
|
||||
{name: "lookup", fn: stdlib.LookupFunc},
|
||||
{name: "lower", fn: stdlib.LowerFunc},
|
||||
{name: "max", fn: stdlib.MaxFunc},
|
||||
{name: "md5", fn: crypto.Md5Func},
|
||||
{name: "merge", fn: stdlib.MergeFunc},
|
||||
{name: "min", fn: stdlib.MinFunc},
|
||||
{name: "modulo", fn: stdlib.ModuloFunc},
|
||||
{name: "multiply", fn: stdlib.MultiplyFunc},
|
||||
{name: "negate", fn: stdlib.NegateFunc},
|
||||
{name: "not", fn: stdlib.NotFunc},
|
||||
{name: "notequal", fn: stdlib.NotEqualFunc},
|
||||
{name: "or", fn: stdlib.OrFunc},
|
||||
{name: "parseint", fn: stdlib.ParseIntFunc},
|
||||
{name: "pow", fn: stdlib.PowFunc},
|
||||
{name: "range", fn: stdlib.RangeFunc},
|
||||
{name: "regex_replace", fn: stdlib.RegexReplaceFunc},
|
||||
{name: "regex", fn: stdlib.RegexFunc},
|
||||
{name: "regexall", fn: stdlib.RegexAllFunc},
|
||||
{name: "replace", fn: stdlib.ReplaceFunc},
|
||||
{name: "reverse", fn: stdlib.ReverseFunc},
|
||||
{name: "reverselist", fn: stdlib.ReverseListFunc},
|
||||
{name: "rsadecrypt", fn: crypto.RsaDecryptFunc},
|
||||
{name: "sanitize", factory: sanitizeFunc},
|
||||
{name: "sethaselement", fn: stdlib.SetHasElementFunc},
|
||||
{name: "setintersection", fn: stdlib.SetIntersectionFunc},
|
||||
{name: "setproduct", fn: stdlib.SetProductFunc},
|
||||
{name: "setsubtract", fn: stdlib.SetSubtractFunc},
|
||||
{name: "setsymmetricdifference", fn: stdlib.SetSymmetricDifferenceFunc},
|
||||
{name: "setunion", fn: stdlib.SetUnionFunc},
|
||||
{name: "sha1", fn: crypto.Sha1Func},
|
||||
{name: "sha256", fn: crypto.Sha256Func},
|
||||
{name: "sha512", fn: crypto.Sha512Func},
|
||||
{name: "signum", fn: stdlib.SignumFunc},
|
||||
{name: "slice", fn: stdlib.SliceFunc},
|
||||
{name: "sort", fn: stdlib.SortFunc},
|
||||
{name: "split", fn: stdlib.SplitFunc},
|
||||
{name: "strlen", fn: stdlib.StrlenFunc},
|
||||
{name: "substr", fn: stdlib.SubstrFunc},
|
||||
{name: "subtract", fn: stdlib.SubtractFunc},
|
||||
{name: "timeadd", fn: stdlib.TimeAddFunc},
|
||||
{name: "timestamp", factory: timestampFunc},
|
||||
{name: "title", fn: stdlib.TitleFunc},
|
||||
{name: "trim", fn: stdlib.TrimFunc},
|
||||
{name: "trimprefix", fn: stdlib.TrimPrefixFunc},
|
||||
{name: "trimspace", fn: stdlib.TrimSpaceFunc},
|
||||
{name: "trimsuffix", fn: stdlib.TrimSuffixFunc},
|
||||
{name: "try", fn: tryfunc.TryFunc},
|
||||
{name: "upper", fn: stdlib.UpperFunc},
|
||||
{name: "urlencode", fn: encoding.URLEncodeFunc},
|
||||
{name: "uuidv4", fn: uuid.V4Func},
|
||||
{name: "uuidv5", fn: uuid.V5Func},
|
||||
{name: "values", fn: stdlib.ValuesFunc},
|
||||
{name: "zipmap", fn: stdlib.ZipmapFunc},
|
||||
}
|
||||
|
||||
// indexOfFunc constructs a function that finds the element index for a given
|
||||
// value in a list.
|
||||
var indexOfFunc = function.New(&function.Spec{
|
||||
Params: []function.Parameter{
|
||||
{
|
||||
Name: "list",
|
||||
Type: cty.DynamicPseudoType,
|
||||
func indexOfFunc() function.Function {
|
||||
return function.New(&function.Spec{
|
||||
Params: []function.Parameter{
|
||||
{
|
||||
Name: "list",
|
||||
Type: cty.DynamicPseudoType,
|
||||
},
|
||||
{
|
||||
Name: "value",
|
||||
Type: cty.DynamicPseudoType,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "value",
|
||||
Type: cty.DynamicPseudoType,
|
||||
},
|
||||
},
|
||||
Type: function.StaticReturnType(cty.Number),
|
||||
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
|
||||
if !(args[0].Type().IsListType() || args[0].Type().IsTupleType()) {
|
||||
return cty.NilVal, errors.New("argument must be a list or tuple")
|
||||
}
|
||||
|
||||
if !args[0].IsKnown() {
|
||||
return cty.UnknownVal(cty.Number), nil
|
||||
}
|
||||
|
||||
if args[0].LengthInt() == 0 { // Easy path
|
||||
return cty.NilVal, errors.New("cannot search an empty list")
|
||||
}
|
||||
|
||||
for it := args[0].ElementIterator(); it.Next(); {
|
||||
i, v := it.Element()
|
||||
eq, err := stdlib.Equal(v, args[1])
|
||||
if err != nil {
|
||||
return cty.NilVal, err
|
||||
Type: function.StaticReturnType(cty.Number),
|
||||
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
|
||||
if !(args[0].Type().IsListType() || args[0].Type().IsTupleType()) {
|
||||
return cty.NilVal, errors.New("argument must be a list or tuple")
|
||||
}
|
||||
if !eq.IsKnown() {
|
||||
|
||||
if !args[0].IsKnown() {
|
||||
return cty.UnknownVal(cty.Number), nil
|
||||
}
|
||||
if eq.True() {
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
return cty.NilVal, errors.New("item not found")
|
||||
|
||||
},
|
||||
})
|
||||
if args[0].LengthInt() == 0 { // Easy path
|
||||
return cty.NilVal, errors.New("cannot search an empty list")
|
||||
}
|
||||
|
||||
for it := args[0].ElementIterator(); it.Next(); {
|
||||
i, v := it.Element()
|
||||
eq, err := stdlib.Equal(v, args[1])
|
||||
if err != nil {
|
||||
return cty.NilVal, err
|
||||
}
|
||||
if !eq.IsKnown() {
|
||||
return cty.UnknownVal(cty.Number), nil
|
||||
}
|
||||
if eq.True() {
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
return cty.NilVal, errors.New("item not found")
|
||||
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// basenameFunc constructs a function that returns the last element of a path.
|
||||
func basenameFunc() function.Function {
|
||||
return function.New(&function.Spec{
|
||||
Params: []function.Parameter{
|
||||
{
|
||||
Name: "path",
|
||||
Type: cty.String,
|
||||
},
|
||||
},
|
||||
Type: function.StaticReturnType(cty.String),
|
||||
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
|
||||
in := args[0].AsString()
|
||||
return cty.StringVal(path.Base(in)), nil
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// dirnameFunc constructs a function that returns the directory of a path.
|
||||
func dirnameFunc() function.Function {
|
||||
return function.New(&function.Spec{
|
||||
Params: []function.Parameter{
|
||||
{
|
||||
Name: "path",
|
||||
Type: cty.String,
|
||||
},
|
||||
},
|
||||
Type: function.StaticReturnType(cty.String),
|
||||
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
|
||||
in := args[0].AsString()
|
||||
return cty.StringVal(path.Dir(in)), nil
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// sanitizeFunc constructs a function that replaces all non-alphanumeric characters with an underscore,
|
||||
// leaving only characters that are valid for a Bake target name.
|
||||
func sanitizeFunc() function.Function {
|
||||
return function.New(&function.Spec{
|
||||
Params: []function.Parameter{
|
||||
{
|
||||
Name: "name",
|
||||
Type: cty.String,
|
||||
},
|
||||
},
|
||||
Type: function.StaticReturnType(cty.String),
|
||||
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
|
||||
in := args[0].AsString()
|
||||
// only [a-zA-Z0-9_-]+ is allowed
|
||||
var b strings.Builder
|
||||
for _, r := range in {
|
||||
if r >= 'a' && r <= 'z' || r >= 'A' && r <= 'Z' || r >= '0' && r <= '9' || r == '_' || r == '-' {
|
||||
b.WriteRune(r)
|
||||
} else {
|
||||
b.WriteRune('_')
|
||||
}
|
||||
}
|
||||
return cty.StringVal(b.String()), nil
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// timestampFunc constructs a function that returns a string representation of the current date and time.
|
||||
//
|
||||
// This function was imported from terraform's datetime utilities.
|
||||
var timestampFunc = function.New(&function.Spec{
|
||||
Params: []function.Parameter{},
|
||||
Type: function.StaticReturnType(cty.String),
|
||||
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
|
||||
return cty.StringVal(time.Now().UTC().Format(time.RFC3339)), nil
|
||||
},
|
||||
})
|
||||
func timestampFunc() function.Function {
|
||||
return function.New(&function.Spec{
|
||||
Params: []function.Parameter{},
|
||||
Type: function.StaticReturnType(cty.String),
|
||||
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
|
||||
return cty.StringVal(time.Now().UTC().Format(time.RFC3339)), nil
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func Stdlib() map[string]function.Function {
|
||||
funcs := make(map[string]function.Function, len(stdlibFunctions))
|
||||
for k, v := range stdlibFunctions {
|
||||
funcs[k] = v
|
||||
for _, v := range stdlibFunctions {
|
||||
if v.factory != nil {
|
||||
funcs[v.name] = v.factory()
|
||||
} else {
|
||||
funcs[v.name] = v.fn
|
||||
}
|
||||
}
|
||||
return funcs
|
||||
}
|
||||
|
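`funcs.go` above moves the stdlib table to factory-backed definitions and adds `basename`, `dirname`, and `sanitize`. A small sketch of calling one of them through `Stdlib()` (the import path is assumed from the package name; the caller is not part of the diff):

```go
// Sketch: invoking the new sanitize function from the Stdlib map.
package main

import (
	"fmt"
	"log"

	"github.com/docker/buildx/bake/hclparser"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	funcs := hclparser.Stdlib()
	v, err := funcs["sanitize"].Call([]cty.Value{cty.StringVal("feature/foo+bar")})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v.AsString()) // feature_foo_bar
}
```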
@@ -3,6 +3,7 @@ package hclparser
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
@@ -34,16 +35,165 @@ func TestIndexOf(t *testing.T) {
|
||||
for name, test := range tests {
|
||||
name, test := name, test
|
||||
t.Run(name, func(t *testing.T) {
|
||||
got, err := indexOfFunc.Call([]cty.Value{test.input, test.key})
|
||||
if err != nil {
|
||||
if test.wantErr {
|
||||
return
|
||||
}
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if !got.RawEquals(test.want) {
|
||||
t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.want)
|
||||
got, err := indexOfFunc().Call([]cty.Value{test.input, test.key})
|
||||
if test.wantErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, test.want, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBasename(t *testing.T) {
|
||||
type testCase struct {
|
||||
input cty.Value
|
||||
want cty.Value
|
||||
wantErr bool
|
||||
}
|
||||
tests := map[string]testCase{
|
||||
"empty": {
|
||||
input: cty.StringVal(""),
|
||||
want: cty.StringVal("."),
|
||||
},
|
||||
"slash": {
|
||||
input: cty.StringVal("/"),
|
||||
want: cty.StringVal("/"),
|
||||
},
|
||||
"simple": {
|
||||
input: cty.StringVal("/foo/bar"),
|
||||
want: cty.StringVal("bar"),
|
||||
},
|
||||
"simple no slash": {
|
||||
input: cty.StringVal("foo/bar"),
|
||||
want: cty.StringVal("bar"),
|
||||
},
|
||||
"dot": {
|
||||
input: cty.StringVal("/foo/bar."),
|
||||
want: cty.StringVal("bar."),
|
||||
},
|
||||
"dotdot": {
|
||||
input: cty.StringVal("/foo/bar.."),
|
||||
want: cty.StringVal("bar.."),
|
||||
},
|
||||
"dotdotdot": {
|
||||
input: cty.StringVal("/foo/bar..."),
|
||||
want: cty.StringVal("bar..."),
|
||||
},
|
||||
}
|
||||
|
||||
for name, test := range tests {
|
||||
name, test := name, test
|
||||
t.Run(name, func(t *testing.T) {
|
||||
got, err := basenameFunc().Call([]cty.Value{test.input})
|
||||
if test.wantErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, test.want, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDirname(t *testing.T) {
|
||||
type testCase struct {
|
||||
input cty.Value
|
||||
want cty.Value
|
||||
wantErr bool
|
||||
}
|
||||
tests := map[string]testCase{
|
||||
"empty": {
|
||||
input: cty.StringVal(""),
|
||||
want: cty.StringVal("."),
|
||||
},
|
||||
"slash": {
|
||||
input: cty.StringVal("/"),
|
||||
want: cty.StringVal("/"),
|
||||
},
|
||||
"simple": {
|
||||
input: cty.StringVal("/foo/bar"),
|
||||
want: cty.StringVal("/foo"),
|
||||
},
|
||||
"simple no slash": {
|
||||
input: cty.StringVal("foo/bar"),
|
||||
want: cty.StringVal("foo"),
|
||||
},
|
||||
"dot": {
|
||||
input: cty.StringVal("/foo/bar."),
|
||||
want: cty.StringVal("/foo"),
|
||||
},
|
||||
"dotdot": {
|
||||
input: cty.StringVal("/foo/bar.."),
|
||||
want: cty.StringVal("/foo"),
|
||||
},
|
||||
"dotdotdot": {
|
||||
input: cty.StringVal("/foo/bar..."),
|
||||
want: cty.StringVal("/foo"),
|
||||
},
|
||||
}
|
||||
|
||||
for name, test := range tests {
|
||||
name, test := name, test
|
||||
t.Run(name, func(t *testing.T) {
|
||||
got, err := dirnameFunc().Call([]cty.Value{test.input})
|
||||
if test.wantErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, test.want, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSanitize(t *testing.T) {
|
||||
type testCase struct {
|
||||
input cty.Value
|
||||
want cty.Value
|
||||
}
|
||||
tests := map[string]testCase{
|
||||
"empty": {
|
||||
input: cty.StringVal(""),
|
||||
want: cty.StringVal(""),
|
||||
},
|
||||
"simple": {
|
||||
input: cty.StringVal("foo/bar"),
|
||||
want: cty.StringVal("foo_bar"),
|
||||
},
|
||||
"simple no slash": {
|
||||
input: cty.StringVal("foobar"),
|
||||
want: cty.StringVal("foobar"),
|
||||
},
|
||||
"dot": {
|
||||
input: cty.StringVal("foo/bar."),
|
||||
want: cty.StringVal("foo_bar_"),
|
||||
},
|
||||
"dotdot": {
|
||||
input: cty.StringVal("foo/bar.."),
|
||||
want: cty.StringVal("foo_bar__"),
|
||||
},
|
||||
"dotdotdot": {
|
||||
input: cty.StringVal("foo/bar..."),
|
||||
want: cty.StringVal("foo_bar___"),
|
||||
},
|
||||
"utf8": {
|
||||
input: cty.StringVal("foo/🍕bar"),
|
||||
want: cty.StringVal("foo__bar"),
|
||||
},
|
||||
"symbols": {
|
||||
input: cty.StringVal("foo/bar!@(ba+z)"),
|
||||
want: cty.StringVal("foo_bar___ba_z_"),
|
||||
},
|
||||
}
|
||||
|
||||
for name, test := range tests {
|
||||
name, test := name, test
|
||||
t.Run(name, func(t *testing.T) {
|
||||
got, err := sanitizeFunc().Call([]cty.Value{test.input})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, test.want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
268
build/build.go
@@ -9,6 +9,7 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -18,6 +19,7 @@ import (
|
||||
"github.com/distribution/reference"
|
||||
"github.com/docker/buildx/builder"
|
||||
"github.com/docker/buildx/driver"
|
||||
"github.com/docker/buildx/util/confutil"
|
||||
"github.com/docker/buildx/util/desktop"
|
||||
"github.com/docker/buildx/util/dockerutil"
|
||||
"github.com/docker/buildx/util/imagetools"
|
||||
@@ -25,13 +27,15 @@ import (
|
||||
"github.com/docker/buildx/util/resolver"
|
||||
"github.com/docker/buildx/util/waitmap"
|
||||
"github.com/docker/cli/opts"
|
||||
imagetypes "github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/pkg/jsonmessage"
|
||||
"github.com/moby/buildkit/client"
|
||||
"github.com/moby/buildkit/client/llb"
|
||||
"github.com/moby/buildkit/exporter/containerimage/exptypes"
|
||||
gateway "github.com/moby/buildkit/frontend/gateway/client"
|
||||
"github.com/moby/buildkit/identity"
|
||||
"github.com/moby/buildkit/session"
|
||||
"github.com/moby/buildkit/session/filesync"
|
||||
"github.com/moby/buildkit/solver/errdefs"
|
||||
"github.com/moby/buildkit/solver/pb"
|
||||
spb "github.com/moby/buildkit/sourcepolicy/pb"
|
||||
@@ -42,19 +46,15 @@ import (
|
||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/tonistiigi/fsutil"
|
||||
fstypes "github.com/tonistiigi/fsutil/types"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
var (
|
||||
errStdinConflict = errors.New("invalid argument: can't use stdin for both build context and dockerfile")
|
||||
errDockerfileConflict = errors.New("ambiguous Dockerfile source: both stdin and flag correspond to Dockerfiles")
|
||||
)
|
||||
|
||||
const (
|
||||
printFallbackImage = "docker/dockerfile:1.5@sha256:dbbd5e059e8a07ff7ea6233b213b36aa516b4c53c645f1817a4dd18b83cbea56"
|
||||
// moby/buildkit#3d789eb740a93ac814b078fd752307e2a8da5b84
|
||||
printLintFallbackImage = "docker.io/docker/dockerfile-upstream:master@sha256:ea5a8efbaf785bdfe6c7ad74f0b11849df42d62861b218429bea17bb078df6ca"
|
||||
printFallbackImage = "docker/dockerfile:1.5@sha256:dbbd5e059e8a07ff7ea6233b213b36aa516b4c53c645f1817a4dd18b83cbea56"
|
||||
printLintFallbackImage = "docker.io/docker/dockerfile-upstream:1.8.1@sha256:e87caa74dcb7d46cd820352bfea12591f3dba3ddc4285e19c7dcd13359f7cefd"
|
||||
)
|
||||
|
||||
type Options struct {
|
||||
@@ -82,13 +82,13 @@ type Options struct {
|
||||
|
||||
Session []session.Attachable
|
||||
Linked bool // Linked marks this target as exclusively linked (not requested by the user).
|
||||
PrintFunc *PrintFunc
|
||||
WithProvenanceResponse bool
|
||||
CallFunc *CallFunc
|
||||
ProvenanceResponseMode confutil.MetadataProvenanceMode
|
||||
SourcePolicy *spb.Policy
|
||||
GroupRef string
|
||||
}
|
||||
|
||||
type PrintFunc struct {
|
||||
type CallFunc struct {
|
||||
Name string
|
||||
Format string
|
||||
IgnoreStatus bool
|
||||
@@ -97,7 +97,7 @@ type PrintFunc struct {
|
||||
type Inputs struct {
|
||||
ContextPath string
|
||||
DockerfilePath string
|
||||
InStream io.Reader
|
||||
InStream *SyncMultiReader
|
||||
ContextState *llb.State
|
||||
DockerfileInline string
|
||||
NamedContexts map[string]NamedContext
|
||||
@@ -169,7 +169,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
|
||||
}
|
||||
}
|
||||
|
||||
if noMobyDriver != nil && !noDefaultLoad() && noPrintFunc(opt) {
|
||||
if noMobyDriver != nil && !noDefaultLoad() && noCallFunc(opt) {
|
||||
var noOutputTargets []string
|
||||
for name, opt := range opt {
|
||||
if noMobyDriver.Features(ctx)[driver.DefaultLoad] {
|
||||
@@ -212,10 +212,13 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
|
||||
for k, opt := range opt {
|
||||
multiDriver := len(drivers[k]) > 1
|
||||
hasMobyDriver := false
|
||||
gitattrs, addVCSLocalDir, err := getGitAttributes(ctx, opt.Inputs.ContextPath, opt.Inputs.DockerfilePath)
|
||||
addGitAttrs, err := getGitAttributes(ctx, opt.Inputs.ContextPath, opt.Inputs.DockerfilePath)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Warn("current commit information was not captured by the build")
|
||||
}
|
||||
if opt.Ref == "" {
|
||||
opt.Ref = identity.NewID()
|
||||
}
|
||||
var reqn []*reqForNode
|
||||
for _, np := range drivers[k] {
|
||||
if np.Node().Driver.IsMobyDriver() {
|
||||
@@ -226,16 +229,14 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
so, release, err := toSolveOpt(ctx, np.Node(), multiDriver, opt, gatewayOpts, configDir, addVCSLocalDir, w, docker)
|
||||
so, release, err := toSolveOpt(ctx, np.Node(), multiDriver, opt, gatewayOpts, configDir, w, docker)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := saveLocalState(so, k, opt, np.Node(), configDir); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for k, v := range gitattrs {
|
||||
so.FrontendAttrs[k] = v
|
||||
}
|
||||
addGitAttrs(so)
|
||||
defers = append(defers, release)
|
||||
reqn = append(reqn, &reqForNode{
|
||||
resolvedNode: np,
|
||||
@@ -294,6 +295,12 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
|
||||
}
|
||||
}
|
||||
|
||||
sharedSessions, err := detectSharedMounts(ctx, reqForNodes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sharedSessionsWG := map[string]*sync.WaitGroup{}
|
||||
|
||||
resp = map[string]*client.SolveResponse{}
|
||||
var respMu sync.Mutex
|
||||
results := waitmap.New()
|
||||
@@ -302,7 +309,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
|
||||
childTargets := calculateChildTargets(reqForNodes, opt)
|
||||
|
||||
for k, opt := range opt {
|
||||
err := func(k string) error {
|
||||
err := func(k string) (err error) {
|
||||
opt := opt
|
||||
dps := drivers[k]
|
||||
multiDriver := len(drivers[k]) > 1
|
||||
@@ -314,6 +321,12 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
|
||||
}
|
||||
baseCtx := ctx
|
||||
|
||||
if multiTarget {
|
||||
defer func() {
|
||||
err = errors.Wrapf(err, "target %s", k)
|
||||
}()
|
||||
}
|
||||
|
||||
res := make([]*client.SolveResponse, len(dps))
|
||||
eg2, ctx := errgroup.WithContext(ctx)
|
||||
|
||||
@@ -358,7 +371,37 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var done func()
|
||||
if sessions, ok := sharedSessions[node.Name]; ok {
|
||||
wg, ok := sharedSessionsWG[node.Name]
|
||||
if ok {
|
||||
wg.Add(1)
|
||||
} else {
|
||||
wg = &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
sharedSessionsWG[node.Name] = wg
|
||||
for _, s := range sessions {
|
||||
s := s
|
||||
eg.Go(func() error {
|
||||
return s.Run(baseCtx, c.Dialer())
|
||||
})
|
||||
}
|
||||
go func() {
|
||||
wg.Wait()
|
||||
for _, s := range sessions {
|
||||
s.Close()
|
||||
}
|
||||
}()
|
||||
}
|
||||
done = wg.Done
|
||||
}
|
||||
|
||||
eg2.Go(func() error {
|
||||
if done != nil {
|
||||
defer done()
|
||||
}
|
||||
|
||||
pw = progress.ResetTime(pw)
|
||||
|
||||
if err := waitContextDeps(ctx, dp.driverIndex, results, so); err != nil {
|
||||
@@ -389,15 +432,15 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
|
||||
defer func() { <-done }()
|
||||
|
||||
cc := c
|
||||
var printRes map[string][]byte
|
||||
var callRes map[string][]byte
|
||||
buildFunc := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
|
||||
if opt.PrintFunc != nil {
|
||||
if opt.CallFunc != nil {
|
||||
if _, ok := req.FrontendOpt["frontend.caps"]; !ok {
|
||||
req.FrontendOpt["frontend.caps"] = "moby.buildkit.frontend.subrequests+forward"
|
||||
} else {
|
||||
req.FrontendOpt["frontend.caps"] += ",moby.buildkit.frontend.subrequests+forward"
|
||||
}
|
||||
req.FrontendOpt["requestid"] = "frontend." + opt.PrintFunc.Name
|
||||
req.FrontendOpt["requestid"] = "frontend." + opt.CallFunc.Name
|
||||
}
|
||||
|
||||
res, err := c.Solve(ctx, req)
|
||||
@@ -413,8 +456,8 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if opt.PrintFunc != nil {
|
||||
printRes = res.Metadata
|
||||
if opt.CallFunc != nil {
|
||||
callRes = res.Metadata
|
||||
}
|
||||
|
||||
rKey := resultKey(dp.driverIndex, k)
|
||||
@@ -470,13 +513,15 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
|
||||
if rr.ExporterResponse == nil {
|
||||
rr.ExporterResponse = map[string]string{}
|
||||
}
|
||||
for k, v := range printRes {
|
||||
for k, v := range callRes {
|
||||
rr.ExporterResponse[k] = string(v)
|
||||
}
|
||||
rr.ExporterResponse["buildx.build.ref"] = buildRef
|
||||
if opt.WithProvenanceResponse && node.Driver.HistoryAPISupported(ctx) {
|
||||
if err := setRecordProvenance(ctx, c, rr, so.Ref, pw); err != nil {
|
||||
return err
|
||||
if opt.CallFunc == nil {
|
||||
rr.ExporterResponse["buildx.build.ref"] = buildRef
|
||||
if node.Driver.HistoryAPISupported(ctx) {
|
||||
if err := setRecordProvenance(ctx, c, rr, so.Ref, opt.ProvenanceResponseMode, pw); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -526,6 +571,13 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
|
||||
tracing.FinishWithError(span, err)
|
||||
}
|
||||
}()
|
||||
|
||||
if multiTarget {
|
||||
defer func() {
|
||||
err = errors.Wrapf(err, "target %s", k)
|
||||
}()
|
||||
}
|
||||
|
||||
pw := progress.WithPrefix(w, "default", false)
|
||||
if err := eg2.Wait(); err != nil {
|
||||
return err
|
||||
@@ -608,7 +660,12 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
|
||||
}
|
||||
}
|
||||
|
||||
dt, desc, err := itpull.Combine(ctx, srcs, nil, false)
|
||||
indexAnnotations, err := extractIndexAnnotations(opt.Exports)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dt, desc, err := itpull.Combine(ctx, srcs, indexAnnotations, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -656,6 +713,27 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func extractIndexAnnotations(exports []client.ExportEntry) (map[exptypes.AnnotationKey]string, error) {
|
||||
annotations := map[exptypes.AnnotationKey]string{}
|
||||
for _, exp := range exports {
|
||||
for k, v := range exp.Attrs {
|
||||
ak, ok, err := exptypes.ParseAnnotationKey(k)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch ak.Type {
|
||||
case exptypes.AnnotationIndex, exptypes.AnnotationManifestDescriptor:
|
||||
annotations[ak] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
return annotations, nil
|
||||
}
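The helper above only keeps annotation keys whose parsed type is `AnnotationIndex` or `AnnotationManifestDescriptor`, so plain manifest-level annotations never leak into the combined index. A hedged illustration follows (written in package `build`, since `extractIndexAnnotations` is unexported; the `annotation-index.` key prefix is assumed from BuildKit's image-exporter annotation syntax and may differ):

```go
package build

import (
	"fmt"

	"github.com/moby/buildkit/client"
)

// exampleIndexAnnotations is illustrative only: it shows which exporter
// attributes survive into the annotations later passed to imagetools Combine.
func exampleIndexAnnotations() error {
	exports := []client.ExportEntry{{
		Type: "image",
		Attrs: map[string]string{
			"name": "docker.io/example/app:latest",
			// index-level annotation: kept by extractIndexAnnotations
			"annotation-index.org.opencontainers.image.source": "https://github.com/docker/buildx",
			// manifest-level annotation: parsed, but filtered out here
			"annotation.org.opencontainers.image.title": "app",
		},
	}}

	anns, err := extractIndexAnnotations(exports)
	if err != nil {
		return err
	}
	for k, v := range anns {
		fmt.Printf("%v = %s\n", k, v) // only the index-level key should print
	}
	return nil
}
```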
|
||||
|
||||
func pushWithMoby(ctx context.Context, d *driver.DriverHandle, name string, l progress.SubLogger) error {
|
||||
api := d.Config().DockerAPI
|
||||
if api == nil {
|
||||
@@ -666,7 +744,7 @@ func pushWithMoby(ctx context.Context, d *driver.DriverHandle, name string, l pr
|
||||
return err
|
||||
}
|
||||
|
||||
rc, err := api.ImagePush(ctx, name, imagetypes.PushOptions{
|
||||
rc, err := api.ImagePush(ctx, name, image.PushOptions{
|
||||
RegistryAuth: creds,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -745,11 +823,11 @@ func remoteDigestWithMoby(ctx context.Context, d *driver.DriverHandle, name stri
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
image, _, err := api.ImageInspectWithRaw(ctx, name)
|
||||
img, _, err := api.ImageInspectWithRaw(ctx, name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(image.RepoDigests) == 0 {
|
||||
if len(img.RepoDigests) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
remoteImage, err := api.DistributionInspect(ctx, name, creds)
|
||||
@@ -763,6 +841,124 @@ func resultKey(index int, name string) string {
|
||||
return fmt.Sprintf("%d-%s", index, name)
|
||||
}
|
||||
|
||||
// detectSharedMounts looks for same local mounts used by multiple requests to the same node
|
||||
// and creates a separate session that will be used by all detected requests.
|
||||
func detectSharedMounts(ctx context.Context, reqs map[string][]*reqForNode) (_ map[string][]*session.Session, err error) {
|
||||
type fsTracker struct {
|
||||
fs fsutil.FS
|
||||
so []*client.SolveOpt
|
||||
}
|
||||
type fsKey struct {
|
||||
name string
|
||||
dir string
|
||||
}
|
||||
|
||||
m := map[string]map[fsKey]*fsTracker{}
|
||||
for _, reqs := range reqs {
|
||||
for _, req := range reqs {
|
||||
nodeName := req.resolvedNode.Node().Name
|
||||
if _, ok := m[nodeName]; !ok {
|
||||
m[nodeName] = map[fsKey]*fsTracker{}
|
||||
}
|
||||
fsMap := m[nodeName]
|
||||
for name, m := range req.so.LocalMounts {
|
||||
fs, ok := m.(*fs)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
key := fsKey{name: name, dir: fs.dir}
|
||||
if _, ok := fsMap[key]; !ok {
|
||||
fsMap[key] = &fsTracker{fs: fs.FS}
|
||||
}
|
||||
fsMap[key].so = append(fsMap[key].so, req.so)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type sharedSession struct {
|
||||
*session.Session
|
||||
fsMap map[string]fsutil.FS
|
||||
}
|
||||
|
||||
sessionMap := map[string][]*sharedSession{}
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
for _, sessions := range sessionMap {
|
||||
for _, s := range sessions {
|
||||
s.Close()
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
for node, fsMap := range m {
|
||||
for key, fs := range fsMap {
|
||||
if len(fs.so) <= 1 {
|
||||
continue
|
||||
}
|
||||
|
||||
sessions := sessionMap[node]
|
||||
|
||||
// find session that doesn't have the fs name reserved
|
||||
idx := slices.IndexFunc(sessions, func(s *sharedSession) bool {
|
||||
_, ok := s.fsMap[key.name]
|
||||
return !ok
|
||||
})
|
||||
|
||||
var ss *sharedSession
|
||||
if idx == -1 {
|
||||
s, err := session.NewSession(ctx, fs.so[0].SharedKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ss = &sharedSession{Session: s, fsMap: map[string]fsutil.FS{}}
|
||||
sessions = append(sessions, ss)
|
||||
sessionMap[node] = sessions
|
||||
} else {
|
||||
ss = sessions[idx]
|
||||
}
|
||||
|
||||
ss.fsMap[key.name] = fs.fs
|
||||
for _, so := range fs.so {
|
||||
if so.FrontendAttrs == nil {
|
||||
so.FrontendAttrs = map[string]string{}
|
||||
}
|
||||
so.FrontendAttrs["local-sessionid:"+key.name] = ss.ID()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resetUIDAndGID := func(p string, st *fstypes.Stat) fsutil.MapResult {
|
||||
st.Uid = 0
|
||||
st.Gid = 0
|
||||
return fsutil.MapResultKeep
|
||||
}
|
||||
|
||||
// convert back to regular sessions
|
||||
sessions := map[string][]*session.Session{}
|
||||
for n, ss := range sessionMap {
|
||||
arr := make([]*session.Session, 0, len(ss))
|
||||
for _, s := range ss {
|
||||
arr = append(arr, s.Session)
|
||||
|
||||
src := make(filesync.StaticDirSource, len(s.fsMap))
|
||||
for name, fs := range s.fsMap {
|
||||
fs, err := fsutil.NewFilterFS(fs, &fsutil.FilterOpt{
|
||||
Map: resetUIDAndGID,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
src[name] = fs
|
||||
}
|
||||
s.Allow(filesync.NewFSSyncProvider(src))
|
||||
}
|
||||
sessions[n] = arr
|
||||
}
|
||||
return sessions, nil
|
||||
}
|
||||
|
||||
// calculateChildTargets returns all the targets that depend on current target for reverse index
|
||||
func calculateChildTargets(reqs map[string][]*reqForNode, opt map[string]Options) map[string][]string {
|
||||
out := make(map[string][]string)
|
||||
@@ -910,9 +1106,9 @@ func fallbackPrintError(err error, req gateway.SolveRequest) (gateway.SolveReque
|
||||
return req, false
|
||||
}
|
||||
|
||||
func noPrintFunc(opt map[string]Options) bool {
|
||||
func noCallFunc(opt map[string]Options) bool {
|
||||
for _, v := range opt {
|
||||
if v.PrintFunc != nil {
|
||||
if v.CallFunc != nil {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
@@ -5,7 +5,7 @@ import (
|
||||
stderrors "errors"
|
||||
"net"
|
||||
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/platforms"
|
||||
"github.com/docker/buildx/builder"
|
||||
"github.com/docker/buildx/util/progress"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
@@ -5,7 +5,7 @@ import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/platforms"
|
||||
"github.com/docker/buildx/builder"
|
||||
"github.com/docker/buildx/driver"
|
||||
"github.com/docker/buildx/util/progress"
|
||||
|
@@ -5,7 +5,7 @@ import (
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/platforms"
|
||||
"github.com/docker/buildx/builder"
|
||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
build/git.go (71 changes)
@@ -17,10 +17,19 @@ import (
|
||||
|
||||
const DockerfileLabel = "com.docker.image.source.entrypoint"
|
||||
|
||||
func getGitAttributes(ctx context.Context, contextPath string, dockerfilePath string) (map[string]string, func(key, dir string, so *client.SolveOpt), error) {
|
||||
res := make(map[string]string)
|
||||
type gitAttrsAppendFunc func(so *client.SolveOpt)
|
||||
|
||||
func gitAppendNoneFunc(_ *client.SolveOpt) {}
|
||||
|
||||
func getGitAttributes(ctx context.Context, contextPath, dockerfilePath string) (f gitAttrsAppendFunc, err error) {
|
||||
defer func() {
|
||||
if f == nil {
|
||||
f = gitAppendNoneFunc
|
||||
}
|
||||
}()
|
||||
|
||||
if contextPath == "" {
|
||||
return nil, nil, nil
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
setGitLabels := false
|
||||
@@ -39,7 +48,7 @@ func getGitAttributes(ctx context.Context, contextPath string, dockerfilePath st
|
||||
}
|
||||
|
||||
if !setGitLabels && !setGitInfo {
|
||||
return nil, nil, nil
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// figure out in which directory the git command needs to run in
|
||||
@@ -54,25 +63,27 @@ func getGitAttributes(ctx context.Context, contextPath string, dockerfilePath st
|
||||
gitc, err := gitutil.New(gitutil.WithContext(ctx), gitutil.WithWorkingDir(wd))
|
||||
if err != nil {
|
||||
if st, err1 := os.Stat(path.Join(wd, ".git")); err1 == nil && st.IsDir() {
|
||||
return res, nil, errors.Wrap(err, "git was not found in the system")
|
||||
return nil, errors.Wrap(err, "git was not found in the system")
|
||||
}
|
||||
return nil, nil, nil
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if !gitc.IsInsideWorkTree() {
|
||||
if st, err := os.Stat(path.Join(wd, ".git")); err == nil && st.IsDir() {
|
||||
return res, nil, errors.New("failed to read current commit information with git rev-parse --is-inside-work-tree")
|
||||
return nil, errors.New("failed to read current commit information with git rev-parse --is-inside-work-tree")
|
||||
}
|
||||
return nil, nil, nil
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
root, err := gitc.RootDir()
|
||||
if err != nil {
|
||||
return res, nil, errors.Wrap(err, "failed to get git root dir")
|
||||
return nil, errors.Wrap(err, "failed to get git root dir")
|
||||
}
|
||||
|
||||
res := make(map[string]string)
|
||||
|
||||
if sha, err := gitc.FullCommit(); err != nil && !gitutil.IsUnknownRevision(err) {
|
||||
return res, nil, errors.Wrap(err, "failed to get git commit")
|
||||
return nil, errors.Wrap(err, "failed to get git commit")
|
||||
} else if sha != "" {
|
||||
checkDirty := false
|
||||
if v, ok := os.LookupEnv("BUILDX_GIT_CHECK_DIRTY"); ok {
|
||||
@@ -112,20 +123,38 @@ func getGitAttributes(ctx context.Context, contextPath string, dockerfilePath st
|
||||
}
|
||||
}
|
||||
|
||||
return res, func(key, dir string, so *client.SolveOpt) {
|
||||
return func(so *client.SolveOpt) {
|
||||
if so.FrontendAttrs == nil {
|
||||
so.FrontendAttrs = make(map[string]string)
|
||||
}
|
||||
for k, v := range res {
|
||||
so.FrontendAttrs[k] = v
|
||||
}
|
||||
|
||||
if !setGitInfo || root == "" {
|
||||
return
|
||||
}
|
||||
dir, err := filepath.Abs(dir)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if lp, err := osutil.GetLongPathName(dir); err == nil {
|
||||
dir = lp
|
||||
}
|
||||
dir = osutil.SanitizePath(dir)
|
||||
if r, err := filepath.Rel(root, dir); err == nil && !strings.HasPrefix(r, "..") {
|
||||
so.FrontendAttrs["vcs:localdir:"+key] = r
|
||||
|
||||
for key, mount := range so.LocalMounts {
|
||||
fs, ok := mount.(*fs)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
dir, err := filepath.EvalSymlinks(fs.dir) // keep same behavior as fsutil.NewFS
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
dir, err = filepath.Abs(dir)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if lp, err := osutil.GetLongPathName(dir); err == nil {
|
||||
dir = lp
|
||||
}
|
||||
dir = osutil.SanitizePath(dir)
|
||||
if r, err := filepath.Rel(root, dir); err == nil && !strings.HasPrefix(r, "..") {
|
||||
so.FrontendAttrs["vcs:localdir:"+key] = r
|
||||
}
|
||||
}
|
||||
}, nil
|
||||
}
|
||||
|
@@ -31,7 +31,7 @@ func setupTest(tb testing.TB) {
|
||||
}
|
||||
|
||||
func TestGetGitAttributesNotGitRepo(t *testing.T) {
|
||||
_, _, err := getGitAttributes(context.Background(), t.TempDir(), "Dockerfile")
|
||||
_, err := getGitAttributes(context.Background(), t.TempDir(), "Dockerfile")
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -39,16 +39,18 @@ func TestGetGitAttributesBadGitRepo(t *testing.T) {
|
||||
tmp := t.TempDir()
|
||||
require.NoError(t, os.MkdirAll(path.Join(tmp, ".git"), 0755))
|
||||
|
||||
_, _, err := getGitAttributes(context.Background(), tmp, "Dockerfile")
|
||||
_, err := getGitAttributes(context.Background(), tmp, "Dockerfile")
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestGetGitAttributesNoContext(t *testing.T) {
|
||||
setupTest(t)
|
||||
|
||||
gitattrs, _, err := getGitAttributes(context.Background(), "", "Dockerfile")
|
||||
addGitAttrs, err := getGitAttributes(context.Background(), "", "Dockerfile")
|
||||
assert.NoError(t, err)
|
||||
assert.Empty(t, gitattrs)
|
||||
var so client.SolveOpt
|
||||
addGitAttrs(&so)
|
||||
assert.Empty(t, so.FrontendAttrs)
|
||||
}
|
||||
|
||||
func TestGetGitAttributes(t *testing.T) {
|
||||
@@ -115,15 +117,17 @@ func TestGetGitAttributes(t *testing.T) {
|
||||
if tt.envGitInfo != "" {
|
||||
t.Setenv("BUILDX_GIT_INFO", tt.envGitInfo)
|
||||
}
|
||||
gitattrs, _, err := getGitAttributes(context.Background(), ".", "Dockerfile")
|
||||
addGitAttrs, err := getGitAttributes(context.Background(), ".", "Dockerfile")
|
||||
require.NoError(t, err)
|
||||
var so client.SolveOpt
|
||||
addGitAttrs(&so)
|
||||
for _, e := range tt.expected {
|
||||
assert.Contains(t, gitattrs, e)
|
||||
assert.NotEmpty(t, gitattrs[e])
|
||||
assert.Contains(t, so.FrontendAttrs, e)
|
||||
assert.NotEmpty(t, so.FrontendAttrs[e])
|
||||
if e == "label:"+DockerfileLabel {
|
||||
assert.Equal(t, "Dockerfile", gitattrs[e])
|
||||
assert.Equal(t, "Dockerfile", so.FrontendAttrs[e])
|
||||
} else if e == "label:"+specs.AnnotationSource || e == "vcs:source" {
|
||||
assert.Equal(t, "git@github.com:docker/buildx.git", gitattrs[e])
|
||||
assert.Equal(t, "git@github.com:docker/buildx.git", so.FrontendAttrs[e])
|
||||
}
|
||||
}
|
||||
})
|
||||
@@ -140,20 +144,25 @@ func TestGetGitAttributesDirty(t *testing.T) {
|
||||
require.NoError(t, os.WriteFile(filepath.Join("dir", "Dockerfile"), df, 0644))
|
||||
|
||||
t.Setenv("BUILDX_GIT_LABELS", "true")
|
||||
gitattrs, _, _ := getGitAttributes(context.Background(), ".", "Dockerfile")
|
||||
assert.Equal(t, 5, len(gitattrs))
|
||||
addGitAttrs, err := getGitAttributes(context.Background(), ".", "Dockerfile")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, gitattrs, "label:"+DockerfileLabel)
|
||||
assert.Equal(t, "Dockerfile", gitattrs["label:"+DockerfileLabel])
|
||||
assert.Contains(t, gitattrs, "label:"+specs.AnnotationSource)
|
||||
assert.Equal(t, "git@github.com:docker/buildx.git", gitattrs["label:"+specs.AnnotationSource])
|
||||
assert.Contains(t, gitattrs, "label:"+specs.AnnotationRevision)
|
||||
assert.True(t, strings.HasSuffix(gitattrs["label:"+specs.AnnotationRevision], "-dirty"))
|
||||
var so client.SolveOpt
|
||||
addGitAttrs(&so)
|
||||
|
||||
assert.Contains(t, gitattrs, "vcs:source")
|
||||
assert.Equal(t, "git@github.com:docker/buildx.git", gitattrs["vcs:source"])
|
||||
assert.Contains(t, gitattrs, "vcs:revision")
|
||||
assert.True(t, strings.HasSuffix(gitattrs["vcs:revision"], "-dirty"))
|
||||
assert.Equal(t, 5, len(so.FrontendAttrs))
|
||||
|
||||
assert.Contains(t, so.FrontendAttrs, "label:"+DockerfileLabel)
|
||||
assert.Equal(t, "Dockerfile", so.FrontendAttrs["label:"+DockerfileLabel])
|
||||
assert.Contains(t, so.FrontendAttrs, "label:"+specs.AnnotationSource)
|
||||
assert.Equal(t, "git@github.com:docker/buildx.git", so.FrontendAttrs["label:"+specs.AnnotationSource])
|
||||
assert.Contains(t, so.FrontendAttrs, "label:"+specs.AnnotationRevision)
|
||||
assert.True(t, strings.HasSuffix(so.FrontendAttrs["label:"+specs.AnnotationRevision], "-dirty"))
|
||||
|
||||
assert.Contains(t, so.FrontendAttrs, "vcs:source")
|
||||
assert.Equal(t, "git@github.com:docker/buildx.git", so.FrontendAttrs["vcs:source"])
|
||||
assert.Contains(t, so.FrontendAttrs, "vcs:revision")
|
||||
assert.True(t, strings.HasSuffix(so.FrontendAttrs["vcs:revision"], "-dirty"))
|
||||
}
|
||||
|
||||
func TestLocalDirs(t *testing.T) {
|
||||
@@ -163,15 +172,17 @@ func TestLocalDirs(t *testing.T) {
|
||||
FrontendAttrs: map[string]string{},
|
||||
}
|
||||
|
||||
_, addVCSLocalDir, err := getGitAttributes(context.Background(), ".", "Dockerfile")
|
||||
addGitAttrs, err := getGitAttributes(context.Background(), ".", "Dockerfile")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, addVCSLocalDir)
|
||||
|
||||
require.NoError(t, setLocalMount("context", ".", so, addVCSLocalDir))
|
||||
require.NoError(t, setLocalMount("context", ".", so))
|
||||
require.NoError(t, setLocalMount("dockerfile", ".", so))
|
||||
|
||||
addGitAttrs(so)
|
||||
|
||||
require.Contains(t, so.FrontendAttrs, "vcs:localdir:context")
|
||||
assert.Equal(t, ".", so.FrontendAttrs["vcs:localdir:context"])
|
||||
|
||||
require.NoError(t, setLocalMount("dockerfile", ".", so, addVCSLocalDir))
|
||||
require.Contains(t, so.FrontendAttrs, "vcs:localdir:dockerfile")
|
||||
assert.Equal(t, ".", so.FrontendAttrs["vcs:localdir:dockerfile"])
|
||||
}
|
||||
@@ -194,16 +205,17 @@ func TestLocalDirsSub(t *testing.T) {
|
||||
so := &client.SolveOpt{
|
||||
FrontendAttrs: map[string]string{},
|
||||
}
|
||||
require.NoError(t, setLocalMount("context", ".", so))
|
||||
require.NoError(t, setLocalMount("dockerfile", "app", so))
|
||||
|
||||
_, addVCSLocalDir, err := getGitAttributes(context.Background(), ".", "app/Dockerfile")
|
||||
addGitAttrs, err := getGitAttributes(context.Background(), ".", "app/Dockerfile")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, addVCSLocalDir)
|
||||
|
||||
require.NoError(t, setLocalMount("context", ".", so, addVCSLocalDir))
|
||||
addGitAttrs(so)
|
||||
|
||||
require.Contains(t, so.FrontendAttrs, "vcs:localdir:context")
|
||||
assert.Equal(t, ".", so.FrontendAttrs["vcs:localdir:context"])
|
||||
|
||||
require.NoError(t, setLocalMount("dockerfile", "app", so, addVCSLocalDir))
|
||||
require.Contains(t, so.FrontendAttrs, "vcs:localdir:dockerfile")
|
||||
assert.Equal(t, "app", so.FrontendAttrs["vcs:localdir:dockerfile"])
|
||||
}
|
||||
|
@@ -15,29 +15,29 @@ func saveLocalState(so *client.SolveOpt, target string, opts Options, node build
|
||||
}
|
||||
lp := opts.Inputs.ContextPath
|
||||
dp := opts.Inputs.DockerfilePath
|
||||
if lp != "" || dp != "" {
|
||||
if lp != "" {
|
||||
lp, err = filepath.Abs(lp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if dp != "" {
|
||||
dp, err = filepath.Abs(dp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
l, err := localstate.New(configDir)
|
||||
if dp != "" && !IsRemoteURL(lp) && lp != "-" && dp != "-" {
|
||||
dp, err = filepath.Abs(dp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return l.SaveRef(node.Builder, node.Name, so.Ref, localstate.State{
|
||||
Target: target,
|
||||
LocalPath: lp,
|
||||
DockerfilePath: dp,
|
||||
GroupRef: opts.GroupRef,
|
||||
})
|
||||
}
|
||||
return nil
|
||||
if lp != "" && !IsRemoteURL(lp) && lp != "-" {
|
||||
lp, err = filepath.Abs(lp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if lp == "" && dp == "" {
|
||||
return nil
|
||||
}
|
||||
l, err := localstate.New(configDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return l.SaveRef(node.Builder, node.Name, so.Ref, localstate.State{
|
||||
Target: target,
|
||||
LocalPath: lp,
|
||||
DockerfilePath: dp,
|
||||
GroupRef: opts.GroupRef,
|
||||
})
|
||||
}
|
||||
|
build/opt.go (100 changes)
@@ -1,18 +1,19 @@
|
||||
package build
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/content/local"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/platforms"
|
||||
"github.com/distribution/reference"
|
||||
"github.com/docker/buildx/builder"
|
||||
"github.com/docker/buildx/driver"
|
||||
@@ -34,7 +35,7 @@ import (
|
||||
"github.com/tonistiigi/fsutil"
|
||||
)
|
||||
|
||||
func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Options, bopts gateway.BuildOpts, configDir string, addVCSLocalDir func(key, dir string, so *client.SolveOpt), pw progress.Writer, docker *dockerutil.Client) (_ *client.SolveOpt, release func(), err error) {
|
||||
func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Options, bopts gateway.BuildOpts, configDir string, pw progress.Writer, docker *dockerutil.Client) (_ *client.SolveOpt, release func(), err error) {
|
||||
nodeDriver := node.Driver
|
||||
defers := make([]func(), 0, 2)
|
||||
releaseF := func() {
|
||||
@@ -104,10 +105,6 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
|
||||
SourcePolicy: opt.SourcePolicy,
|
||||
}
|
||||
|
||||
if so.Ref == "" {
|
||||
so.Ref = identity.NewID()
|
||||
}
|
||||
|
||||
if opt.CgroupParent != "" {
|
||||
so.FrontendAttrs["cgroup-parent"] = opt.CgroupParent
|
||||
}
|
||||
@@ -161,7 +158,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
|
||||
case 1:
|
||||
// valid
|
||||
case 0:
|
||||
if !noDefaultLoad() && opt.PrintFunc == nil {
|
||||
if !noDefaultLoad() && opt.CallFunc == nil {
|
||||
if nodeDriver.IsMobyDriver() {
|
||||
// backwards compat for docker driver only:
|
||||
// this ensures the build results in a docker image.
|
||||
@@ -258,25 +255,23 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
|
||||
if e.Type == "docker" || e.Type == "image" || e.Type == "oci" {
|
||||
// inline buildinfo attrs from build arg
|
||||
if v, ok := opt.BuildArgs["BUILDKIT_INLINE_BUILDINFO_ATTRS"]; ok {
|
||||
e.Attrs["buildinfo-attrs"] = v
|
||||
opt.Exports[i].Attrs["buildinfo-attrs"] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
so.Exports = opt.Exports
|
||||
so.Session = opt.Session
|
||||
so.Session = slices.Clone(opt.Session)
|
||||
|
||||
releaseLoad, err := loadInputs(ctx, nodeDriver, opt.Inputs, addVCSLocalDir, pw, &so)
|
||||
releaseLoad, err := loadInputs(ctx, nodeDriver, opt.Inputs, pw, &so)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
defers = append(defers, releaseLoad)
|
||||
|
||||
if sharedKey := so.LocalDirs["context"]; sharedKey != "" {
|
||||
if p, err := filepath.Abs(sharedKey); err == nil {
|
||||
sharedKey = filepath.Base(p)
|
||||
}
|
||||
so.SharedKey = sharedKey + ":" + confutil.TryNodeIdentifier(configDir)
|
||||
// add node identifier to shared key if one was specified
|
||||
if so.SharedKey != "" {
|
||||
so.SharedKey += ":" + confutil.TryNodeIdentifier(configDir)
|
||||
}
|
||||
|
||||
if opt.Pull {
|
||||
@@ -353,15 +348,15 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
|
||||
so.FrontendAttrs["ulimit"] = ulimits
|
||||
}
|
||||
|
||||
// mark info request as internal
|
||||
if opt.PrintFunc != nil {
|
||||
// mark call request as internal
|
||||
if opt.CallFunc != nil {
|
||||
so.Internal = true
|
||||
}
|
||||
|
||||
return &so, releaseF, nil
|
||||
}
|
||||
|
||||
func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, addVCSLocalDir func(key, dir string, so *client.SolveOpt), pw progress.Writer, target *client.SolveOpt) (func(), error) {
|
||||
func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw progress.Writer, target *client.SolveOpt) (func(), error) {
|
||||
if inp.ContextPath == "" {
|
||||
return nil, errors.New("please specify build context (e.g. \".\" for the current directory)")
|
||||
}
|
||||
@@ -370,7 +365,7 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, addVCSL
|
||||
|
||||
var (
|
||||
err error
|
||||
dockerfileReader io.Reader
|
||||
dockerfileReader io.ReadCloser
|
||||
dockerfileDir string
|
||||
dockerfileName = inp.DockerfilePath
|
||||
toRemove []string
|
||||
@@ -385,11 +380,11 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, addVCSL
|
||||
target.FrontendInputs["dockerfile"] = *inp.ContextState
|
||||
case inp.ContextPath == "-":
|
||||
if inp.DockerfilePath == "-" {
|
||||
return nil, errStdinConflict
|
||||
return nil, errors.Errorf("invalid argument: can't use stdin for both build context and dockerfile")
|
||||
}
|
||||
|
||||
buf := bufio.NewReader(inp.InStream)
|
||||
magic, err := buf.Peek(archiveHeaderSize * 2)
|
||||
rc := inp.InStream.NewReadCloser()
|
||||
magic, err := inp.InStream.Peek(archiveHeaderSize * 2)
|
||||
if err != nil && err != io.EOF {
|
||||
return nil, errors.Wrap(err, "failed to peek context header from STDIN")
|
||||
}
|
||||
@@ -397,28 +392,33 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, addVCSL
|
||||
if isArchive(magic) {
|
||||
// stdin is context
|
||||
up := uploadprovider.New()
|
||||
target.FrontendAttrs["context"] = up.Add(buf)
|
||||
target.FrontendAttrs["context"] = up.Add(rc)
|
||||
target.Session = append(target.Session, up)
|
||||
} else {
|
||||
if inp.DockerfilePath != "" {
|
||||
return nil, errDockerfileConflict
|
||||
return nil, errors.Errorf("ambiguous Dockerfile source: both stdin and flag correspond to Dockerfiles")
|
||||
}
|
||||
// stdin is dockerfile
|
||||
dockerfileReader = buf
|
||||
dockerfileReader = rc
|
||||
inp.ContextPath, _ = os.MkdirTemp("", "empty-dir")
|
||||
toRemove = append(toRemove, inp.ContextPath)
|
||||
if err := setLocalMount("context", inp.ContextPath, target, addVCSLocalDir); err != nil {
|
||||
if err := setLocalMount("context", inp.ContextPath, target); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
case osutil.IsLocalDir(inp.ContextPath):
|
||||
if err := setLocalMount("context", inp.ContextPath, target, addVCSLocalDir); err != nil {
|
||||
if err := setLocalMount("context", inp.ContextPath, target); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sharedKey := inp.ContextPath
|
||||
if p, err := filepath.Abs(sharedKey); err == nil {
|
||||
sharedKey = filepath.Base(p)
|
||||
}
|
||||
target.SharedKey = sharedKey
|
||||
switch inp.DockerfilePath {
|
||||
case "-":
|
||||
dockerfileReader = inp.InStream
|
||||
dockerfileReader = inp.InStream.NewReadCloser()
|
||||
case "":
|
||||
dockerfileDir = inp.ContextPath
|
||||
default:
|
||||
@@ -427,7 +427,7 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, addVCSL
|
||||
}
|
||||
case IsRemoteURL(inp.ContextPath):
|
||||
if inp.DockerfilePath == "-" {
|
||||
dockerfileReader = inp.InStream
|
||||
dockerfileReader = inp.InStream.NewReadCloser()
|
||||
} else if filepath.IsAbs(inp.DockerfilePath) {
|
||||
dockerfileDir = filepath.Dir(inp.DockerfilePath)
|
||||
dockerfileName = filepath.Base(inp.DockerfilePath)
|
||||
@@ -439,11 +439,11 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, addVCSL
|
||||
}
|
||||
|
||||
if inp.DockerfileInline != "" {
|
||||
dockerfileReader = strings.NewReader(inp.DockerfileInline)
|
||||
dockerfileReader = io.NopCloser(strings.NewReader(inp.DockerfileInline))
|
||||
}
|
||||
|
||||
if dockerfileReader != nil {
|
||||
dockerfileDir, err = createTempDockerfile(dockerfileReader)
|
||||
dockerfileDir, err = createTempDockerfile(dockerfileReader, inp.InStream)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -467,7 +467,7 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, addVCSL
|
||||
}
|
||||
|
||||
if dockerfileDir != "" {
|
||||
if err := setLocalMount("dockerfile", dockerfileDir, target, addVCSLocalDir); err != nil {
|
||||
if err := setLocalMount("dockerfile", dockerfileDir, target); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dockerfileName = handleLowercaseDockerfile(dockerfileDir, dockerfileName)
|
||||
@@ -529,7 +529,7 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, addVCSL
|
||||
if k == "context" || k == "dockerfile" {
|
||||
localName = "_" + k // underscore to avoid collisions
|
||||
}
|
||||
if err := setLocalMount(localName, v.Path, target, addVCSLocalDir); err != nil {
|
||||
if err := setLocalMount(localName, v.Path, target); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
target.FrontendAttrs["context:"+k] = "local:" + localName
|
||||
@@ -571,26 +571,19 @@ func resolveDigest(localPath, tag string) (dig string, _ error) {
|
||||
return dig, nil
|
||||
}
|
||||
|
||||
func setLocalMount(name, root string, so *client.SolveOpt, addVCSLocalDir func(key, dir string, so *client.SolveOpt)) error {
|
||||
lm, err := fsutil.NewFS(root)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
root, err = filepath.EvalSymlinks(root) // keep same behavior as fsutil.NewFS
|
||||
func setLocalMount(name, dir string, so *client.SolveOpt) error {
|
||||
lm, err := fsutil.NewFS(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if so.LocalMounts == nil {
|
||||
so.LocalMounts = map[string]fsutil.FS{}
|
||||
}
|
||||
so.LocalMounts[name] = lm
|
||||
if addVCSLocalDir != nil {
|
||||
addVCSLocalDir(name, root, so)
|
||||
}
|
||||
so.LocalMounts[name] = &fs{FS: lm, dir: dir}
|
||||
return nil
|
||||
}
|
||||
|
||||
func createTempDockerfile(r io.Reader) (string, error) {
|
||||
func createTempDockerfile(r io.Reader, multiReader *SyncMultiReader) (string, error) {
|
||||
dir, err := os.MkdirTemp("", "dockerfile")
|
||||
if err != nil {
|
||||
return "", err
|
||||
@@ -600,6 +593,16 @@ func createTempDockerfile(r io.Reader) (string, error) {
|
||||
return "", err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
if multiReader != nil {
|
||||
dt, err := io.ReadAll(r)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
multiReader.Reset(dt)
|
||||
r = bytes.NewReader(dt)
|
||||
}
|
||||
|
||||
if _, err := io.Copy(f, r); err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -636,3 +639,10 @@ func handleLowercaseDockerfile(dir, p string) string {
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
type fs struct {
|
||||
fsutil.FS
|
||||
dir string
|
||||
}
|
||||
|
||||
var _ fsutil.FS = &fs{}
|
||||
|
@@ -29,8 +29,7 @@ type provenanceBuilder struct {
|
||||
ID string `json:"id,omitempty"`
|
||||
}
|
||||
|
||||
func setRecordProvenance(ctx context.Context, c *client.Client, sr *client.SolveResponse, ref string, pw progress.Writer) error {
|
||||
mode := confutil.MetadataProvenance()
|
||||
func setRecordProvenance(ctx context.Context, c *client.Client, sr *client.SolveResponse, ref string, mode confutil.MetadataProvenanceMode, pw progress.Writer) error {
|
||||
if mode == confutil.MetadataProvenanceModeDisabled {
|
||||
return nil
|
||||
}
|
||||
|
build/replicatedstream.go (new file, 164 lines)
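This new file lets a single stdin stream feed several consumers: every `NewReadCloser()` replays the same bytes, readers advance in lock-step, and `Reset` pins the content once the Dockerfile has been fully read (see `createTempDockerfile` above). A small usage sketch under those assumptions, with an in-memory reader standing in for `os.Stdin`:

```go
package main

import (
	"fmt"
	"io"
	"strings"
	"sync"

	"github.com/docker/buildx/build"
)

func main() {
	// Stand-in for os.Stdin carrying an inline Dockerfile.
	src := strings.NewReader("FROM busybox\nRUN echo hello\n")
	mr := build.NewSyncMultiReader(src)

	// Register every consumer before any of them starts reading, as the
	// accompanying test does, so no replica misses the head of the stream.
	readers := make([]io.ReadCloser, 3)
	for i := range readers {
		readers[i] = mr.NewReadCloser()
	}

	var wg sync.WaitGroup
	for i, rc := range readers {
		wg.Add(1)
		go func(id int, rc io.ReadCloser) {
			defer wg.Done()
			defer rc.Close()
			dt, err := io.ReadAll(rc)
			if err != nil {
				fmt.Printf("reader %d: %v\n", id, err)
				return
			}
			fmt.Printf("reader %d got %d bytes\n", id, len(dt))
		}(i, rc)
	}
	wg.Wait()
}
```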
@@ -0,0 +1,164 @@
|
||||
package build
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type SyncMultiReader struct {
|
||||
source *bufio.Reader
|
||||
buffer []byte
|
||||
static []byte
|
||||
mu sync.Mutex
|
||||
cond *sync.Cond
|
||||
readers []*syncReader
|
||||
err error
|
||||
offset int
|
||||
}
|
||||
|
||||
type syncReader struct {
|
||||
mr *SyncMultiReader
|
||||
offset int
|
||||
closed bool
|
||||
}
|
||||
|
||||
func NewSyncMultiReader(source io.Reader) *SyncMultiReader {
|
||||
mr := &SyncMultiReader{
|
||||
source: bufio.NewReader(source),
|
||||
buffer: make([]byte, 0, 32*1024),
|
||||
}
|
||||
mr.cond = sync.NewCond(&mr.mu)
|
||||
return mr
|
||||
}
|
||||
|
||||
func (mr *SyncMultiReader) Peek(n int) ([]byte, error) {
|
||||
mr.mu.Lock()
|
||||
defer mr.mu.Unlock()
|
||||
|
||||
if mr.static != nil {
|
||||
return mr.static[:min(n, len(mr.static))], nil
|
||||
}
|
||||
|
||||
return mr.source.Peek(n)
|
||||
}
|
||||
|
||||
func (mr *SyncMultiReader) Reset(dt []byte) {
|
||||
mr.mu.Lock()
|
||||
defer mr.mu.Unlock()
|
||||
|
||||
mr.static = dt
|
||||
}
|
||||
|
||||
func (mr *SyncMultiReader) NewReadCloser() io.ReadCloser {
|
||||
mr.mu.Lock()
|
||||
defer mr.mu.Unlock()
|
||||
|
||||
if mr.static != nil {
|
||||
return io.NopCloser(bytes.NewReader(mr.static))
|
||||
}
|
||||
|
||||
reader := &syncReader{
|
||||
mr: mr,
|
||||
}
|
||||
mr.readers = append(mr.readers, reader)
|
||||
return reader
|
||||
}
|
||||
|
||||
func (sr *syncReader) Read(p []byte) (int, error) {
|
||||
sr.mr.mu.Lock()
|
||||
defer sr.mr.mu.Unlock()
|
||||
|
||||
return sr.read(p)
|
||||
}
|
||||
|
||||
func (sr *syncReader) read(p []byte) (int, error) {
|
||||
end := sr.mr.offset + len(sr.mr.buffer)
|
||||
|
||||
loop0:
|
||||
for {
|
||||
if sr.closed {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
end := sr.mr.offset + len(sr.mr.buffer)
|
||||
|
||||
if sr.mr.err != nil && sr.offset == end {
|
||||
return 0, sr.mr.err
|
||||
}
|
||||
|
||||
start := sr.offset - sr.mr.offset
|
||||
|
||||
dt := sr.mr.buffer[start:]
|
||||
|
||||
if len(dt) > 0 {
|
||||
n := copy(p, dt)
|
||||
sr.offset += n
|
||||
sr.mr.cond.Broadcast()
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// check for readers that have not caught up
|
||||
hasOpen := false
|
||||
for _, r := range sr.mr.readers {
|
||||
if !r.closed {
|
||||
hasOpen = true
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
if r.offset < end {
|
||||
sr.mr.cond.Wait()
|
||||
continue loop0
|
||||
}
|
||||
}
|
||||
|
||||
if !hasOpen {
|
||||
return 0, io.EOF
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
last := sr.mr.offset + len(sr.mr.buffer)
|
||||
// another reader has already updated the buffer
|
||||
if last > end || sr.mr.err != nil {
|
||||
return sr.read(p)
|
||||
}
|
||||
|
||||
sr.mr.offset += len(sr.mr.buffer)
|
||||
|
||||
sr.mr.buffer = sr.mr.buffer[:cap(sr.mr.buffer)]
|
||||
n, err := sr.mr.source.Read(sr.mr.buffer)
|
||||
if n >= 0 {
|
||||
sr.mr.buffer = sr.mr.buffer[:n]
|
||||
} else {
|
||||
sr.mr.buffer = sr.mr.buffer[:0]
|
||||
}
|
||||
|
||||
sr.mr.cond.Broadcast()
|
||||
|
||||
if err != nil {
|
||||
sr.mr.err = err
|
||||
return 0, err
|
||||
}
|
||||
|
||||
nn := copy(p, sr.mr.buffer)
|
||||
sr.offset += nn
|
||||
|
||||
return nn, nil
|
||||
}
|
||||
|
||||
func (sr *syncReader) Close() error {
|
||||
sr.mr.mu.Lock()
|
||||
defer sr.mr.mu.Unlock()
|
||||
|
||||
if sr.closed {
|
||||
return nil
|
||||
}
|
||||
|
||||
sr.closed = true
|
||||
|
||||
sr.mr.cond.Broadcast()
|
||||
|
||||
return nil
|
||||
}
|
build/replicatedstream_test.go (new file, 77 lines)
@@ -0,0 +1,77 @@
|
||||
package build
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"io"
|
||||
mathrand "math/rand"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func generateRandomData(size int) []byte {
|
||||
data := make([]byte, size)
|
||||
rand.Read(data)
|
||||
return data
|
||||
}
|
||||
func TestSyncMultiReaderParallel(t *testing.T) {
|
||||
data := generateRandomData(1024 * 1024)
|
||||
source := bytes.NewReader(data)
|
||||
mr := NewSyncMultiReader(source)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
numReaders := 10
|
||||
bufferSize := 4096 * 4
|
||||
|
||||
readers := make([]io.ReadCloser, numReaders)
|
||||
|
||||
for i := 0; i < numReaders; i++ {
|
||||
readers[i] = mr.NewReadCloser()
|
||||
}
|
||||
|
||||
for i := 0; i < numReaders; i++ {
|
||||
wg.Add(1)
|
||||
go func(readerId int) {
|
||||
defer wg.Done()
|
||||
reader := readers[readerId]
|
||||
defer reader.Close()
|
||||
|
||||
totalRead := 0
|
||||
buf := make([]byte, bufferSize)
|
||||
for totalRead < len(data) {
|
||||
// Simulate random read sizes
|
||||
readSize := mathrand.Intn(bufferSize) //nolint:gosec
|
||||
n, err := reader.Read(buf[:readSize])
|
||||
|
||||
if n > 0 {
|
||||
assert.Equal(t, data[totalRead:totalRead+n], buf[:n], "Reader %d mismatch", readerId)
|
||||
totalRead += n
|
||||
}
|
||||
|
||||
if err == io.EOF {
|
||||
assert.Equal(t, len(data), totalRead, "Reader %d EOF mismatch", readerId)
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err, "Reader %d error", readerId)
|
||||
|
||||
if mathrand.Intn(1000) == 0 { //nolint:gosec
|
||||
t.Logf("Reader %d closing", readerId)
|
||||
// Simulate random close
|
||||
return
|
||||
}
|
||||
|
||||
// Simulate random timing between reads
|
||||
time.Sleep(time.Millisecond * time.Duration(mathrand.Intn(5))) //nolint:gosec
|
||||
}
|
||||
|
||||
assert.Equal(t, len(data), totalRead, "Reader %d total read mismatch", readerId)
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
}
|
@@ -2,7 +2,6 @@ package builder
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/csv"
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"os"
|
||||
@@ -27,6 +26,7 @@ import (
|
||||
"github.com/moby/buildkit/util/progress/progressui"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/tonistiigi/go-csvvalue"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
@@ -601,8 +601,7 @@ func csvToMap(in []string) (map[string]string, error) {
|
||||
}
|
||||
m := make(map[string]string, len(in))
|
||||
for _, s := range in {
|
||||
csvReader := csv.NewReader(strings.NewReader(s))
|
||||
fields, err := csvReader.Read()
|
||||
fields, err := csvvalue.Fields(s, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@@ -6,9 +6,8 @@ import (
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/platforms"
|
||||
"github.com/docker/buildx/driver"
|
||||
ctxkube "github.com/docker/buildx/driver/kubernetes/context"
|
||||
"github.com/docker/buildx/store"
|
||||
"github.com/docker/buildx/store/storeutil"
|
||||
"github.com/docker/buildx/util/dockerutil"
|
||||
@@ -18,7 +17,6 @@ import (
|
||||
"github.com/moby/buildkit/util/grpcerrors"
|
||||
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"google.golang.org/grpc/codes"
|
||||
)
|
||||
@@ -48,8 +46,9 @@ func (b *Builder) Nodes() []Node {
|
||||
type LoadNodesOption func(*loadNodesOptions)
|
||||
|
||||
type loadNodesOptions struct {
|
||||
data bool
|
||||
dialMeta map[string][]string
|
||||
data bool
|
||||
dialMeta map[string][]string
|
||||
clientOpt []client.ClientOpt
|
||||
}
|
||||
|
||||
func WithData() LoadNodesOption {
|
||||
@@ -64,6 +63,12 @@ func WithDialMeta(dialMeta map[string][]string) LoadNodesOption {
|
||||
}
|
||||
}
|
||||
|
||||
func WithClientOpt(clientOpt ...client.ClientOpt) LoadNodesOption {
|
||||
return func(o *loadNodesOptions) {
|
||||
o.clientOpt = clientOpt
|
||||
}
|
||||
}
|
||||
|
||||
// LoadNodes loads and returns nodes for this builder.
|
||||
// TODO: this should be a method on a Node object and lazy load data for each driver.
|
||||
func (b *Builder) LoadNodes(ctx context.Context, opts ...LoadNodesOption) (_ []Node, err error) {
|
||||
@@ -112,37 +117,19 @@ func (b *Builder) LoadNodes(ctx context.Context, opts ...LoadNodesOption) (_ []N
|
||||
return nil
|
||||
}
|
||||
|
||||
contextStore := b.opts.dockerCli.ContextStore()
|
||||
|
||||
var kcc driver.KubeClientConfig
|
||||
kcc, err = ctxkube.ConfigFromEndpoint(n.Endpoint, contextStore)
|
||||
if err != nil {
|
||||
// err is returned if n.Endpoint is non-context name like "unix:///var/run/docker.sock".
|
||||
// try again with name="default".
|
||||
// FIXME(@AkihiroSuda): n should retain real context name.
|
||||
kcc, err = ctxkube.ConfigFromEndpoint("default", contextStore)
|
||||
if err != nil {
|
||||
logrus.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
tryToUseKubeConfigInCluster := false
|
||||
if kcc == nil {
|
||||
tryToUseKubeConfigInCluster = true
|
||||
} else {
|
||||
if _, err := kcc.ClientConfig(); err != nil {
|
||||
tryToUseKubeConfigInCluster = true
|
||||
}
|
||||
}
|
||||
if tryToUseKubeConfigInCluster {
|
||||
kccInCluster := driver.KubeClientConfigInCluster{}
|
||||
if _, err := kccInCluster.ClientConfig(); err == nil {
|
||||
logrus.Debug("using kube config in cluster")
|
||||
kcc = kccInCluster
|
||||
}
|
||||
}
|
||||
|
||||
d, err := driver.GetDriver(ctx, driver.BuilderName(n.Name), factory, n.Endpoint, dockerapi, imageopt.Auth, kcc, n.BuildkitdFlags, n.Files, n.DriverOpts, n.Platforms, b.opts.contextPathHash, lno.dialMeta)
|
||||
d, err := driver.GetDriver(ctx, factory, driver.InitConfig{
|
||||
Name: driver.BuilderName(n.Name),
|
||||
EndpointAddr: n.Endpoint,
|
||||
DockerAPI: dockerapi,
|
||||
ContextStore: b.opts.dockerCli.ContextStore(),
|
||||
BuildkitdFlags: n.BuildkitdFlags,
|
||||
Files: n.Files,
|
||||
DriverOpts: n.DriverOpts,
|
||||
Auth: imageopt.Auth,
|
||||
Platforms: n.Platforms,
|
||||
ContextPathHash: b.opts.contextPathHash,
|
||||
DialMeta: lno.dialMeta,
|
||||
})
|
||||
if err != nil {
|
||||
node.Err = err
|
||||
return nil
|
||||
@@ -151,7 +138,7 @@ func (b *Builder) LoadNodes(ctx context.Context, opts ...LoadNodesOption) (_ []N
|
||||
node.ImageOpt = imageopt
|
||||
|
||||
if lno.data {
|
||||
if err := node.loadData(ctx); err != nil {
|
||||
if err := node.loadData(ctx, lno.clientOpt...); err != nil {
|
||||
node.Err = err
|
||||
}
|
||||
}
|
||||
@@ -247,7 +234,7 @@ func (n *Node) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
func (n *Node) loadData(ctx context.Context) error {
|
||||
func (n *Node) loadData(ctx context.Context, clientOpt ...client.ClientOpt) error {
|
||||
if n.Driver == nil {
|
||||
return nil
|
||||
}
|
||||
@@ -257,7 +244,7 @@ func (n *Node) loadData(ctx context.Context) error {
|
||||
}
|
||||
n.DriverInfo = info
|
||||
if n.DriverInfo.Status == driver.Running {
|
||||
driverClient, err := n.Driver.Client(ctx)
|
||||
driverClient, err := n.Driver.Client(ctx, clientOpt...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@@ -1 +1,4 @@
comment: false

ignore:
  - "**/*.pb.go"
commands/bake.go (506 changes)
@@ -1,24 +1,36 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"cmp"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/containerd/console"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/platforms"
|
||||
"github.com/docker/buildx/bake"
|
||||
"github.com/docker/buildx/bake/hclparser"
|
||||
"github.com/docker/buildx/build"
|
||||
"github.com/docker/buildx/builder"
|
||||
"github.com/docker/buildx/controller/pb"
|
||||
"github.com/docker/buildx/localstate"
|
||||
"github.com/docker/buildx/util/buildflags"
|
||||
"github.com/docker/buildx/util/cobrautil"
|
||||
"github.com/docker/buildx/util/cobrautil/completion"
|
||||
"github.com/docker/buildx/util/confutil"
|
||||
"github.com/docker/buildx/util/desktop"
|
||||
"github.com/docker/buildx/util/dockerutil"
|
||||
"github.com/docker/buildx/util/osutil"
|
||||
"github.com/docker/buildx/util/progress"
|
||||
"github.com/docker/buildx/util/tracing"
|
||||
"github.com/docker/cli/cli/command"
|
||||
@@ -26,22 +38,29 @@ import (
|
||||
"github.com/moby/buildkit/util/progress/progressui"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
)
|
||||
|
||||
type bakeOptions struct {
|
||||
files []string
|
||||
overrides []string
|
||||
printOnly bool
|
||||
sbom string
|
||||
provenance string
|
||||
files []string
|
||||
overrides []string
|
||||
printOnly bool
|
||||
listTargets bool
|
||||
listVars bool
|
||||
sbom string
|
||||
provenance string
|
||||
allow []string
|
||||
|
||||
builder string
|
||||
metadataFile string
|
||||
exportPush bool
|
||||
exportLoad bool
|
||||
callFunc string
|
||||
}
|
||||
|
||||
func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in bakeOptions, cFlags commonFlags) (err error) {
|
||||
mp := dockerCli.MeterProvider()
|
||||
|
||||
ctx, end, err := tracing.TraceCurrentCommand(ctx, "bake")
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -50,26 +69,16 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
|
||||
end(err)
|
||||
}()
|
||||
|
||||
var url string
|
||||
cmdContext := "cwd://"
|
||||
|
||||
if len(targets) > 0 {
|
||||
if build.IsRemoteURL(targets[0]) {
|
||||
url = targets[0]
|
||||
targets = targets[1:]
|
||||
if len(targets) > 0 {
|
||||
if build.IsRemoteURL(targets[0]) {
|
||||
cmdContext = targets[0]
|
||||
targets = targets[1:]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
url, cmdContext, targets := bakeArgs(targets)
|
||||
if len(targets) == 0 {
|
||||
targets = []string{"default"}
|
||||
}
|
||||
|
||||
callFunc, err := buildflags.ParseCallFunc(in.callFunc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
overrides := in.overrides
|
||||
if in.exportPush {
|
||||
overrides = append(overrides, "*.push=true")
|
||||
@@ -77,6 +86,9 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
|
||||
if in.exportLoad {
|
||||
overrides = append(overrides, "*.load=true")
|
||||
}
|
||||
if callFunc != nil {
|
||||
overrides = append(overrides, fmt.Sprintf("*.call=%s", callFunc.Name))
|
||||
}
|
||||
if cFlags.noCache != nil {
|
||||
overrides = append(overrides, fmt.Sprintf("*.no-cache=%t", *cFlags.noCache))
|
||||
}
|
||||
@@ -91,6 +103,11 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
|
||||
}
|
||||
contextPathHash, _ := os.Getwd()
|
||||
|
||||
ent, err := bake.ParseEntitlements(in.allow)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx2, cancel := context.WithCancel(context.TODO())
|
||||
defer cancel()
|
||||
|
||||
@@ -98,6 +115,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
|
||||
var progressConsoleDesc, progressTextDesc string
|
||||
|
||||
// instance only needed for reading remote bake files or building
|
||||
var driverType string
|
||||
if url != "" || !in.printOnly {
|
||||
b, err := builder.New(dockerCli,
|
||||
builder.WithName(in.builder),
|
||||
@@ -115,32 +133,33 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
|
||||
}
|
||||
progressConsoleDesc = fmt.Sprintf("%s:%s", b.Driver, b.Name)
|
||||
progressTextDesc = fmt.Sprintf("building with %q instance using %s driver", b.Name, b.Driver)
|
||||
driverType = b.Driver
|
||||
}
|
||||
|
||||
var term bool
|
||||
if _, err := console.ConsoleFromFile(os.Stderr); err == nil {
|
||||
term = true
|
||||
}
|
||||
attributes := bakeMetricAttributes(dockerCli, driverType, url, cmdContext, targets, &in)
|
||||
|
||||
progressMode := progressui.DisplayMode(cFlags.progress)
|
||||
printer, err := progress.NewPrinter(ctx2, os.Stderr, progressMode,
|
||||
progress.WithDesc(progressTextDesc, progressConsoleDesc),
|
||||
)
|
||||
if err != nil {
|
||||
var printer *progress.Printer
|
||||
|
||||
makePrinter := func() error {
|
||||
var err error
|
||||
printer, err = progress.NewPrinter(ctx2, os.Stderr, progressMode,
|
||||
progress.WithDesc(progressTextDesc, progressConsoleDesc),
|
||||
progress.WithMetrics(mp, attributes),
|
||||
progress.WithOnClose(func() {
|
||||
printWarnings(os.Stderr, printer.Warnings(), progressMode)
|
||||
}),
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if printer != nil {
|
||||
err1 := printer.Wait()
|
||||
if err == nil {
|
||||
err = err1
|
||||
}
|
||||
if err == nil && progressMode != progressui.QuietMode && progressMode != progressui.RawJSONMode {
|
||||
desktop.PrintBuildDetails(os.Stderr, printer.BuildRefs(), term)
|
||||
}
|
||||
}
|
||||
}()
|
||||
if err := makePrinter(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
files, inp, err := readBakeFiles(ctx, nodes, url, in.files, dockerCli.In(), printer)
|
||||
if err != nil {
|
||||
@@ -151,12 +170,29 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
|
||||
return errors.New("couldn't find a bake definition")
|
||||
}
|
||||
|
||||
tgts, grps, err := bake.ReadTargets(ctx, files, targets, overrides, map[string]string{
|
||||
defaults := map[string]string{
|
||||
// don't forget to update documentation if you add a new
|
||||
// built-in variable: docs/bake-reference.md#built-in-variables
|
||||
"BAKE_CMD_CONTEXT": cmdContext,
|
||||
"BAKE_LOCAL_PLATFORM": platforms.DefaultString(),
|
||||
})
|
||||
"BAKE_LOCAL_PLATFORM": platforms.Format(platforms.DefaultSpec()),
|
||||
}
|
||||
|
||||
if in.listTargets || in.listVars {
|
||||
cfg, pm, err := bake.ParseFiles(files, defaults)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = printer.Wait(); err != nil {
|
||||
return err
|
||||
}
|
||||
if in.listTargets {
|
||||
return printTargetList(dockerCli.Out(), cfg)
|
||||
} else if in.listVars {
|
||||
return printVars(dockerCli.Out(), pm.AllVariables)
|
||||
}
|
||||
}
|
||||
|
||||
tgts, grps, err := bake.ReadTargets(ctx, files, targets, overrides, defaults)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -189,57 +225,183 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
|
||||
}
|
||||
|
||||
if in.printOnly {
|
||||
dt, err := json.MarshalIndent(def, "", " ")
|
||||
if err = printer.Wait(); err != nil {
|
||||
return err
|
||||
}
|
||||
dtdef, err := json.MarshalIndent(def, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = printer.Wait()
|
||||
printer = nil
|
||||
if err != nil {
|
||||
_, err = fmt.Fprintln(dockerCli.Out(), string(dtdef))
|
||||
return err
|
||||
}
|
||||
|
||||
for _, opt := range bo {
|
||||
if opt.CallFunc != nil {
|
||||
cf, err := buildflags.ParseCallFunc(opt.CallFunc.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
opt.CallFunc.Name = cf.Name
|
||||
}
|
||||
}
|
||||
|
||||
exp, err := ent.Validate(bo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := exp.Prompt(ctx, &syncWriter{w: dockerCli.Err(), wait: printer.Wait}); err != nil {
|
||||
return err
|
||||
}
|
||||
if printer.IsDone() {
|
||||
// init new printer as old one was stopped to show the prompt
|
||||
if err := makePrinter(); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(dockerCli.Out(), string(dt))
|
||||
return nil
|
||||
}
|
||||
|
||||
groupRef := identity.NewID()
|
||||
var refs []string
|
||||
for k, b := range bo {
|
||||
b.Ref = identity.NewID()
|
||||
b.GroupRef = groupRef
|
||||
b.WithProvenanceResponse = len(in.metadataFile) > 0
|
||||
refs = append(refs, b.Ref)
|
||||
bo[k] = b
|
||||
}
|
||||
dt, err := json.Marshal(def)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := saveLocalStateGroup(dockerCli, groupRef, localstate.StateGroup{
|
||||
Definition: dt,
|
||||
Targets: targets,
|
||||
Inputs: overrides,
|
||||
Refs: refs,
|
||||
}); err != nil {
|
||||
if err := saveLocalStateGroup(dockerCli, in, targets, bo, overrides, def); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resp, err := build.Build(ctx, nodes, bo, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), printer)
|
||||
done := timeBuildCommand(mp, attributes)
|
||||
resp, retErr := build.Build(ctx, nodes, bo, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), printer)
|
||||
if err := printer.Wait(); retErr == nil {
|
||||
retErr = err
|
||||
}
|
||||
if retErr != nil {
|
||||
err = wrapBuildError(retErr, true)
|
||||
}
|
||||
done(err)
|
||||
|
||||
if err != nil {
|
||||
return wrapBuildError(err, true)
|
||||
return err
|
||||
}
|
||||
|
||||
if progressMode != progressui.QuietMode && progressMode != progressui.RawJSONMode {
|
||||
desktop.PrintBuildDetails(os.Stderr, printer.BuildRefs(), term)
|
||||
}
|
||||
if len(in.metadataFile) > 0 {
|
||||
dt := make(map[string]interface{})
|
||||
for t, r := range resp {
|
||||
dt[t] = decodeExporterResponse(r.ExporterResponse)
|
||||
}
|
||||
if callFunc == nil {
|
||||
if warnings := printer.Warnings(); len(warnings) > 0 && confutil.MetadataWarningsEnabled() {
|
||||
dt["buildx.build.warnings"] = warnings
|
||||
}
|
||||
}
|
||||
if err := writeMetadataFile(in.metadataFile, dt); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
var callFormatJSON bool
|
||||
jsonResults := map[string]map[string]any{}
|
||||
if callFunc != nil {
|
||||
callFormatJSON = callFunc.Format == "json"
|
||||
}
|
||||
var sep bool
|
||||
var exitCode int
|
||||
|
||||
names := make([]string, 0, len(bo))
|
||||
for name := range bo {
|
||||
names = append(names, name)
|
||||
}
|
||||
slices.Sort(names)
|
||||
|
||||
for _, name := range names {
|
||||
req := bo[name]
|
||||
if req.CallFunc == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
pf := &pb.CallFunc{
|
||||
Name: req.CallFunc.Name,
|
||||
Format: req.CallFunc.Format,
|
||||
IgnoreStatus: req.CallFunc.IgnoreStatus,
|
||||
}
|
||||
|
||||
if callFunc != nil {
|
||||
pf.Format = callFunc.Format
|
||||
pf.IgnoreStatus = callFunc.IgnoreStatus
|
||||
}
|
||||
|
||||
var res map[string]string
|
||||
if sp, ok := resp[name]; ok {
|
||||
res = sp.ExporterResponse
|
||||
}
|
||||
|
||||
if callFormatJSON {
|
||||
jsonResults[name] = map[string]any{}
|
||||
buf := &bytes.Buffer{}
|
||||
if code, err := printResult(buf, pf, res); err != nil {
|
||||
jsonResults[name]["error"] = err.Error()
|
||||
exitCode = 1
|
||||
} else if code != 0 && exitCode == 0 {
|
||||
exitCode = code
|
||||
}
|
||||
m := map[string]*json.RawMessage{}
|
||||
if err := json.Unmarshal(buf.Bytes(), &m); err == nil {
|
||||
for k, v := range m {
|
||||
jsonResults[name][k] = v
|
||||
}
|
||||
} else {
|
||||
jsonResults[name][pf.Name] = json.RawMessage(buf.Bytes())
|
||||
}
|
||||
} else {
|
||||
if sep {
|
||||
fmt.Fprintln(dockerCli.Out())
|
||||
} else {
|
||||
sep = true
|
||||
}
|
||||
fmt.Fprintf(dockerCli.Out(), "%s\n", name)
|
||||
if descr := tgts[name].Description; descr != "" {
|
||||
fmt.Fprintf(dockerCli.Out(), "%s\n", descr)
|
||||
}
|
||||
|
||||
fmt.Fprintln(dockerCli.Out())
|
||||
if code, err := printResult(dockerCli.Out(), pf, res); err != nil {
|
||||
fmt.Fprintf(dockerCli.Out(), "error: %v\n", err)
|
||||
exitCode = 1
|
||||
} else if code != 0 && exitCode == 0 {
|
||||
exitCode = code
|
||||
}
|
||||
}
|
||||
}
|
||||
if callFormatJSON {
|
||||
out := struct {
|
||||
Group map[string]*bake.Group `json:"group,omitempty"`
|
||||
Target map[string]map[string]any `json:"target"`
|
||||
}{
|
||||
Group: grps,
|
||||
Target: map[string]map[string]any{},
|
||||
}
|
||||
|
||||
for name, def := range tgts {
|
||||
out.Target[name] = map[string]any{
|
||||
"build": def,
|
||||
}
|
||||
if res, ok := jsonResults[name]; ok {
|
||||
printName := bo[name].CallFunc.Name
|
||||
if printName == "lint" {
|
||||
printName = "check"
|
||||
}
|
||||
out.Target[name][printName] = res
|
||||
}
|
||||
}
|
||||
dt, err := json.MarshalIndent(out, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(dockerCli.Out(), string(dt))
|
||||
}
|
||||
|
||||
if exitCode != 0 {
|
||||
os.Exit(exitCode)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
||||
@@ -275,18 +437,68 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
||||
flags.StringVar(&options.sbom, "sbom", "", `Shorthand for "--set=*.attest=type=sbom"`)
|
||||
flags.StringVar(&options.provenance, "provenance", "", `Shorthand for "--set=*.attest=type=provenance"`)
|
||||
flags.StringArrayVar(&options.overrides, "set", nil, `Override target value (e.g., "targetpattern.key=value")`)
|
||||
flags.StringVar(&options.callFunc, "call", "build", `Set method for evaluating build ("check", "outline", "targets")`)
|
||||
flags.StringArrayVar(&options.allow, "allow", nil, "Allow build to access specified resources")
|
||||
|
||||
flags.VarPF(callAlias(&options.callFunc, "check"), "check", "", `Shorthand for "--call=check"`)
|
||||
flags.Lookup("check").NoOptDefVal = "true"
|
||||
|
||||
flags.BoolVar(&options.listTargets, "list-targets", false, "List available targets")
|
||||
cobrautil.MarkFlagsExperimental(flags, "list-targets")
|
||||
flags.MarkHidden("list-targets")
|
||||
|
||||
flags.BoolVar(&options.listVars, "list-variables", false, "List defined variables")
|
||||
cobrautil.MarkFlagsExperimental(flags, "list-variables")
|
||||
flags.MarkHidden("list-variables")
|
||||
|
||||
commonBuildFlags(&cFlags, flags)
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func saveLocalStateGroup(dockerCli command.Cli, ref string, lsg localstate.StateGroup) error {
func saveLocalStateGroup(dockerCli command.Cli, in bakeOptions, targets []string, bo map[string]build.Options, overrides []string, def any) error {
prm := confutil.MetadataProvenance()
if len(in.metadataFile) == 0 {
prm = confutil.MetadataProvenanceModeDisabled
}
groupRef := identity.NewID()
refs := make([]string, 0, len(bo))
for k, b := range bo {
b.Ref = identity.NewID()
b.GroupRef = groupRef
b.ProvenanceResponseMode = prm
refs = append(refs, b.Ref)
bo[k] = b
}
l, err := localstate.New(confutil.ConfigDir(dockerCli))
if err != nil {
return err
}
return l.SaveGroup(ref, lsg)
dtdef, err := json.MarshalIndent(def, "", " ")
if err != nil {
return err
}
return l.SaveGroup(groupRef, localstate.StateGroup{
Definition: dtdef,
Targets: targets,
Inputs: overrides,
Refs: refs,
})
}

// bakeArgs will retrieve the remote url, command context, and targets
// from the command line arguments.
func bakeArgs(args []string) (url, cmdContext string, targets []string) {
cmdContext, targets = "cwd://", args
if len(targets) == 0 || !build.IsRemoteURL(targets[0]) {
return url, cmdContext, targets
}
url, targets = targets[0], targets[1:]
if len(targets) == 0 || !build.IsRemoteURL(targets[0]) {
return url, cmdContext, targets
}
cmdContext, targets = targets[0], targets[1:]
return url, cmdContext, targets
}

func readBakeFiles(ctx context.Context, nodes []builder.Node, url string, names []string, stdin io.Reader, pw progress.Writer) (files []bake.File, inp *bake.Input, err error) {
@@ -331,3 +543,157 @@ func readBakeFiles(ctx context.Context, nodes []builder.Node, url string, names

return
}

func printVars(w io.Writer, vars []*hclparser.Variable) error {
slices.SortFunc(vars, func(a, b *hclparser.Variable) int {
return cmp.Compare(a.Name, b.Name)
})
tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
defer tw.Flush()

tw.Write([]byte("VARIABLE\tVALUE\tDESCRIPTION\n"))

for _, v := range vars {
var value string
if v.Value != nil {
value = *v.Value
} else {
value = "<null>"
}
fmt.Fprintf(tw, "%s\t%s\t%s\n", v.Name, value, v.Description)
}
return nil
}

func printTargetList(w io.Writer, cfg *bake.Config) error {
tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
defer tw.Flush()

tw.Write([]byte("TARGET\tDESCRIPTION\n"))

type targetOrGroup struct {
name string
target *bake.Target
group *bake.Group
}

list := make([]targetOrGroup, 0, len(cfg.Targets)+len(cfg.Groups))
for _, tgt := range cfg.Targets {
list = append(list, targetOrGroup{name: tgt.Name, target: tgt})
}
for _, grp := range cfg.Groups {
list = append(list, targetOrGroup{name: grp.Name, group: grp})
}

slices.SortFunc(list, func(a, b targetOrGroup) int {
return cmp.Compare(a.name, b.name)
})

for _, tgt := range list {
if strings.HasPrefix(tgt.name, "_") {
// convention for a private target
continue
}
var descr string
if tgt.target != nil {
descr = tgt.target.Description
} else if tgt.group != nil {
descr = tgt.group.Description

if len(tgt.group.Targets) > 0 {
slices.Sort(tgt.group.Targets)
names := strings.Join(tgt.group.Targets, ", ")
if descr != "" {
descr += " (" + names + ")"
} else {
descr = names
}
}
}
fmt.Fprintf(tw, "%s\t%s\n", tgt.name, descr)
}

return nil
}

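printVars and printTargetList above render aligned tables with text/tabwriter using the same writer parameters. A self-contained sketch of that pattern; the rows shown are made up for illustration:

    // Sketch of the tabwriter layout used by printVars/printTargetList.
    package main

    import (
        "fmt"
        "os"
        "text/tabwriter"
    )

    func main() {
        // minwidth=1, tabwidth=8, padding=1, padchar='\t', flags=0,
        // matching the calls in the diff above.
        tw := tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0)
        defer tw.Flush()

        fmt.Fprintln(tw, "TARGET\tDESCRIPTION")
        fmt.Fprintf(tw, "%s\t%s\n", "app", "build the application image")
        fmt.Fprintf(tw, "%s\t%s\n", "default", "app")
    }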
func bakeMetricAttributes(dockerCli command.Cli, driverType, url, cmdContext string, targets []string, options *bakeOptions) attribute.Set {
return attribute.NewSet(
commandNameAttribute.String("bake"),
attribute.Stringer(string(commandOptionsHash), &bakeOptionsHash{
bakeOptions: options,
configDir: confutil.ConfigDir(dockerCli),
url: url,
cmdContext: cmdContext,
targets: targets,
}),
driverNameAttribute.String(options.builder),
driverTypeAttribute.String(driverType),
)
}

type bakeOptionsHash struct {
*bakeOptions
configDir string
url string
cmdContext string
targets []string
result string
resultOnce sync.Once
}

func (o *bakeOptionsHash) String() string {
o.resultOnce.Do(func() {
url := o.url
cmdContext := o.cmdContext
if cmdContext == "cwd://" {
// Resolve the directory if the cmdContext is the current working directory.
cmdContext = osutil.GetWd()
}

// Sort the inputs for files and targets since the ordering
// doesn't matter, but avoid modifying the original slice.
files := immutableSort(o.files)
targets := immutableSort(o.targets)

joinedFiles := strings.Join(files, ",")
joinedTargets := strings.Join(targets, ",")
salt := confutil.TryNodeIdentifier(o.configDir)

h := sha256.New()
for _, s := range []string{url, cmdContext, joinedFiles, joinedTargets, salt} {
_, _ = io.WriteString(h, s)
h.Write([]byte{0})
}
o.result = hex.EncodeToString(h.Sum(nil))
})
return o.result
}

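bakeOptionsHash.String above hashes the URL, command context, sorted files/targets and a per-node salt (confutil.TryNodeIdentifier), separating each field with a NUL byte so that field boundaries stay unambiguous. A standalone sketch of that hashing scheme; the sample inputs are illustrative only:

    // Sketch of the NUL-separated hashing used for the command.options.hash metric.
    package main

    import (
        "crypto/sha256"
        "encoding/hex"
        "fmt"
        "io"
    )

    func optionsHash(fields ...string) string {
        h := sha256.New()
        for _, s := range fields {
            _, _ = io.WriteString(h, s)
            h.Write([]byte{0}) // field separator keeps ("ab","c") distinct from ("a","bc")
        }
        return hex.EncodeToString(h.Sum(nil))
    }

    func main() {
        fmt.Println(optionsHash("https://example.com/repo.git", "cwd://", "docker-bake.hcl", "default"))
    }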
// immutableSort will sort the entries in s without modifying the original slice.
func immutableSort(s []string) []string {
if !sort.StringsAreSorted(s) {
cpy := make([]string, len(s))
copy(cpy, s)
sort.Strings(cpy)
return cpy
}
return s
}

type syncWriter struct {
w io.Writer
once sync.Once
wait func() error
}

func (w *syncWriter) Write(p []byte) (n int, err error) {
w.once.Do(func() {
if w.wait != nil {
err = w.wait()
}
})
if err != nil {
return 0, err
}
return w.w.Write(p)
}

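syncWriter above delays the first write until a wait callback has returned; in the bake code path it wraps dockerCli.Err() with printer.Wait so the entitlement prompt is not interleaved with progress output. A self-contained sketch of that write-gate pattern; waitForPrinter stands in for printer.Wait and is purely illustrative:

    // Sketch of the gated-writer pattern used by syncWriter.
    package main

    import (
        "fmt"
        "io"
        "os"
        "sync"
        "time"
    )

    type gatedWriter struct {
        w    io.Writer
        once sync.Once
        wait func() error
    }

    func (g *gatedWriter) Write(p []byte) (n int, err error) {
        g.once.Do(func() {
            if g.wait != nil {
                err = g.wait() // block the first write until the gate opens
            }
        })
        if err != nil {
            return 0, err
        }
        return g.w.Write(p)
    }

    func main() {
        waitForPrinter := func() error { time.Sleep(100 * time.Millisecond); return nil }
        w := &gatedWriter{w: os.Stderr, wait: waitForPrinter}
        fmt.Fprintln(w, "prompt shown only after progress output has been flushed")
    }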
@@ -5,12 +5,10 @@ import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"encoding/csv"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
@@ -39,7 +37,6 @@ import (
|
||||
"github.com/docker/buildx/util/osutil"
|
||||
"github.com/docker/buildx/util/progress"
|
||||
"github.com/docker/buildx/util/tracing"
|
||||
"github.com/docker/cli-docs-tool/annotation"
|
||||
"github.com/docker/cli/cli"
|
||||
"github.com/docker/cli/cli/command"
|
||||
dockeropts "github.com/docker/cli/opts"
|
||||
@@ -59,6 +56,7 @@ import (
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/tonistiigi/go-csvvalue"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"google.golang.org/grpc/codes"
|
||||
@@ -81,7 +79,7 @@ type buildOptions struct {
|
||||
noCacheFilter []string
|
||||
outputs []string
|
||||
platforms []string
|
||||
printFunc string
|
||||
callFunc string
|
||||
secrets []string
|
||||
shmSize dockeropts.MemBytes
|
||||
ssh []string
|
||||
@@ -201,12 +199,16 @@ func (o *buildOptions) toControllerOptions() (*controllerapi.BuildOptions, error
|
||||
return nil, err
|
||||
}
|
||||
|
||||
opts.PrintFunc, err = buildflags.ParsePrintFunc(o.printFunc)
|
||||
opts.CallFunc, err = buildflags.ParseCallFunc(o.callFunc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
opts.WithProvenanceResponse = opts.PrintFunc == nil && len(o.metadataFile) > 0
|
||||
prm := confutil.MetadataProvenance()
|
||||
if opts.CallFunc != nil || len(o.metadataFile) == 0 {
|
||||
prm = confutil.MetadataProvenanceModeDisabled
|
||||
}
|
||||
opts.ProvenanceResponseMode = string(prm)
|
||||
|
||||
return &opts, nil
|
||||
}
|
||||
@@ -222,15 +224,22 @@ func (o *buildOptions) toDisplayMode() (progressui.DisplayMode, error) {
|
||||
return progress, nil
|
||||
}
|
||||
|
||||
func buildMetricAttributes(dockerCli command.Cli, b *builder.Builder, options *buildOptions) attribute.Set {
|
||||
const (
|
||||
commandNameAttribute = attribute.Key("command.name")
|
||||
commandOptionsHash = attribute.Key("command.options.hash")
|
||||
driverNameAttribute = attribute.Key("driver.name")
|
||||
driverTypeAttribute = attribute.Key("driver.type")
|
||||
)
|
||||
|
||||
func buildMetricAttributes(dockerCli command.Cli, driverType string, options *buildOptions) attribute.Set {
|
||||
return attribute.NewSet(
|
||||
attribute.String("command.name", "build"),
|
||||
attribute.Stringer("command.options.hash", &buildOptionsHash{
|
||||
commandNameAttribute.String("build"),
|
||||
attribute.Stringer(string(commandOptionsHash), &buildOptionsHash{
|
||||
buildOptions: options,
|
||||
configDir: confutil.ConfigDir(dockerCli),
|
||||
}),
|
||||
attribute.String("driver.name", options.builder),
|
||||
attribute.String("driver.type", b.Driver),
|
||||
driverNameAttribute.String(options.builder),
|
||||
driverTypeAttribute.String(driverType),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -306,12 +315,13 @@ func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
driverType := b.Driver
|
||||
|
||||
var term bool
|
||||
if _, err := console.ConsoleFromFile(os.Stderr); err == nil {
|
||||
term = true
|
||||
}
|
||||
attributes := buildMetricAttributes(dockerCli, b, &options)
|
||||
attributes := buildMetricAttributes(dockerCli, driverType, &options)
|
||||
|
||||
ctx2, cancel := context.WithCancel(context.TODO())
|
||||
defer cancel()
|
||||
@@ -365,13 +375,22 @@ func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions)
|
||||
return errors.Wrap(err, "writing image ID file")
|
||||
}
|
||||
}
|
||||
if opts.PrintFunc != nil {
|
||||
if err := printResult(opts.PrintFunc, resp.ExporterResponse); err != nil {
|
||||
if options.metadataFile != "" {
|
||||
dt := decodeExporterResponse(resp.ExporterResponse)
|
||||
if opts.CallFunc == nil {
|
||||
if warnings := printer.Warnings(); len(warnings) > 0 && confutil.MetadataWarningsEnabled() {
|
||||
dt["buildx.build.warnings"] = warnings
|
||||
}
|
||||
}
|
||||
if err := writeMetadataFile(options.metadataFile, dt); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if options.metadataFile != "" {
|
||||
if err := writeMetadataFile(options.metadataFile, decodeExporterResponse(resp.ExporterResponse)); err != nil {
|
||||
}
|
||||
if opts.CallFunc != nil {
|
||||
if exitcode, err := printResult(dockerCli.Out(), opts.CallFunc, resp.ExporterResponse); err != nil {
|
||||
return err
|
||||
} else if exitcode != 0 {
|
||||
os.Exit(exitcode)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
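The hunk above writes the decoded exporter response, plus any build warnings when metadata warnings are enabled (confutil.MetadataWarningsEnabled), to the --metadata-file path via writeMetadataFile. That helper's body is not part of this diff, so the following is only a plausible stand-in showing the shape of such a write; the function name and sample keys are hypothetical:

    // Hypothetical stand-in for a metadata-file writer (not the real writeMetadataFile).
    package main

    import (
        "encoding/json"
        "os"
    )

    func writeMetadataSketch(filename string, dt map[string]interface{}) error {
        b, err := json.MarshalIndent(dt, "", "  ")
        if err != nil {
            return err
        }
        return os.WriteFile(filename, append(b, '\n'), 0644)
    }

    func main() {
        _ = writeMetadataSketch("metadata.json", map[string]interface{}{
            "image.name":            "docker.io/library/app:latest",
            "buildx.build.warnings": []string{"example warning"},
        })
    }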
@@ -522,9 +541,12 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions, debugConfig *debug.D
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "build [OPTIONS] PATH | URL | -",
|
||||
Aliases: []string{"b"},
|
||||
Short: "Start a build",
|
||||
Args: cli.ExactArgs(1),
|
||||
Aliases: []string{"b"},
|
||||
Annotations: map[string]string{
|
||||
"aliases": "docker build, docker builder build, docker image build, docker buildx b",
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
options.contextPath = args[0]
|
||||
options.builder = rootOpts.builder
|
||||
@@ -563,7 +585,6 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions, debugConfig *debug.D
|
||||
flags := cmd.Flags()
|
||||
|
||||
flags.StringSliceVar(&options.extraHosts, "add-host", []string{}, `Add a custom host-to-IP mapping (format: "host:ip")`)
|
||||
flags.SetAnnotation("add-host", annotation.ExternalURL, []string{"https://docs.docker.com/reference/cli/docker/image/build/#add-host"})
|
||||
|
||||
flags.StringSliceVar(&options.allow, "allow", []string{}, `Allow extra privileged entitlement (e.g., "network.host", "security.insecure")`)
|
||||
|
||||
@@ -576,12 +597,10 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions, debugConfig *debug.D
|
||||
flags.StringArrayVar(&options.cacheTo, "cache-to", []string{}, `Cache export destinations (e.g., "user/app:cache", "type=local,dest=path/to/dir")`)
|
||||
|
||||
flags.StringVar(&options.cgroupParent, "cgroup-parent", "", `Set the parent cgroup for the "RUN" instructions during build`)
|
||||
flags.SetAnnotation("cgroup-parent", annotation.ExternalURL, []string{"https://docs.docker.com/reference/cli/docker/image/build/#cgroup-parent"})
|
||||
|
||||
flags.StringArrayVar(&options.contexts, "build-context", []string{}, "Additional build contexts (e.g., name=path)")
|
||||
|
||||
flags.StringVarP(&options.dockerfileName, "file", "f", "", `Name of the Dockerfile (default: "PATH/Dockerfile")`)
|
||||
flags.SetAnnotation("file", annotation.ExternalURL, []string{"https://docs.docker.com/reference/cli/docker/image/build/#file"})
|
||||
|
||||
flags.StringVar(&options.imageIDFile, "iidfile", "", "Write the image ID to a file")
|
||||
|
||||
@@ -608,10 +627,8 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions, debugConfig *debug.D
|
||||
flags.StringArrayVar(&options.ssh, "ssh", []string{}, `SSH agent socket or keys to expose to the build (format: "default|<id>[=<socket>|<key>[,<key>]]")`)
|
||||
|
||||
flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, `Name and optionally a tag (format: "name:tag")`)
|
||||
flags.SetAnnotation("tag", annotation.ExternalURL, []string{"https://docs.docker.com/reference/cli/docker/image/build/#tag"})
|
||||
|
||||
flags.StringVar(&options.target, "target", "", "Set the target build stage to build")
|
||||
flags.SetAnnotation("target", annotation.ExternalURL, []string{"https://docs.docker.com/reference/cli/docker/image/build/#target"})
|
||||
|
||||
options.ulimits = dockeropts.NewUlimitOpt(nil)
|
||||
flags.Var(options.ulimits, "ulimit", "Ulimit options")
|
||||
@@ -628,8 +645,8 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions, debugConfig *debug.D
|
||||
cobrautil.MarkFlagsExperimental(flags, "root", "detach", "server-config")
|
||||
}
|
||||
|
||||
flags.StringVar(&options.printFunc, "call", "build", `Set method for evaluating build ("check", "outline", "targets")`)
|
||||
flags.VarPF(callAlias(options, "check"), "check", "", `Shorthand for "--call=check"`)
|
||||
flags.StringVar(&options.callFunc, "call", "build", `Set method for evaluating build ("check", "outline", "targets")`)
|
||||
flags.VarPF(callAlias(&options.callFunc, "check"), "check", "", `Shorthand for "--call=check"`)
|
||||
flags.Lookup("check").NoOptDefVal = "true"
|
||||
|
||||
// hidden flags
|
||||
@@ -638,7 +655,7 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions, debugConfig *debug.D
|
||||
var ignoreBool bool
|
||||
var ignoreInt int64
|
||||
|
||||
flags.StringVar(&options.printFunc, "print", "", "Print result of information request (e.g., outline, targets)")
|
||||
flags.StringVar(&options.callFunc, "print", "", "Print result of information request (e.g., outline, targets)")
|
||||
cobrautil.MarkFlagsExperimental(flags, "print")
|
||||
flags.MarkHidden("print")
|
||||
|
||||
@@ -699,7 +716,7 @@ type commonFlags struct {
|
||||
|
||||
func commonBuildFlags(options *commonFlags, flags *pflag.FlagSet) {
|
||||
options.noCache = flags.Bool("no-cache", false, "Do not use cache when building the image")
|
||||
flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty"). Use plain to show container output`)
|
||||
flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty", "rawjson"). Use plain to show container output`)
|
||||
options.pull = flags.Bool("pull", false, "Always attempt to pull all referenced images")
|
||||
flags.StringVar(&options.metadataFile, "metadata-file", "", "Write build result metadata to a file")
|
||||
}
|
||||
@@ -725,9 +742,17 @@ func writeMetadataFile(filename string, dt interface{}) error {
}

func decodeExporterResponse(exporterResponse map[string]string) map[string]interface{} {
decFunc := func(k, v string) ([]byte, error) {
if k == "result.json" {
// result.json is part of metadata response for subrequests which
// is already a JSON object: https://github.com/moby/buildkit/blob/f6eb72f2f5db07ddab89ac5e2bd3939a6444f4be/frontend/dockerui/requests.go#L100-L102
return []byte(v), nil
}
return base64.StdEncoding.DecodeString(v)
}
out := make(map[string]interface{})
for k, v := range exporterResponse {
dt, err := base64.StdEncoding.DecodeString(v)
dt, err := decFunc(k, v)
if err != nil {
out[k] = v
continue
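The hunk above teaches decodeExporterResponse to pass result.json through untouched (it is already a JSON object) while other exporter values are still run through base64 decoding. A standalone sketch of just that branching rule; the sample keys and values are made up:

    // Sketch of the decode rule introduced above for exporter responses.
    package main

    import (
        "encoding/base64"
        "fmt"
    )

    func decode(k, v string) ([]byte, error) {
        if k == "result.json" {
            return []byte(v), nil // subrequest metadata is already JSON
        }
        return base64.StdEncoding.DecodeString(v)
    }

    func main() {
        samples := map[string]string{
            "result.json": `{"warnings":[]}`,
            "example.key": base64.StdEncoding.EncodeToString([]byte("made-up value")),
        }
        for k, v := range samples {
            dt, err := decode(k, v)
            fmt.Println(k, string(dt), err)
        }
    }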
@@ -829,7 +854,7 @@ func printWarnings(w io.Writer, warnings []client.VertexWarning, mode progressui
|
||||
fmt.Fprintf(sb, "%d warnings found", len(warnings))
|
||||
}
|
||||
if logrus.GetLevel() < logrus.DebugLevel {
|
||||
fmt.Fprintf(sb, " (use --debug to expand)")
|
||||
fmt.Fprintf(sb, " (use docker --debug to expand)")
|
||||
}
|
||||
fmt.Fprintf(sb, ":\n")
|
||||
fmt.Fprint(w, aec.Apply(sb.String(), aec.YellowF))
|
||||
@@ -857,47 +882,95 @@ func printWarnings(w io.Writer, warnings []client.VertexWarning, mode progressui
|
||||
}
|
||||
}
|
||||
|
||||
func printResult(f *controllerapi.PrintFunc, res map[string]string) error {
|
||||
func printResult(w io.Writer, f *controllerapi.CallFunc, res map[string]string) (int, error) {
|
||||
switch f.Name {
|
||||
case "outline":
|
||||
return printValue(outline.PrintOutline, outline.SubrequestsOutlineDefinition.Version, f.Format, res)
|
||||
return 0, printValue(w, outline.PrintOutline, outline.SubrequestsOutlineDefinition.Version, f.Format, res)
|
||||
case "targets":
|
||||
return printValue(targets.PrintTargets, targets.SubrequestsTargetsDefinition.Version, f.Format, res)
|
||||
return 0, printValue(w, targets.PrintTargets, targets.SubrequestsTargetsDefinition.Version, f.Format, res)
|
||||
case "subrequests.describe":
|
||||
return printValue(subrequests.PrintDescribe, subrequests.SubrequestsDescribeDefinition.Version, f.Format, res)
|
||||
return 0, printValue(w, subrequests.PrintDescribe, subrequests.SubrequestsDescribeDefinition.Version, f.Format, res)
|
||||
case "lint":
|
||||
return printValue(lint.PrintLintViolations, lint.SubrequestLintDefinition.Version, f.Format, res)
|
||||
lintResults := lint.LintResults{}
|
||||
if result, ok := res["result.json"]; ok {
|
||||
if err := json.Unmarshal([]byte(result), &lintResults); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
warningCount := len(lintResults.Warnings)
|
||||
if f.Format != "json" && warningCount > 0 {
|
||||
var warningCountMsg string
|
||||
if warningCount == 1 {
|
||||
warningCountMsg = "1 warning has been found!"
|
||||
} else if warningCount > 1 {
|
||||
warningCountMsg = fmt.Sprintf("%d warnings have been found!", warningCount)
|
||||
}
|
||||
fmt.Fprintf(w, "Check complete, %s\n", warningCountMsg)
|
||||
}
|
||||
|
||||
err := printValue(w, printLintViolationsWrapper, lint.SubrequestLintDefinition.Version, f.Format, res)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if lintResults.Error != nil {
|
||||
// Print the error message and the source
|
||||
// Normally, we would use `errdefs.WithSource` to attach the source to the
|
||||
// error and let the error be printed by the handling that's already in place,
|
||||
// but here we want to print the error in a way that's consistent with how
|
||||
// the lint warnings are printed via the `lint.PrintLintViolations` function,
|
||||
// which differs from the default error printing.
|
||||
if f.Format != "json" && len(lintResults.Warnings) > 0 {
|
||||
fmt.Fprintln(w)
|
||||
}
|
||||
lintBuf := bytes.NewBuffer([]byte(lintResults.Error.Message + "\n"))
|
||||
sourceInfo := lintResults.Sources[lintResults.Error.Location.SourceIndex]
|
||||
source := errdefs.Source{
|
||||
Info: sourceInfo,
|
||||
Ranges: lintResults.Error.Location.Ranges,
|
||||
}
|
||||
source.Print(lintBuf)
|
||||
return 0, errors.New(lintBuf.String())
|
||||
} else if len(lintResults.Warnings) == 0 && f.Format != "json" {
|
||||
fmt.Fprintln(w, "Check complete, no warnings found.")
|
||||
}
|
||||
default:
|
||||
if dt, ok := res["result.json"]; ok && f.Format == "json" {
|
||||
fmt.Println(dt)
|
||||
fmt.Fprintln(w, dt)
|
||||
} else if dt, ok := res["result.txt"]; ok {
|
||||
fmt.Print(dt)
|
||||
fmt.Fprint(w, dt)
|
||||
} else {
|
||||
log.Printf("%s %+v", f, res)
|
||||
fmt.Fprintf(w, "%s %+v\n", f, res)
|
||||
}
|
||||
}
|
||||
if v, ok := res["result.statuscode"]; !f.IgnoreStatus && ok {
|
||||
if n, err := strconv.Atoi(v); err == nil && n != 0 {
|
||||
os.Exit(n)
|
||||
return n, nil
|
||||
}
|
||||
}
|
||||
return nil
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
type printFunc func([]byte, io.Writer) error
|
||||
type callFunc func([]byte, io.Writer) error
|
||||
|
||||
func printValue(printer printFunc, version string, format string, res map[string]string) error {
|
||||
func printValue(w io.Writer, printer callFunc, version string, format string, res map[string]string) error {
|
||||
if format == "json" {
|
||||
fmt.Fprintln(os.Stdout, res["result.json"])
|
||||
fmt.Fprintln(w, res["result.json"])
|
||||
return nil
|
||||
}
|
||||
|
||||
if res["version"] != "" && versions.LessThan(version, res["version"]) && res["result.txt"] != "" {
|
||||
// structure is too new and we don't know how to print it
|
||||
fmt.Fprint(os.Stdout, res["result.txt"])
|
||||
fmt.Fprint(w, res["result.txt"])
|
||||
return nil
|
||||
}
|
||||
return printer([]byte(res["result.json"]), os.Stdout)
|
||||
return printer([]byte(res["result.json"]), w)
|
||||
}
|
||||
|
||||
// FIXME: remove once https://github.com/docker/buildx/pull/2672 is sorted
|
||||
func printLintViolationsWrapper(dt []byte, w io.Writer) error {
|
||||
return lint.PrintLintViolations(dt, w, nil)
|
||||
}
|
||||
|
||||
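printResult in the hunk above now returns the subrequest's status code instead of calling os.Exit inside the helper, so runBake and runBuild can aggregate codes across targets and exit once at the top. A generic sketch of that pattern, with a hypothetical checkTarget standing in for the per-target call:

    // Sketch of the "return a code, exit once at the top" flow used for --call.
    package main

    import (
        "fmt"
        "os"
    )

    func checkTarget(name string) (int, error) {
        // pretend one target has lint findings (non-zero status)
        if name == "app" {
            return 3, nil
        }
        return 0, nil
    }

    func main() {
        exitCode := 0
        for _, name := range []string{"base", "app"} {
            code, err := checkTarget(name)
            if err != nil {
                fmt.Fprintf(os.Stderr, "error: %v\n", err)
                exitCode = 1
            } else if code != 0 && exitCode == 0 {
                exitCode = code
            }
        }
        if exitCode != 0 {
            os.Exit(exitCode)
        }
    }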
type invokeConfig struct {
@@ -947,9 +1020,9 @@ func (cfg *invokeConfig) parseInvokeConfig(invoke, on string) error {
return nil
}

csvReader := csv.NewReader(strings.NewReader(invoke))
csvReader.LazyQuotes = true
fields, err := csvReader.Read()
csvParser := csvvalue.NewParser()
csvParser.LazyQuotes = true
fields, err := csvParser.Fields(invoke, nil)
if err != nil {
return err
}
@@ -1005,7 +1078,7 @@ func maybeJSONArray(v string) []string {
return []string{v}
}

func callAlias(options *buildOptions, value string) cobrautil.BoolFuncValue {
func callAlias(target *string, value string) cobrautil.BoolFuncValue {
return func(s string) error {
v, err := strconv.ParseBool(s)
if err != nil {
@@ -1013,7 +1086,7 @@ func callAlias(options *buildOptions, value string) cobrautil.BoolFuncValue {
}

if v {
options.printFunc = value
*target = value
}
return nil
}
@@ -80,7 +80,7 @@ func RootCmd(dockerCli command.Cli, children ...DebuggableCmd) *cobra.Command {
flags.StringVar(&controlOptions.Root, "root", "", "Specify root directory of server to connect for the monitor")
flags.BoolVar(&controlOptions.Detach, "detach", runtime.GOOS == "linux", "Detach buildx server for the monitor (supported only on linux)")
flags.StringVar(&controlOptions.ServerConfig, "server-config", "", "Specify buildx server config file for the monitor (used only when launching new server)")
flags.StringVar(&progressMode, "progress", "auto", `Set type of progress output ("auto", "plain", "tty") for the monitor. Use plain to show container output`)
flags.StringVar(&progressMode, "progress", "auto", `Set type of progress output ("auto", "plain", "tty", "rawjson") for the monitor. Use plain to show container output`)

cobrautil.MarkFlagsExperimental(flags, "invoke", "on", "root", "detach", "server-config")


@@ -5,7 +5,7 @@ import (
"net"
"os"

"github.com/containerd/containerd/platforms"
"github.com/containerd/platforms"
"github.com/docker/buildx/build"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/util/progress"
@@ -126,6 +126,6 @@ func dialStdioCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {

flags := cmd.Flags()
flags.StringVar(&opts.platform, "platform", os.Getenv("DOCKER_DEFAULT_PLATFORM"), "Target platform: this is used for node selection")
flags.StringVar(&opts.progress, "progress", "quiet", "Set type of progress output (auto, plain, tty).")
flags.StringVar(&opts.progress, "progress", "quiet", `Set type of progress output ("auto", "plain", "tty", "rawjson"). Use plain to show container output`)
return cmd
}

@@ -9,6 +9,7 @@ import (

"github.com/distribution/reference"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/util/buildflags"
"github.com/docker/buildx/util/cobrautil/completion"
"github.com/docker/buildx/util/imagetools"
"github.com/docker/buildx/util/progress"
@@ -154,7 +155,12 @@ func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, arg
}
}

dt, desc, err := r.Combine(ctx, srcs, in.annotations, in.preferIndex)
annotations, err := buildflags.ParseAnnotations(in.annotations)
if err != nil {
return errors.Wrapf(err, "failed to parse annotations")
}

dt, desc, err := r.Combine(ctx, srcs, annotations, in.preferIndex)
if err != nil {
return err
}
@@ -282,7 +288,7 @@ func createCmd(dockerCli command.Cli, opts RootOptions) *cobra.Command {
flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, "Set reference for new image")
flags.BoolVar(&options.dryrun, "dry-run", false, "Show final image instead of pushing")
flags.BoolVar(&options.actionAppend, "append", false, "Append to existing manifest")
flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty"). Use plain to show container output`)
flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty", "rawjson"). Use plain to show container output`)
flags.StringArrayVarP(&options.annotations, "annotation", "", []string{}, "Add annotation to the image")
flags.BoolVar(&options.preferIndex, "prefer-index", true, "When only a single source is specified, prefer outputting an image index or manifest list instead of performing a carbon copy")

@@ -21,6 +21,7 @@ import (
|
||||
)
|
||||
|
||||
func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Command {
|
||||
var opt rootOptions
|
||||
cmd := &cobra.Command{
|
||||
Short: "Docker Buildx",
|
||||
Long: `Extended build capabilities with BuildKit`,
|
||||
@@ -32,6 +33,10 @@ func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Comman
|
||||
HiddenDefaultCmd: true,
|
||||
},
|
||||
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
if opt.debug {
|
||||
debug.Enable()
|
||||
}
|
||||
|
||||
cmd.SetContext(appcontext.Context())
|
||||
if !isPlugin {
|
||||
return nil
|
||||
@@ -47,11 +52,6 @@ func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Comman
|
||||
cmd.TraverseChildren = true
|
||||
cmd.DisableFlagsInUseLine = true
|
||||
cli.DisableFlagsInUseLine(cmd)
|
||||
|
||||
// DEBUG=1 should perform the same as --debug at the docker root level
|
||||
if debug.IsEnabled() {
|
||||
debug.Enable()
|
||||
}
|
||||
}
|
||||
|
||||
logrus.SetFormatter(&logutil.Formatter{})
|
||||
@@ -68,16 +68,16 @@ func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Comman
|
||||
cmd.SetHelpTemplate(cmd.HelpTemplate() + "\nExperimental commands and flags are hidden. Set BUILDX_EXPERIMENTAL=1 to show them.\n")
|
||||
}
|
||||
|
||||
addCommands(cmd, dockerCli)
|
||||
addCommands(cmd, &opt, dockerCli)
|
||||
return cmd
|
||||
}
|
||||
|
||||
type rootOptions struct {
|
||||
builder string
|
||||
debug bool
|
||||
}
|
||||
|
||||
func addCommands(cmd *cobra.Command, dockerCli command.Cli) {
|
||||
opts := &rootOptions{}
|
||||
func addCommands(cmd *cobra.Command, opts *rootOptions, dockerCli command.Cli) {
|
||||
rootFlags(opts, cmd.PersistentFlags())
|
||||
|
||||
cmd.AddCommand(
|
||||
@@ -112,4 +112,5 @@ func addCommands(cmd *cobra.Command, dockerCli command.Cli) {
|
||||
|
||||
func rootFlags(options *rootOptions, flags *pflag.FlagSet) {
|
||||
flags.StringVar(&options.builder, "builder", os.Getenv("BUILDX_BUILDER"), "Override the configured builder instance")
|
||||
flags.BoolVarP(&options.debug, "debug", "D", debug.IsEnabled(), "Enable debug logging")
|
||||
}
|
||||
|
@@ -3,7 +3,6 @@ package build
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -19,9 +18,8 @@ import (
|
||||
"github.com/docker/buildx/util/platformutil"
|
||||
"github.com/docker/buildx/util/progress"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/config"
|
||||
dockeropts "github.com/docker/cli/opts"
|
||||
"github.com/docker/go-units"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/moby/buildkit/client"
|
||||
"github.com/moby/buildkit/session/auth/authprovider"
|
||||
"github.com/moby/buildkit/util/grpcerrors"
|
||||
@@ -50,7 +48,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
|
||||
Inputs: build.Inputs{
|
||||
ContextPath: in.ContextPath,
|
||||
DockerfilePath: in.DockerfileName,
|
||||
InStream: inStream,
|
||||
InStream: build.NewSyncMultiReader(inStream),
|
||||
NamedContexts: contexts,
|
||||
},
|
||||
Ref: in.Ref,
|
||||
@@ -67,7 +65,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
|
||||
Target: in.Target,
|
||||
Ulimits: controllerUlimitOpt2DockerUlimit(in.Ulimits),
|
||||
GroupRef: in.GroupRef,
|
||||
WithProvenanceResponse: in.WithProvenanceResponse,
|
||||
ProvenanceResponseMode: confutil.ParseMetadataProvenance(in.ProvenanceResponseMode),
|
||||
}
|
||||
|
||||
platforms, err := platformutil.Parse(in.Platforms)
|
||||
@@ -76,7 +74,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
|
||||
}
|
||||
opts.Platforms = platforms
|
||||
|
||||
dockerConfig := config.LoadDefaultConfigFile(os.Stderr)
|
||||
dockerConfig := dockerCli.ConfigFile()
|
||||
opts.Session = append(opts.Session, authprovider.NewDockerAuthProvider(dockerConfig, nil))
|
||||
|
||||
secrets, err := controllerapi.CreateSecrets(in.Secrets)
|
||||
@@ -136,8 +134,9 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
|
||||
|
||||
annotations, err := buildflags.ParseAnnotations(in.Annotations)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, errors.Wrap(err, "parse annotations")
|
||||
}
|
||||
|
||||
for _, o := range outputs {
|
||||
for k, v := range annotations {
|
||||
o.Attrs[k.String()] = v
|
||||
@@ -159,11 +158,11 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
|
||||
}
|
||||
opts.Allow = allow
|
||||
|
||||
if in.PrintFunc != nil {
|
||||
opts.PrintFunc = &build.PrintFunc{
|
||||
Name: in.PrintFunc.Name,
|
||||
Format: in.PrintFunc.Format,
|
||||
IgnoreStatus: in.PrintFunc.IgnoreStatus,
|
||||
if in.CallFunc != nil {
|
||||
opts.CallFunc = &build.CallFunc{
|
||||
Name: in.CallFunc.Name,
|
||||
Format: in.CallFunc.Format,
|
||||
IgnoreStatus: in.CallFunc.IgnoreStatus,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -270,9 +269,9 @@ func controllerUlimitOpt2DockerUlimit(u *controllerapi.UlimitOpt) *dockeropts.Ul
|
||||
if u == nil {
|
||||
return nil
|
||||
}
|
||||
values := make(map[string]*units.Ulimit)
|
||||
values := make(map[string]*container.Ulimit)
|
||||
for k, v := range u.Values {
|
||||
values[k] = &units.Ulimit{
|
||||
values[k] = &container.Ulimit{
|
||||
Name: v.Name,
|
||||
Hard: v.Hard,
|
||||
Soft: v.Soft,
|
||||
|
@@ -273,7 +273,7 @@ func (m *BuildRequest) GetOptions() *BuildOptions {
|
||||
type BuildOptions struct {
|
||||
ContextPath string `protobuf:"bytes,1,opt,name=ContextPath,proto3" json:"ContextPath,omitempty"`
|
||||
DockerfileName string `protobuf:"bytes,2,opt,name=DockerfileName,proto3" json:"DockerfileName,omitempty"`
|
||||
PrintFunc *PrintFunc `protobuf:"bytes,3,opt,name=PrintFunc,proto3" json:"PrintFunc,omitempty"`
|
||||
CallFunc *CallFunc `protobuf:"bytes,3,opt,name=CallFunc,proto3" json:"CallFunc,omitempty"`
|
||||
NamedContexts map[string]string `protobuf:"bytes,4,rep,name=NamedContexts,proto3" json:"NamedContexts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
Allow []string `protobuf:"bytes,5,rep,name=Allow,proto3" json:"Allow,omitempty"`
|
||||
Attests []*Attest `protobuf:"bytes,6,rep,name=Attests,proto3" json:"Attests,omitempty"`
|
||||
@@ -302,7 +302,7 @@ type BuildOptions struct {
|
||||
Ref string `protobuf:"bytes,29,opt,name=Ref,proto3" json:"Ref,omitempty"`
|
||||
GroupRef string `protobuf:"bytes,30,opt,name=GroupRef,proto3" json:"GroupRef,omitempty"`
|
||||
Annotations []string `protobuf:"bytes,31,rep,name=Annotations,proto3" json:"Annotations,omitempty"`
|
||||
WithProvenanceResponse bool `protobuf:"varint,32,opt,name=WithProvenanceResponse,proto3" json:"WithProvenanceResponse,omitempty"`
|
||||
ProvenanceResponseMode string `protobuf:"bytes,32,opt,name=ProvenanceResponseMode,proto3" json:"ProvenanceResponseMode,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
@@ -346,9 +346,9 @@ func (m *BuildOptions) GetDockerfileName() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *BuildOptions) GetPrintFunc() *PrintFunc {
|
||||
func (m *BuildOptions) GetCallFunc() *CallFunc {
|
||||
if m != nil {
|
||||
return m.PrintFunc
|
||||
return m.CallFunc
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -549,11 +549,11 @@ func (m *BuildOptions) GetAnnotations() []string {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *BuildOptions) GetWithProvenanceResponse() bool {
|
||||
func (m *BuildOptions) GetProvenanceResponseMode() string {
|
||||
if m != nil {
|
||||
return m.WithProvenanceResponse
|
||||
return m.ProvenanceResponseMode
|
||||
}
|
||||
return false
|
||||
return ""
|
||||
}
|
||||
|
||||
type ExportEntry struct {
|
||||
@@ -810,7 +810,7 @@ func (m *Secret) GetEnv() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
type PrintFunc struct {
|
||||
type CallFunc struct {
|
||||
Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"`
|
||||
Format string `protobuf:"bytes,2,opt,name=Format,proto3" json:"Format,omitempty"`
|
||||
IgnoreStatus bool `protobuf:"varint,3,opt,name=IgnoreStatus,proto3" json:"IgnoreStatus,omitempty"`
|
||||
@@ -819,45 +819,45 @@ type PrintFunc struct {
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *PrintFunc) Reset() { *m = PrintFunc{} }
|
||||
func (m *PrintFunc) String() string { return proto.CompactTextString(m) }
|
||||
func (*PrintFunc) ProtoMessage() {}
|
||||
func (*PrintFunc) Descriptor() ([]byte, []int) {
|
||||
func (m *CallFunc) Reset() { *m = CallFunc{} }
|
||||
func (m *CallFunc) String() string { return proto.CompactTextString(m) }
|
||||
func (*CallFunc) ProtoMessage() {}
|
||||
func (*CallFunc) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ed7f10298fa1d90f, []int{12}
|
||||
}
|
||||
func (m *PrintFunc) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_PrintFunc.Unmarshal(m, b)
|
||||
func (m *CallFunc) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_CallFunc.Unmarshal(m, b)
|
||||
}
|
||||
func (m *PrintFunc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_PrintFunc.Marshal(b, m, deterministic)
|
||||
func (m *CallFunc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_CallFunc.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *PrintFunc) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_PrintFunc.Merge(m, src)
|
||||
func (m *CallFunc) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_CallFunc.Merge(m, src)
|
||||
}
|
||||
func (m *PrintFunc) XXX_Size() int {
|
||||
return xxx_messageInfo_PrintFunc.Size(m)
|
||||
func (m *CallFunc) XXX_Size() int {
|
||||
return xxx_messageInfo_CallFunc.Size(m)
|
||||
}
|
||||
func (m *PrintFunc) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_PrintFunc.DiscardUnknown(m)
|
||||
func (m *CallFunc) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_CallFunc.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_PrintFunc proto.InternalMessageInfo
|
||||
var xxx_messageInfo_CallFunc proto.InternalMessageInfo
|
||||
|
||||
func (m *PrintFunc) GetName() string {
|
||||
func (m *CallFunc) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *PrintFunc) GetFormat() string {
|
||||
func (m *CallFunc) GetFormat() string {
|
||||
if m != nil {
|
||||
return m.Format
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *PrintFunc) GetIgnoreStatus() bool {
|
||||
func (m *CallFunc) GetIgnoreStatus() bool {
|
||||
if m != nil {
|
||||
return m.IgnoreStatus
|
||||
}
|
||||
@@ -2062,7 +2062,7 @@ func init() {
|
||||
proto.RegisterType((*Attest)(nil), "buildx.controller.v1.Attest")
|
||||
proto.RegisterType((*SSH)(nil), "buildx.controller.v1.SSH")
|
||||
proto.RegisterType((*Secret)(nil), "buildx.controller.v1.Secret")
|
||||
proto.RegisterType((*PrintFunc)(nil), "buildx.controller.v1.PrintFunc")
|
||||
proto.RegisterType((*CallFunc)(nil), "buildx.controller.v1.CallFunc")
|
||||
proto.RegisterType((*InspectRequest)(nil), "buildx.controller.v1.InspectRequest")
|
||||
proto.RegisterType((*InspectResponse)(nil), "buildx.controller.v1.InspectResponse")
|
||||
proto.RegisterType((*UlimitOpt)(nil), "buildx.controller.v1.UlimitOpt")
|
||||
@@ -2094,130 +2094,130 @@ func init() {
|
||||
func init() { proto.RegisterFile("controller.proto", fileDescriptor_ed7f10298fa1d90f) }
|
||||
|
||||
var fileDescriptor_ed7f10298fa1d90f = []byte{
|
||||
// 1960 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0x5f, 0x73, 0x1b, 0x49,
|
||||
0x11, 0x67, 0x25, 0x59, 0x7f, 0x5a, 0x96, 0xcf, 0x19, 0x9c, 0x30, 0xd9, 0xe4, 0x12, 0x67, 0x93,
|
||||
0x1c, 0x2a, 0x42, 0xc9, 0x77, 0x3e, 0x72, 0xb9, 0x5c, 0xee, 0xaa, 0xb0, 0x65, 0x0b, 0xfb, 0x2a,
|
||||
0xb1, 0x5d, 0x23, 0x27, 0x29, 0xb8, 0x2a, 0xae, 0x56, 0xd2, 0x58, 0xde, 0xd2, 0x6a, 0x47, 0xec,
|
||||
0x8c, 0x64, 0x8b, 0x27, 0x1e, 0xe0, 0x8d, 0xe2, 0x7b, 0x50, 0x7c, 0x04, 0x9e, 0x78, 0xe3, 0xe3,
|
||||
0xf0, 0x11, 0xa8, 0xf9, 0xb3, 0xab, 0x5d, 0x4b, 0x2b, 0xdb, 0xf0, 0xa4, 0xe9, 0x9e, 0x5f, 0x77,
|
||||
0x4f, 0xf7, 0xf6, 0x74, 0xf7, 0x08, 0xd6, 0xbb, 0x2c, 0x10, 0x21, 0xf3, 0x7d, 0x1a, 0x36, 0x46,
|
||||
0x21, 0x13, 0x0c, 0x6d, 0x74, 0xc6, 0x9e, 0xdf, 0xbb, 0x6c, 0x24, 0x36, 0x26, 0x5f, 0xd8, 0x6f,
|
||||
0xfa, 0x9e, 0x38, 0x1f, 0x77, 0x1a, 0x5d, 0x36, 0xdc, 0x1a, 0xb2, 0xce, 0x74, 0x4b, 0xa1, 0x06,
|
||||
0x9e, 0xd8, 0x72, 0x47, 0xde, 0x16, 0xa7, 0xe1, 0xc4, 0xeb, 0x52, 0xbe, 0x65, 0x84, 0xa2, 0x5f,
|
||||
0xad, 0xd2, 0x7e, 0x99, 0x29, 0xcc, 0xd9, 0x38, 0xec, 0xd2, 0x11, 0xf3, 0xbd, 0xee, 0x74, 0x6b,
|
||||
0xd4, 0xd9, 0xd2, 0x2b, 0x2d, 0xe6, 0xd4, 0x61, 0xe3, 0xad, 0xc7, 0xc5, 0x49, 0xc8, 0xba, 0x94,
|
||||
0x73, 0xca, 0x09, 0xfd, 0xc3, 0x98, 0x72, 0x81, 0xd6, 0x21, 0x4f, 0xe8, 0x19, 0xb6, 0x36, 0xad,
|
||||
0x7a, 0x85, 0xc8, 0xa5, 0x73, 0x02, 0x77, 0xaf, 0x20, 0xf9, 0x88, 0x05, 0x9c, 0xa2, 0x57, 0xb0,
|
||||
0x72, 0x18, 0x9c, 0x31, 0x8e, 0xad, 0xcd, 0x7c, 0xbd, 0xba, 0xfd, 0xa4, 0xb1, 0xc8, 0xb9, 0x86,
|
||||
0x91, 0x93, 0x48, 0xa2, 0xf1, 0x0e, 0x87, 0x6a, 0x82, 0x8b, 0x1e, 0x42, 0x25, 0x22, 0xf7, 0x8c,
|
||||
0xe1, 0x19, 0x03, 0xb5, 0x60, 0xf5, 0x30, 0x98, 0xb0, 0x01, 0x6d, 0xb2, 0xe0, 0xcc, 0xeb, 0xe3,
|
||||
0xdc, 0xa6, 0x55, 0xaf, 0x6e, 0x3b, 0x8b, 0x8d, 0x25, 0x91, 0x24, 0x25, 0xe7, 0x7c, 0x0f, 0x78,
|
||||
0xcf, 0xe3, 0x5d, 0x16, 0x04, 0xb4, 0x1b, 0x39, 0x93, 0xe9, 0x74, 0xfa, 0x4c, 0xb9, 0x2b, 0x67,
|
||||
0x72, 0x1e, 0xc0, 0xfd, 0x05, 0xba, 0x74, 0x58, 0x9c, 0xdf, 0xc3, 0xea, 0xae, 0x3c, 0x5b, 0xb6,
|
||||
0xf2, 0x6f, 0xa1, 0x74, 0x3c, 0x12, 0x1e, 0x0b, 0xf8, 0x72, 0x6f, 0x94, 0x1a, 0x83, 0x24, 0x91,
|
||||
0x88, 0xf3, 0xef, 0x55, 0x63, 0xc0, 0x30, 0xd0, 0x26, 0x54, 0x9b, 0x2c, 0x10, 0xf4, 0x52, 0x9c,
|
||||
0xb8, 0xe2, 0xdc, 0x18, 0x4a, 0xb2, 0xd0, 0x67, 0xb0, 0xb6, 0xc7, 0xba, 0x03, 0x1a, 0x9e, 0x79,
|
||||
0x3e, 0x3d, 0x72, 0x87, 0xd4, 0xb8, 0x74, 0x85, 0x8b, 0xbe, 0x93, 0x5e, 0x7b, 0x81, 0x68, 0x8d,
|
||||
0x83, 0x2e, 0xce, 0xab, 0xa3, 0x3d, 0xce, 0xfa, 0xaa, 0x06, 0x46, 0x66, 0x12, 0xe8, 0x07, 0xa8,
|
||||
0x49, 0x35, 0x3d, 0x63, 0x9a, 0xe3, 0x82, 0x4a, 0x8c, 0x97, 0xd7, 0x7b, 0xd7, 0x48, 0xc9, 0xed,
|
||||
0x07, 0x22, 0x9c, 0x92, 0xb4, 0x2e, 0xb4, 0x01, 0x2b, 0x3b, 0xbe, 0xcf, 0x2e, 0xf0, 0xca, 0x66,
|
||||
0xbe, 0x5e, 0x21, 0x9a, 0x40, 0x5f, 0x41, 0x69, 0x47, 0x08, 0xca, 0x05, 0xc7, 0x45, 0x65, 0xec,
|
||||
0xe1, 0x62, 0x63, 0x1a, 0x44, 0x22, 0x30, 0x3a, 0x86, 0x8a, 0xb2, 0xbf, 0x13, 0xf6, 0x39, 0x2e,
|
||||
0x29, 0xc9, 0x2f, 0x6e, 0x70, 0xcc, 0x58, 0x46, 0x1f, 0x71, 0xa6, 0x03, 0xed, 0x43, 0xa5, 0xe9,
|
||||
0x76, 0xcf, 0x69, 0x2b, 0x64, 0x43, 0x5c, 0x56, 0x0a, 0x7f, 0xbe, 0x58, 0xa1, 0x82, 0x19, 0x85,
|
||||
0x46, 0x4d, 0x2c, 0x89, 0x76, 0xa0, 0xa4, 0x88, 0x53, 0x86, 0x2b, 0xb7, 0x53, 0x12, 0xc9, 0x21,
|
||||
0x07, 0x56, 0x9b, 0xfd, 0x90, 0x8d, 0x47, 0x27, 0x6e, 0x48, 0x03, 0x81, 0x41, 0x7d, 0xea, 0x14,
|
||||
0x0f, 0xbd, 0x81, 0xd2, 0xfe, 0xe5, 0x88, 0x85, 0x82, 0xe3, 0xea, 0xb2, 0xcb, 0xab, 0x41, 0xc6,
|
||||
0x80, 0x91, 0x40, 0x8f, 0x00, 0xf6, 0x2f, 0x45, 0xe8, 0x1e, 0x30, 0x19, 0xf6, 0x55, 0xf5, 0x39,
|
||||
0x12, 0x1c, 0xd4, 0x82, 0xe2, 0x5b, 0xb7, 0x43, 0x7d, 0x8e, 0x6b, 0x4a, 0x77, 0xe3, 0x06, 0x81,
|
||||
0xd5, 0x02, 0xda, 0x90, 0x91, 0x96, 0x79, 0x7d, 0x44, 0xc5, 0x05, 0x0b, 0x07, 0xef, 0x58, 0x8f,
|
||||
0xe2, 0x35, 0x9d, 0xd7, 0x09, 0x16, 0x7a, 0x06, 0xb5, 0x23, 0xa6, 0x83, 0xe7, 0xf9, 0x82, 0x86,
|
||||
0xf8, 0x13, 0x75, 0x98, 0x34, 0x53, 0xdd, 0x65, 0xdf, 0x15, 0x67, 0x2c, 0x1c, 0x72, 0xbc, 0xae,
|
||||
0x10, 0x33, 0x86, 0xcc, 0xa0, 0x36, 0xed, 0x86, 0x54, 0x70, 0x7c, 0x67, 0x59, 0x06, 0x69, 0x10,
|
||||
0x89, 0xc0, 0x08, 0x43, 0xa9, 0x7d, 0x3e, 0x6c, 0x7b, 0x7f, 0xa4, 0x18, 0x6d, 0x5a, 0xf5, 0x3c,
|
||||
0x89, 0x48, 0xf4, 0x02, 0xf2, 0xed, 0xf6, 0x01, 0xfe, 0xa9, 0xd2, 0x76, 0x3f, 0x43, 0x5b, 0xfb,
|
||||
0x80, 0x48, 0x14, 0x42, 0x50, 0x38, 0x75, 0xfb, 0x1c, 0x6f, 0xa8, 0x73, 0xa9, 0x35, 0xba, 0x07,
|
||||
0xc5, 0x53, 0x37, 0xec, 0x53, 0x81, 0xef, 0x2a, 0x9f, 0x0d, 0x85, 0x5e, 0x43, 0xe9, 0xbd, 0xef,
|
||||
0x0d, 0x3d, 0xc1, 0xf1, 0xbd, 0x65, 0x97, 0x53, 0x83, 0x8e, 0x47, 0x82, 0x44, 0x78, 0x79, 0x5a,
|
||||
0x15, 0x6f, 0x1a, 0xe2, 0x9f, 0x29, 0x9d, 0x11, 0x29, 0x77, 0x4c, 0xb8, 0x30, 0xde, 0xb4, 0xea,
|
||||
0x65, 0x12, 0x91, 0xf2, 0x68, 0x27, 0x63, 0xdf, 0xc7, 0xf7, 0x15, 0x5b, 0xad, 0xf5, 0xb7, 0x97,
|
||||
0x69, 0x70, 0x32, 0xe6, 0xe7, 0xd8, 0x56, 0x3b, 0x09, 0xce, 0x6c, 0xff, 0x2d, 0x73, 0x7b, 0xf8,
|
||||
0x41, 0x72, 0x5f, 0x72, 0xd0, 0x21, 0xac, 0xb6, 0x55, 0x5b, 0x3a, 0x51, 0xcd, 0x08, 0x3f, 0x54,
|
||||
0x7e, 0x3c, 0x6f, 0xc8, 0xce, 0xd5, 0x88, 0x3a, 0x97, 0xf4, 0x21, 0xd9, 0xbc, 0x1a, 0x1a, 0x4c,
|
||||
0x52, 0xa2, 0x51, 0x5d, 0xfd, 0x74, 0x56, 0x57, 0x6d, 0x28, 0xff, 0x46, 0x26, 0xb9, 0x64, 0x3f,
|
||||
0x52, 0xec, 0x98, 0x96, 0xc9, 0xb4, 0x13, 0x04, 0x4c, 0xb8, 0xba, 0xee, 0x3e, 0x56, 0xe1, 0x4e,
|
||||
0xb2, 0xd0, 0x57, 0x70, 0xef, 0xa3, 0x27, 0xce, 0x4f, 0x42, 0x36, 0xa1, 0x81, 0x1b, 0x74, 0x69,
|
||||
0x54, 0xd1, 0xf1, 0xa6, 0x72, 0x23, 0x63, 0xd7, 0xfe, 0x35, 0xa0, 0xf9, 0xea, 0x25, 0x4f, 0x37,
|
||||
0xa0, 0xd3, 0xa8, 0xea, 0x0f, 0xe8, 0x54, 0x16, 0xb0, 0x89, 0xeb, 0x8f, 0xa3, 0xda, 0xab, 0x89,
|
||||
0x6f, 0x72, 0x5f, 0x5b, 0xf6, 0xb7, 0xb0, 0x96, 0x2e, 0x2c, 0xb7, 0x92, 0x7e, 0x0d, 0xd5, 0xc4,
|
||||
0xed, 0xb9, 0x8d, 0xa8, 0xf3, 0x2f, 0x0b, 0xaa, 0x89, 0x2b, 0xae, 0x92, 0x71, 0x3a, 0xa2, 0x46,
|
||||
0x58, 0xad, 0xd1, 0x2e, 0xac, 0xec, 0x08, 0x11, 0xca, 0x56, 0x25, 0xf3, 0xf9, 0x97, 0xd7, 0x16,
|
||||
0x8a, 0x86, 0x82, 0xeb, 0xab, 0xac, 0x45, 0x65, 0xf0, 0xf7, 0x28, 0x17, 0x5e, 0xa0, 0x42, 0xad,
|
||||
0x3a, 0x4b, 0x85, 0x24, 0x59, 0xf6, 0xd7, 0x00, 0x33, 0xb1, 0x5b, 0xf9, 0xf0, 0x0f, 0x0b, 0xee,
|
||||
0xcc, 0x55, 0xc3, 0x85, 0x9e, 0x1c, 0xa4, 0x3d, 0xd9, 0xbe, 0x61, 0x65, 0x9d, 0xf7, 0xe7, 0xff,
|
||||
0x38, 0xed, 0x11, 0x14, 0x75, 0x0b, 0x5a, 0x78, 0x42, 0x1b, 0xca, 0x7b, 0x1e, 0x77, 0x3b, 0x3e,
|
||||
0xed, 0x29, 0xd1, 0x32, 0x89, 0x69, 0xd5, 0xff, 0xd4, 0xe9, 0x75, 0xf4, 0x34, 0xe1, 0xe8, 0x5a,
|
||||
0x83, 0xd6, 0x20, 0x17, 0xcf, 0x4e, 0xb9, 0xc3, 0x3d, 0x09, 0x96, 0x8d, 0x5f, 0xbb, 0x5a, 0x21,
|
||||
0x9a, 0x70, 0x5a, 0x50, 0xd4, 0xd5, 0x6b, 0x0e, 0x6f, 0x43, 0xb9, 0xe5, 0xf9, 0x54, 0xcd, 0x0f,
|
||||
0xfa, 0xcc, 0x31, 0x2d, 0xdd, 0xdb, 0x0f, 0x26, 0xc6, 0xac, 0x5c, 0x3a, 0x3f, 0x24, 0xc6, 0x04,
|
||||
0xe9, 0x87, 0x9a, 0x28, 0x8c, 0x1f, 0x6a, 0x8e, 0xb8, 0x07, 0xc5, 0x16, 0x0b, 0x87, 0xae, 0x30,
|
||||
0xca, 0x0c, 0x25, 0x5b, 0xd3, 0x61, 0x3f, 0x60, 0x21, 0x6d, 0x0b, 0x57, 0x8c, 0xb5, 0x2b, 0x65,
|
||||
0x92, 0xe2, 0x39, 0x0e, 0xac, 0x1d, 0x06, 0x7c, 0x44, 0xbb, 0x22, 0x7b, 0x24, 0x3d, 0x86, 0x4f,
|
||||
0x62, 0x8c, 0x19, 0x46, 0x13, 0x33, 0x95, 0x75, 0xfb, 0x99, 0xea, 0xef, 0x16, 0x54, 0xe2, 0xaa,
|
||||
0x89, 0x9a, 0x50, 0x54, 0x5f, 0x2c, 0x9a, 0x6c, 0x5f, 0x5c, 0x53, 0x66, 0x1b, 0x1f, 0x14, 0xda,
|
||||
0x74, 0x2f, 0x2d, 0x6a, 0x7f, 0x84, 0x6a, 0x82, 0xbd, 0x20, 0x49, 0xb6, 0x93, 0x49, 0x92, 0xd9,
|
||||
0x76, 0xb4, 0x91, 0x64, 0x0a, 0xed, 0x41, 0x51, 0x33, 0x17, 0x86, 0x1e, 0x41, 0xe1, 0xc0, 0x0d,
|
||||
0x75, 0xfa, 0xe4, 0x89, 0x5a, 0x4b, 0x5e, 0x9b, 0x9d, 0x09, 0x15, 0xee, 0x3c, 0x51, 0x6b, 0xe7,
|
||||
0x9f, 0x16, 0xd4, 0xcc, 0x98, 0x6a, 0x22, 0x48, 0x61, 0x5d, 0xdf, 0x62, 0x1a, 0xc6, 0x95, 0x4f,
|
||||
0xfb, 0xff, 0x7a, 0x49, 0x28, 0x23, 0x68, 0xe3, 0xaa, 0xac, 0x8e, 0xc6, 0x9c, 0x4a, 0xbb, 0x09,
|
||||
0x77, 0x17, 0x42, 0x6f, 0x75, 0x8d, 0x9e, 0xc3, 0x9d, 0xd9, 0x00, 0x9e, 0x9d, 0x27, 0x1b, 0x80,
|
||||
0x92, 0x30, 0x33, 0xa0, 0x3f, 0x86, 0xaa, 0x7c, 0xd0, 0x64, 0x8b, 0x39, 0xb0, 0xaa, 0x01, 0x26,
|
||||
0x32, 0x08, 0x0a, 0x03, 0x3a, 0xd5, 0xd9, 0x50, 0x21, 0x6a, 0xed, 0xfc, 0xcd, 0x92, 0xef, 0x92,
|
||||
0xd1, 0x58, 0xbc, 0xa3, 0x9c, 0xbb, 0x7d, 0x99, 0x80, 0x85, 0xc3, 0xc0, 0x13, 0x26, 0xfb, 0x3e,
|
||||
0xcb, 0x7a, 0x9f, 0x8c, 0xc6, 0x42, 0xc2, 0x8c, 0xd4, 0xc1, 0x4f, 0x88, 0x92, 0x42, 0xaf, 0xa0,
|
||||
0xb0, 0xe7, 0x0a, 0xd7, 0xe4, 0x42, 0xc6, 0x34, 0x26, 0x11, 0x09, 0x41, 0x49, 0xee, 0x96, 0xe4,
|
||||
0x23, 0x6c, 0x34, 0x16, 0xce, 0x33, 0x58, 0xbf, 0xaa, 0x7d, 0x81, 0x6b, 0x5f, 0x42, 0x35, 0xa1,
|
||||
0x45, 0xdd, 0xed, 0xe3, 0x96, 0x02, 0x94, 0x89, 0x5c, 0x4a, 0x5f, 0xe3, 0x83, 0xac, 0x6a, 0x1b,
|
||||
0xce, 0x27, 0x50, 0x53, 0xaa, 0xe3, 0x08, 0xfe, 0x29, 0x07, 0xa5, 0x48, 0xc5, 0xab, 0x94, 0xdf,
|
||||
0x4f, 0xb2, 0xfc, 0x9e, 0x77, 0xf9, 0x25, 0x14, 0x64, 0x8d, 0x31, 0x2e, 0x67, 0x8c, 0x32, 0xad,
|
||||
0x5e, 0x42, 0x4c, 0xc2, 0xd1, 0x77, 0x50, 0x24, 0x94, 0xcb, 0xb1, 0x4b, 0x3f, 0x50, 0x9e, 0x2e,
|
||||
0x16, 0xd4, 0x98, 0x99, 0xb0, 0x11, 0x92, 0xe2, 0x6d, 0xaf, 0x1f, 0xb8, 0x3e, 0x2e, 0x2c, 0x13,
|
||||
0xd7, 0x98, 0x84, 0xb8, 0x66, 0xcc, 0xc2, 0xfd, 0x17, 0x0b, 0xaa, 0x4b, 0x43, 0xbd, 0xfc, 0x09,
|
||||
0x39, 0xf7, 0xac, 0xcd, 0xff, 0x8f, 0xcf, 0xda, 0x3f, 0xe7, 0xd2, 0x8a, 0xd4, 0x04, 0x26, 0xef,
|
||||
0xd3, 0x88, 0x79, 0x81, 0x30, 0x29, 0x9b, 0xe0, 0xc8, 0x83, 0x36, 0x87, 0x3d, 0xd3, 0x18, 0xe4,
|
||||
0x52, 0x5e, 0xb3, 0x23, 0x26, 0x79, 0x55, 0x95, 0x06, 0x9a, 0x98, 0x95, 0xfd, 0xbc, 0x29, 0xfb,
|
||||
0x32, 0x35, 0xde, 0x73, 0x1a, 0xaa, 0xc0, 0x55, 0x88, 0x5a, 0xcb, 0x4a, 0x7f, 0xc4, 0x14, 0x77,
|
||||
0x45, 0x09, 0x1b, 0x4a, 0x59, 0xb9, 0xe8, 0xe1, 0xa2, 0x0e, 0x47, 0xf3, 0x22, 0xb2, 0x72, 0xd1,
|
||||
0xc3, 0xa5, 0xd8, 0xca, 0x85, 0xb2, 0x72, 0x2a, 0xa6, 0xb8, 0xac, 0x13, 0xf0, 0x54, 0x4c, 0x65,
|
||||
0x2b, 0x22, 0xcc, 0xf7, 0x3b, 0x6e, 0x77, 0x80, 0x2b, 0xba, 0x07, 0x46, 0xb4, 0x9c, 0x55, 0x65,
|
||||
0xcc, 0x3d, 0xd7, 0x57, 0xaf, 0x9a, 0x32, 0x89, 0x48, 0x67, 0x07, 0x2a, 0x71, 0xaa, 0xc8, 0xee,
|
||||
0xd6, 0xea, 0xa9, 0x4f, 0x51, 0x23, 0xb9, 0x56, 0x2f, 0xca, 0xf2, 0xdc, 0x7c, 0x96, 0xe7, 0x13,
|
||||
0x59, 0xfe, 0x0a, 0x6a, 0xa9, 0xa4, 0x91, 0x20, 0xc2, 0x2e, 0xb8, 0x51, 0xa4, 0xd6, 0x92, 0xd7,
|
||||
0x64, 0xbe, 0x7e, 0xb7, 0xd7, 0x88, 0x5a, 0x3b, 0x4f, 0xa1, 0x96, 0x4a, 0x97, 0x45, 0x75, 0xd9,
|
||||
0x79, 0x02, 0x35, 0xdd, 0xe0, 0xb2, 0xcb, 0xce, 0x7f, 0x2c, 0x58, 0x8b, 0x30, 0xa6, 0xf2, 0xfc,
|
||||
0x0a, 0xca, 0x13, 0x1a, 0x0a, 0x7a, 0x19, 0xf7, 0x22, 0x3c, 0x3f, 0x2a, 0x7f, 0x50, 0x08, 0x12,
|
||||
0x23, 0xd1, 0x37, 0x50, 0xe6, 0x4a, 0x0f, 0x8d, 0x66, 0x9d, 0x47, 0x59, 0x52, 0xc6, 0x5e, 0x8c,
|
||||
0x47, 0x5b, 0x50, 0xf0, 0x59, 0x9f, 0xab, 0xef, 0x5e, 0xdd, 0x7e, 0x90, 0x25, 0xf7, 0x96, 0xf5,
|
||||
0x89, 0x02, 0xa2, 0x37, 0x50, 0xbe, 0x70, 0xc3, 0xc0, 0x0b, 0xfa, 0xd1, 0x7b, 0xff, 0x71, 0x96,
|
||||
0xd0, 0x47, 0x8d, 0x23, 0xb1, 0x80, 0x53, 0x93, 0x97, 0xe8, 0x8c, 0x99, 0x98, 0x38, 0xbf, 0x95,
|
||||
0xb9, 0x2c, 0x49, 0xe3, 0xfe, 0x21, 0xd4, 0xf4, 0x7d, 0xf8, 0x40, 0x43, 0x2e, 0x27, 0x47, 0x6b,
|
||||
0xd9, 0x9d, 0xdd, 0x4d, 0x42, 0x49, 0x5a, 0xd2, 0xf9, 0xd1, 0xb4, 0xbb, 0x88, 0x21, 0x73, 0x69,
|
||||
0xe4, 0x76, 0x07, 0x6e, 0x3f, 0xfa, 0x4e, 0x11, 0x29, 0x77, 0x26, 0xc6, 0x9e, 0xbe, 0xb6, 0x11,
|
||||
0x29, 0x73, 0x33, 0xa4, 0x13, 0x8f, 0xcf, 0x86, 0xd8, 0x98, 0xde, 0xfe, 0x6b, 0x09, 0xa0, 0x19,
|
||||
0x9f, 0x07, 0x9d, 0xc0, 0x8a, 0xb2, 0x87, 0x9c, 0xa5, 0xcd, 0x53, 0xf9, 0x6d, 0x3f, 0xbd, 0x41,
|
||||
0x83, 0x45, 0x1f, 0x64, 0xf2, 0xab, 0xa1, 0x07, 0x3d, 0xcb, 0x2a, 0x13, 0xc9, 0xb9, 0xc9, 0x7e,
|
||||
0x7e, 0x0d, 0xca, 0xe8, 0x7d, 0x0f, 0x45, 0x9d, 0x05, 0x28, 0xab, 0x16, 0x26, 0xf3, 0xd6, 0x7e,
|
||||
0xb6, 0x1c, 0xa4, 0x95, 0x7e, 0x6e, 0x21, 0x62, 0x2a, 0x25, 0x72, 0x96, 0xb4, 0x42, 0x73, 0x63,
|
||||
0xb2, 0x02, 0x90, 0xea, 0x3a, 0x75, 0x0b, 0x7d, 0x0f, 0x45, 0x5d, 0xeb, 0xd0, 0xa7, 0x8b, 0x05,
|
||||
0x22, 0x7d, 0xcb, 0xb7, 0xeb, 0xd6, 0xe7, 0x16, 0x7a, 0x07, 0x05, 0xd9, 0xe4, 0x51, 0x46, 0xc7,
|
||||
0x4a, 0x4c, 0x08, 0xb6, 0xb3, 0x0c, 0x62, 0xa2, 0xf8, 0x23, 0xc0, 0x6c, 0xd4, 0x40, 0x19, 0xff,
|
||||
0xda, 0xcc, 0xcd, 0x2c, 0x76, 0xfd, 0x7a, 0xa0, 0x31, 0xf0, 0x4e, 0xf6, 0xd9, 0x33, 0x86, 0x32,
|
||||
0x3b, 0x6c, 0x7c, 0x8d, 0x6c, 0x67, 0x19, 0xc4, 0xa8, 0x3b, 0x87, 0x5a, 0xea, 0x5f, 0x5d, 0xf4,
|
||||
0x8b, 0x6c, 0x27, 0xaf, 0xfe, 0x49, 0x6c, 0xbf, 0xb8, 0x11, 0xd6, 0x58, 0x12, 0xc9, 0x59, 0xcd,
|
||||
0x6c, 0xa3, 0xc6, 0x75, 0x7e, 0xa7, 0xff, 0xa1, 0xb5, 0xb7, 0x6e, 0x8c, 0xd7, 0x56, 0x77, 0x0b,
|
||||
0xbf, 0xcb, 0x8d, 0x3a, 0x9d, 0xa2, 0xfa, 0xb3, 0xfb, 0xcb, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff,
|
||||
0xc1, 0x07, 0x8b, 0x2b, 0x8a, 0x17, 0x00, 0x00,
|
||||
// 1961 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0x5f, 0x73, 0x1b, 0xb7,
|
||||
0x11, 0xef, 0x91, 0x14, 0xff, 0x2c, 0x45, 0xd9, 0x46, 0x6d, 0x17, 0x3e, 0x3b, 0xb6, 0x7c, 0xb6,
|
||||
0x53, 0x4e, 0xdd, 0xa1, 0x12, 0xa5, 0x8e, 0xe3, 0x38, 0x9d, 0xa9, 0x44, 0x89, 0x95, 0x32, 0xb6,
|
||||
0xa4, 0x01, 0x65, 0x67, 0x9a, 0xce, 0x34, 0x73, 0x22, 0x21, 0xea, 0x46, 0xa7, 0x03, 0x7b, 0x00,
|
||||
0xf5, 0xa7, 0x4f, 0x7d, 0x68, 0xdf, 0x3a, 0xfd, 0x1e, 0x9d, 0x7e, 0x84, 0x3e, 0xf5, 0xa1, 0xdf,
|
||||
0xa7, 0x1f, 0xa1, 0x83, 0x05, 0xee, 0x78, 0x14, 0x79, 0x94, 0xd4, 0x3c, 0x11, 0xbb, 0xf8, 0xed,
|
||||
0x2e, 0x76, 0x6f, 0xb1, 0xbb, 0x20, 0xdc, 0xee, 0x89, 0x48, 0xc5, 0x22, 0x0c, 0x79, 0xdc, 0x1a,
|
||||
0xc6, 0x42, 0x09, 0x72, 0xf7, 0x60, 0x14, 0x84, 0xfd, 0xf3, 0x56, 0x66, 0xe3, 0xf4, 0x73, 0xf7,
|
||||
0xed, 0x20, 0x50, 0x47, 0xa3, 0x83, 0x56, 0x4f, 0x9c, 0xac, 0x9c, 0x88, 0x83, 0x8b, 0x15, 0x44,
|
||||
0x1d, 0x07, 0x6a, 0xc5, 0x1f, 0x06, 0x2b, 0x92, 0xc7, 0xa7, 0x41, 0x8f, 0xcb, 0x15, 0x2b, 0x94,
|
||||
0xfc, 0x1a, 0x95, 0xee, 0xab, 0x5c, 0x61, 0x29, 0x46, 0x71, 0x8f, 0x0f, 0x45, 0x18, 0xf4, 0x2e,
|
||||
0x56, 0x86, 0x07, 0x2b, 0x66, 0x65, 0xc4, 0xbc, 0x26, 0xdc, 0x7d, 0x17, 0x48, 0xb5, 0x17, 0x8b,
|
||||
0x1e, 0x97, 0x92, 0x4b, 0xc6, 0xff, 0x38, 0xe2, 0x52, 0x91, 0xdb, 0x50, 0x64, 0xfc, 0x90, 0x3a,
|
||||
0xcb, 0x4e, 0xb3, 0xc6, 0xf4, 0xd2, 0xdb, 0x83, 0x7b, 0x97, 0x90, 0x72, 0x28, 0x22, 0xc9, 0xc9,
|
||||
0x6b, 0x58, 0xd8, 0x8e, 0x0e, 0x85, 0xa4, 0xce, 0x72, 0xb1, 0x59, 0x5f, 0x7d, 0xda, 0x9a, 0xe5,
|
||||
0x5c, 0xcb, 0xca, 0x69, 0x24, 0x33, 0x78, 0x4f, 0x42, 0x3d, 0xc3, 0x25, 0x8f, 0xa0, 0x96, 0x90,
|
||||
0x1b, 0xd6, 0xf0, 0x98, 0x41, 0x3a, 0xb0, 0xb8, 0x1d, 0x9d, 0x8a, 0x63, 0xde, 0x16, 0xd1, 0x61,
|
||||
0x30, 0xa0, 0x85, 0x65, 0xa7, 0x59, 0x5f, 0xf5, 0x66, 0x1b, 0xcb, 0x22, 0xd9, 0x84, 0x9c, 0xf7,
|
||||
0x2d, 0xd0, 0x8d, 0x40, 0xf6, 0x44, 0x14, 0xf1, 0x5e, 0xe2, 0x4c, 0xae, 0xd3, 0x93, 0x67, 0x2a,
|
||||
0x5c, 0x3a, 0x93, 0xf7, 0x10, 0x1e, 0xcc, 0xd0, 0x65, 0xc2, 0xe2, 0xfd, 0x01, 0x16, 0xd7, 0xf5,
|
||||
0xd9, 0xf2, 0x95, 0x7f, 0x03, 0x95, 0xdd, 0xa1, 0x0a, 0x44, 0x24, 0xe7, 0x7b, 0x83, 0x6a, 0x2c,
|
||||
0x92, 0x25, 0x22, 0xde, 0x7f, 0x16, 0xad, 0x01, 0xcb, 0x20, 0xcb, 0x50, 0x6f, 0x8b, 0x48, 0xf1,
|
||||
0x73, 0xb5, 0xe7, 0xab, 0x23, 0x6b, 0x28, 0xcb, 0x22, 0x9f, 0xc2, 0xd2, 0x86, 0xe8, 0x1d, 0xf3,
|
||||
0xf8, 0x30, 0x08, 0xf9, 0x8e, 0x7f, 0xc2, 0xad, 0x4b, 0x97, 0xb8, 0xe4, 0x6b, 0xa8, 0xb6, 0xfd,
|
||||
0x30, 0xec, 0x8c, 0xa2, 0x1e, 0x2d, 0xe2, 0xc9, 0x1e, 0xcf, 0x3e, 0x59, 0x82, 0x62, 0x29, 0x9e,
|
||||
0xfc, 0x1e, 0x1a, 0x5a, 0x47, 0xdf, 0xda, 0x95, 0xb4, 0x84, 0x59, 0xf1, 0xea, 0x6a, 0xd7, 0x5a,
|
||||
0x13, 0x72, 0x9b, 0x91, 0x8a, 0x2f, 0xd8, 0xa4, 0x2e, 0x72, 0x17, 0x16, 0xd6, 0xc2, 0x50, 0x9c,
|
||||
0xd1, 0x85, 0xe5, 0x62, 0xb3, 0xc6, 0x0c, 0x41, 0xbe, 0x84, 0xca, 0x9a, 0x52, 0x5c, 0x2a, 0x49,
|
||||
0xcb, 0x68, 0xec, 0xd1, 0x6c, 0x63, 0x06, 0xc4, 0x12, 0x30, 0xd9, 0x85, 0x1a, 0xda, 0x5f, 0x8b,
|
||||
0x07, 0x92, 0x56, 0x50, 0xf2, 0xf3, 0x6b, 0x1c, 0x33, 0x95, 0x31, 0x47, 0x1c, 0xeb, 0x20, 0x9b,
|
||||
0x50, 0x6b, 0xfb, 0xbd, 0x23, 0xde, 0x89, 0xc5, 0x09, 0xad, 0xa2, 0xc2, 0x9f, 0xe7, 0x05, 0xae,
|
||||
0x77, 0xc4, 0xad, 0x42, 0xab, 0x26, 0x95, 0x24, 0x6b, 0x50, 0x41, 0x62, 0x5f, 0xd0, 0xda, 0xcd,
|
||||
0x94, 0x24, 0x72, 0xc4, 0x83, 0xc5, 0xf6, 0x20, 0x16, 0xa3, 0xe1, 0x9e, 0x1f, 0xf3, 0x48, 0x51,
|
||||
0xc0, 0xef, 0x3c, 0xc1, 0x23, 0x6f, 0xa1, 0xb2, 0x79, 0x3e, 0x14, 0xb1, 0x92, 0xb4, 0x3e, 0xef,
|
||||
0xe6, 0x1a, 0x90, 0x35, 0x60, 0x25, 0xc8, 0x63, 0x80, 0xcd, 0x73, 0x15, 0xfb, 0x5b, 0x42, 0x87,
|
||||
0x7d, 0x11, 0x3f, 0x47, 0x86, 0x43, 0x3a, 0x50, 0x7e, 0xe7, 0x1f, 0xf0, 0x50, 0xd2, 0x06, 0xea,
|
||||
0x6e, 0x5d, 0x23, 0xb0, 0x46, 0xc0, 0x18, 0xb2, 0xd2, 0x3a, 0xa9, 0x77, 0xb8, 0x3a, 0x13, 0xf1,
|
||||
0xf1, 0x7b, 0xd1, 0xe7, 0x74, 0xc9, 0x24, 0x75, 0x86, 0x45, 0x9e, 0x43, 0x63, 0x47, 0x98, 0xe0,
|
||||
0x05, 0xa1, 0xe2, 0x31, 0xbd, 0x85, 0x87, 0x99, 0x64, 0xe2, 0x45, 0x0e, 0x7d, 0x75, 0x28, 0xe2,
|
||||
0x13, 0x49, 0x6f, 0x23, 0x62, 0xcc, 0xd0, 0x19, 0xd4, 0xe5, 0xbd, 0x98, 0x2b, 0x49, 0xef, 0xcc,
|
||||
0xcb, 0x20, 0x03, 0x62, 0x09, 0x98, 0x50, 0xa8, 0x74, 0x8f, 0x4e, 0xba, 0xc1, 0x9f, 0x38, 0x25,
|
||||
0xcb, 0x4e, 0xb3, 0xc8, 0x12, 0x92, 0xbc, 0x84, 0x62, 0xb7, 0xbb, 0x45, 0x7f, 0x8a, 0xda, 0x1e,
|
||||
0xe4, 0x68, 0xeb, 0x6e, 0x31, 0x8d, 0x22, 0x04, 0x4a, 0xfb, 0xfe, 0x40, 0xd2, 0xbb, 0x78, 0x2e,
|
||||
0x5c, 0x93, 0xfb, 0x50, 0xde, 0xf7, 0xe3, 0x01, 0x57, 0xf4, 0x1e, 0xfa, 0x6c, 0x29, 0xf2, 0x06,
|
||||
0x2a, 0x1f, 0xc2, 0xe0, 0x24, 0x50, 0x92, 0xde, 0xc7, 0xab, 0xf9, 0x64, 0xb6, 0x72, 0x03, 0xda,
|
||||
0x1d, 0x2a, 0x96, 0xe0, 0xf5, 0x69, 0x31, 0xde, 0x3c, 0xa6, 0x3f, 0x43, 0x9d, 0x09, 0xa9, 0x77,
|
||||
0x6c, 0xb8, 0x28, 0x5d, 0x76, 0x9a, 0x55, 0x96, 0x90, 0xfa, 0x68, 0x7b, 0xa3, 0x30, 0xa4, 0x0f,
|
||||
0x90, 0x8d, 0x6b, 0xf3, 0xed, 0x75, 0x1a, 0xec, 0x8d, 0xe4, 0x11, 0x75, 0x71, 0x27, 0xc3, 0x19,
|
||||
0xef, 0xbf, 0x13, 0x7e, 0x9f, 0x3e, 0xcc, 0xee, 0x6b, 0x0e, 0xd9, 0x86, 0xc5, 0x2e, 0xf6, 0xa4,
|
||||
0x3d, 0xec, 0x44, 0xf4, 0x11, 0xfa, 0xf1, 0xa2, 0xa5, 0xdb, 0x56, 0x2b, 0x69, 0x5b, 0xda, 0x87,
|
||||
0x6c, 0xe7, 0x6a, 0x19, 0x30, 0x9b, 0x10, 0x4d, 0x8a, 0xea, 0x27, 0xe3, 0xa2, 0xea, 0x42, 0xf5,
|
||||
0xb7, 0x3a, 0xc9, 0x35, 0xfb, 0x31, 0xb2, 0x53, 0x5a, 0x27, 0xd3, 0x5a, 0x14, 0x09, 0xe5, 0x9b,
|
||||
0xa2, 0xfb, 0x04, 0xc3, 0x9d, 0x65, 0x91, 0x2f, 0xe1, 0xfe, 0x5e, 0x2c, 0x4e, 0x79, 0xe4, 0x47,
|
||||
0x3d, 0x9e, 0x94, 0x72, 0xcc, 0xbc, 0x65, 0xd4, 0x95, 0xb3, 0xeb, 0xfe, 0x06, 0xc8, 0x74, 0xf5,
|
||||
0xd2, 0xa7, 0x3b, 0xe6, 0x17, 0x49, 0xc9, 0x3f, 0xe6, 0x17, 0xba, 0x80, 0x9d, 0xfa, 0xe1, 0x28,
|
||||
0x29, 0xbc, 0x86, 0xf8, 0xba, 0xf0, 0x95, 0xe3, 0x7e, 0x03, 0x4b, 0x93, 0x85, 0xe5, 0x46, 0xd2,
|
||||
0x6f, 0xa0, 0x9e, 0xb9, 0x3d, 0x37, 0x11, 0xf5, 0xfe, 0xed, 0x40, 0x3d, 0x73, 0xc5, 0x31, 0x19,
|
||||
0x2f, 0x86, 0xdc, 0x0a, 0xe3, 0x9a, 0xac, 0xc3, 0xc2, 0x9a, 0x52, 0xb1, 0xee, 0x53, 0x3a, 0x9f,
|
||||
0x7f, 0x79, 0x65, 0xa1, 0x68, 0x21, 0xdc, 0x5c, 0x65, 0x23, 0xaa, 0x83, 0xbf, 0xc1, 0xa5, 0x0a,
|
||||
0x22, 0x0c, 0x35, 0xf6, 0x95, 0x1a, 0xcb, 0xb2, 0xdc, 0xaf, 0x00, 0xc6, 0x62, 0x37, 0xf2, 0xe1,
|
||||
0x9f, 0x0e, 0xdc, 0x99, 0xaa, 0x86, 0x33, 0x3d, 0xd9, 0x9a, 0xf4, 0x64, 0xf5, 0x9a, 0x95, 0x75,
|
||||
0xda, 0x9f, 0x1f, 0x71, 0xda, 0x1d, 0x28, 0x9b, 0x16, 0x34, 0xf3, 0x84, 0x2e, 0x54, 0x37, 0x02,
|
||||
0xe9, 0x1f, 0x84, 0xbc, 0x8f, 0xa2, 0x55, 0x96, 0xd2, 0xd8, 0xff, 0xf0, 0xf4, 0x26, 0x7a, 0x86,
|
||||
0xf0, 0x4c, 0xad, 0x21, 0x4b, 0x50, 0x48, 0x07, 0xa7, 0xc2, 0xf6, 0x86, 0x06, 0xeb, 0xae, 0x6f,
|
||||
0x5c, 0xad, 0x31, 0x43, 0x78, 0x1d, 0x28, 0x9b, 0xea, 0x35, 0x85, 0x77, 0xa1, 0xda, 0x09, 0x42,
|
||||
0x8e, 0xc3, 0x83, 0x39, 0x73, 0x4a, 0x6b, 0xf7, 0x36, 0xa3, 0x53, 0x6b, 0x56, 0x2f, 0xbd, 0xef,
|
||||
0xc7, 0x33, 0x82, 0x76, 0x03, 0xa7, 0x09, 0xeb, 0x06, 0xce, 0x10, 0xf7, 0xa1, 0xdc, 0x11, 0xf1,
|
||||
0x89, 0xaf, 0xac, 0x2e, 0x4b, 0xe9, 0xce, 0xb4, 0x3d, 0x88, 0x44, 0xcc, 0xbb, 0xca, 0x57, 0x23,
|
||||
0xe3, 0x49, 0x95, 0x4d, 0xf0, 0x3c, 0x0f, 0x96, 0xb6, 0x23, 0x39, 0xe4, 0x3d, 0x95, 0x3f, 0x8e,
|
||||
0xee, 0xc2, 0xad, 0x14, 0x63, 0x07, 0xd1, 0xcc, 0x3c, 0xe5, 0xdc, 0x7c, 0x9e, 0xfa, 0x87, 0x03,
|
||||
0xb5, 0xb4, 0x68, 0x92, 0x36, 0x94, 0xf1, 0x83, 0x25, 0x53, 0xed, 0xcb, 0x2b, 0xaa, 0x6c, 0xeb,
|
||||
0x23, 0xa2, 0x6d, 0xf3, 0x32, 0xa2, 0xee, 0x77, 0x50, 0xcf, 0xb0, 0x67, 0xe4, 0xc8, 0x6a, 0x36,
|
||||
0x47, 0x72, 0xbb, 0x8e, 0x31, 0x92, 0xcd, 0xa0, 0x0d, 0x28, 0x1b, 0xe6, 0xcc, 0xd0, 0x13, 0x28,
|
||||
0x6d, 0xf9, 0xb1, 0xc9, 0x9e, 0x22, 0xc3, 0xb5, 0xe6, 0x75, 0xc5, 0xa1, 0xc2, 0x70, 0x17, 0x19,
|
||||
0xae, 0xbd, 0x7f, 0x39, 0xd0, 0xb0, 0x23, 0xaa, 0x8d, 0x20, 0x87, 0xdb, 0xe6, 0x12, 0xf3, 0x38,
|
||||
0xe1, 0x59, 0xff, 0xdf, 0xcc, 0x09, 0x65, 0x02, 0x6d, 0x5d, 0x96, 0x35, 0xd1, 0x98, 0x52, 0xe9,
|
||||
0xb6, 0xe1, 0xde, 0x4c, 0xe8, 0x8d, 0x6e, 0xd1, 0x0b, 0xb8, 0x33, 0x1e, 0xbe, 0xf3, 0xf3, 0xe4,
|
||||
0x2e, 0x90, 0x2c, 0xcc, 0x0e, 0xe7, 0x4f, 0xa0, 0xae, 0x1f, 0x33, 0xf9, 0x62, 0x1e, 0x2c, 0x1a,
|
||||
0x80, 0x8d, 0x0c, 0x81, 0xd2, 0x31, 0xbf, 0x30, 0xd9, 0x50, 0x63, 0xb8, 0xf6, 0xfe, 0xee, 0xe8,
|
||||
0x37, 0xc9, 0x70, 0xa4, 0xde, 0x73, 0x29, 0xfd, 0x81, 0x4e, 0xc0, 0xd2, 0x76, 0x14, 0x28, 0x9b,
|
||||
0x7d, 0x9f, 0xe6, 0xbd, 0x4d, 0x86, 0x23, 0xa5, 0x61, 0x56, 0x6a, 0xeb, 0x27, 0x0c, 0xa5, 0xc8,
|
||||
0x6b, 0x28, 0x6d, 0xf8, 0xca, 0xb7, 0xb9, 0x90, 0x33, 0x8c, 0x69, 0x44, 0x46, 0x50, 0x93, 0xeb,
|
||||
0x15, 0xfd, 0x00, 0x1b, 0x8e, 0x94, 0xf7, 0x1c, 0x6e, 0x5f, 0xd6, 0x3e, 0xc3, 0xb5, 0x2f, 0xa0,
|
||||
0x9e, 0xd1, 0x82, 0x57, 0x7b, 0xb7, 0x83, 0x80, 0x2a, 0xd3, 0x4b, 0xed, 0x6b, 0x7a, 0x90, 0x45,
|
||||
0x63, 0xc3, 0xbb, 0x05, 0x0d, 0x54, 0x9d, 0x46, 0xf0, 0xcf, 0x05, 0xa8, 0x24, 0x2a, 0x5e, 0x4f,
|
||||
0xf8, 0xfd, 0x34, 0xcf, 0xef, 0x69, 0x97, 0x5f, 0x41, 0x49, 0x97, 0x18, 0xeb, 0x72, 0xce, 0x24,
|
||||
0xd3, 0xe9, 0x67, 0xc4, 0x34, 0x9c, 0xfc, 0x1a, 0xca, 0x8c, 0x4b, 0x3d, 0x75, 0x99, 0xd7, 0xc9,
|
||||
0xb3, 0xd9, 0x82, 0x06, 0x33, 0x16, 0xb6, 0x42, 0x5a, 0xbc, 0x1b, 0x0c, 0x22, 0x3f, 0xa4, 0xa5,
|
||||
0x79, 0xe2, 0x06, 0x93, 0x11, 0x37, 0x8c, 0x71, 0xb8, 0xff, 0xea, 0x40, 0x7d, 0x6e, 0xa8, 0xe7,
|
||||
0x3f, 0x1f, 0xa7, 0x9e, 0xb4, 0xc5, 0xff, 0xf3, 0x49, 0xfb, 0x97, 0xc2, 0xa4, 0x22, 0x1c, 0xc0,
|
||||
0xf4, 0x7d, 0x1a, 0x8a, 0x20, 0x52, 0x36, 0x65, 0x33, 0x1c, 0x7d, 0xd0, 0xf6, 0x49, 0xdf, 0xf6,
|
||||
0x05, 0xbd, 0xd4, 0xd7, 0x6c, 0x47, 0x68, 0x5e, 0x1d, 0xd3, 0xc0, 0x10, 0xe3, 0xaa, 0x5f, 0xb4,
|
||||
0x55, 0x5f, 0xa7, 0xc6, 0x07, 0xc9, 0x63, 0x0c, 0x5c, 0x8d, 0xe1, 0x5a, 0x57, 0xfa, 0x1d, 0x81,
|
||||
0xdc, 0x05, 0x14, 0xb6, 0x14, 0x5a, 0x39, 0xeb, 0xd3, 0xb2, 0x09, 0x47, 0xfb, 0x2c, 0xb1, 0x72,
|
||||
0xd6, 0xa7, 0x95, 0xd4, 0xca, 0x19, 0x5a, 0xd9, 0x57, 0x17, 0xb4, 0x6a, 0x12, 0x70, 0x5f, 0x5d,
|
||||
0xe8, 0x4e, 0xc4, 0x44, 0x18, 0x1e, 0xf8, 0xbd, 0x63, 0x5a, 0x33, 0x2d, 0x30, 0xa1, 0xf5, 0xa8,
|
||||
0xaa, 0x63, 0x1e, 0xf8, 0x21, 0x3e, 0x6a, 0xaa, 0x2c, 0x21, 0xbd, 0x35, 0xa8, 0xa5, 0xa9, 0xa2,
|
||||
0x9b, 0x5b, 0xa7, 0x8f, 0x9f, 0xa2, 0xc1, 0x0a, 0x9d, 0x7e, 0x92, 0xe5, 0x85, 0xe9, 0x2c, 0x2f,
|
||||
0x66, 0xb2, 0xfc, 0x35, 0x34, 0x26, 0x92, 0x46, 0x83, 0x98, 0x38, 0x93, 0x56, 0x11, 0xae, 0x35,
|
||||
0xaf, 0x2d, 0x42, 0xf3, 0x66, 0x6f, 0x30, 0x5c, 0x7b, 0xcf, 0xa0, 0x31, 0x91, 0x2e, 0xb3, 0xea,
|
||||
0xb2, 0xf7, 0x14, 0x1a, 0xa6, 0xc1, 0xe5, 0x97, 0x9d, 0xff, 0x3a, 0xb0, 0x94, 0x60, 0x6c, 0xe5,
|
||||
0xf9, 0x15, 0x54, 0x4f, 0x79, 0xac, 0xf8, 0x79, 0xda, 0x8b, 0xe8, 0xf4, 0xa4, 0xfc, 0x11, 0x11,
|
||||
0x2c, 0x45, 0xea, 0x27, 0xbc, 0x44, 0x3d, 0x3c, 0x19, 0x75, 0x1e, 0xe7, 0x49, 0x59, 0x7b, 0x29,
|
||||
0x9e, 0xac, 0x40, 0x29, 0x14, 0x03, 0x89, 0xdf, 0xbd, 0xbe, 0xfa, 0x30, 0x4f, 0xee, 0x9d, 0x18,
|
||||
0x30, 0x04, 0x92, 0xb7, 0x50, 0x3d, 0xf3, 0xe3, 0x28, 0x88, 0x06, 0xc9, 0x73, 0xff, 0x49, 0x9e,
|
||||
0xd0, 0x77, 0x06, 0xc7, 0x52, 0x01, 0xaf, 0xa1, 0x2f, 0xd1, 0xa1, 0xb0, 0x31, 0xf1, 0x7e, 0xa7,
|
||||
0x73, 0x59, 0x93, 0xd6, 0xfd, 0x6d, 0x68, 0x98, 0xfb, 0xf0, 0x91, 0xc7, 0x52, 0x0f, 0x8e, 0xce,
|
||||
0xbc, 0x3b, 0xbb, 0x9e, 0x85, 0xb2, 0x49, 0x49, 0xef, 0x07, 0xdb, 0xee, 0x12, 0x86, 0xce, 0xa5,
|
||||
0xa1, 0xdf, 0x3b, 0xf6, 0x07, 0xc9, 0x77, 0x4a, 0x48, 0xbd, 0x73, 0x6a, 0xed, 0x99, 0x6b, 0x9b,
|
||||
0x90, 0x3a, 0x37, 0x63, 0x7e, 0x1a, 0xc8, 0xf1, 0x0c, 0x9b, 0xd2, 0xab, 0x7f, 0xab, 0x00, 0xb4,
|
||||
0xd3, 0xf3, 0x90, 0x3d, 0x58, 0x40, 0x7b, 0xc4, 0x9b, 0xdb, 0x3c, 0xd1, 0x6f, 0xf7, 0xd9, 0x35,
|
||||
0x1a, 0x2c, 0xf9, 0xa8, 0x93, 0x1f, 0x87, 0x1e, 0xf2, 0x3c, 0xaf, 0x4c, 0x64, 0xe7, 0x26, 0xf7,
|
||||
0xc5, 0x15, 0x28, 0xab, 0xf7, 0x03, 0x94, 0x4d, 0x16, 0x90, 0xbc, 0x5a, 0x98, 0xcd, 0x5b, 0xf7,
|
||||
0xf9, 0x7c, 0x90, 0x51, 0xfa, 0x99, 0x43, 0x98, 0xad, 0x94, 0xc4, 0x9b, 0xd3, 0x0a, 0xed, 0x8d,
|
||||
0xc9, 0x0b, 0xc0, 0x44, 0xd7, 0x69, 0x3a, 0xe4, 0x5b, 0x28, 0x9b, 0x5a, 0x47, 0x3e, 0x99, 0x2d,
|
||||
0x90, 0xe8, 0x9b, 0xbf, 0xdd, 0x74, 0x3e, 0x73, 0xc8, 0x7b, 0x28, 0xe9, 0x26, 0x4f, 0x72, 0x3a,
|
||||
0x56, 0x66, 0x42, 0x70, 0xbd, 0x79, 0x10, 0x1b, 0xc5, 0x1f, 0x00, 0xc6, 0xa3, 0x06, 0xc9, 0xf9,
|
||||
0xd3, 0x66, 0x6a, 0x66, 0x71, 0x9b, 0x57, 0x03, 0xad, 0x81, 0xf7, 0xba, 0xcf, 0x1e, 0x0a, 0x92,
|
||||
0xdb, 0x61, 0xd3, 0x6b, 0xe4, 0x7a, 0xf3, 0x20, 0x56, 0xdd, 0x11, 0x34, 0x26, 0xfe, 0xd1, 0x25,
|
||||
0xbf, 0xc8, 0x77, 0xf2, 0xf2, 0x1f, 0xc4, 0xee, 0xcb, 0x6b, 0x61, 0xad, 0x25, 0x95, 0x9d, 0xd5,
|
||||
0xec, 0x36, 0x69, 0x5d, 0xe5, 0xf7, 0xe4, 0xbf, 0xb3, 0xee, 0xca, 0xb5, 0xf1, 0xc6, 0xea, 0x7a,
|
||||
0xe9, 0xfb, 0xc2, 0xf0, 0xe0, 0xa0, 0x8c, 0x7f, 0x74, 0x7f, 0xf1, 0xbf, 0x00, 0x00, 0x00, 0xff,
|
||||
0xff, 0xc9, 0xe6, 0x4b, 0xb6, 0x86, 0x17, 0x00, 0x00,
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
|
@@ -49,7 +49,7 @@ message BuildRequest {
|
||||
message BuildOptions {
|
||||
string ContextPath = 1;
|
||||
string DockerfileName = 2;
|
||||
PrintFunc PrintFunc = 3;
|
||||
CallFunc CallFunc = 3;
|
||||
map<string, string> NamedContexts = 4;
|
||||
|
||||
repeated string Allow = 5;
|
||||
@@ -80,7 +80,7 @@ message BuildOptions {
|
||||
string Ref = 29;
|
||||
string GroupRef = 30;
|
||||
repeated string Annotations = 31;
|
||||
bool WithProvenanceResponse = 32;
|
||||
string ProvenanceResponseMode = 32;
|
||||
}
|
||||
|
||||
message ExportEntry {
|
||||
@@ -111,7 +111,7 @@ message Secret {
|
||||
string Env = 3;
|
||||
}
|
||||
|
||||
message PrintFunc {
|
||||
message CallFunc {
|
||||
string Name = 1;
|
||||
string Format = 2;
|
||||
bool IgnoreStatus = 3;
|
||||
|
@@ -207,6 +207,7 @@ func attachIO(ctx context.Context, stream msgStream, initMessage *pb.InitMessage
|
||||
|
||||
if cfg.signal != nil {
|
||||
eg.Go(func() error {
|
||||
names := signalNames()
|
||||
for {
|
||||
var sig syscall.Signal
|
||||
select {
|
||||
@@ -216,7 +217,7 @@ func attachIO(ctx context.Context, stream msgStream, initMessage *pb.InitMessage
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
}
|
||||
name := sigToName[sig]
|
||||
name := names[sig]
|
||||
if name == "" {
|
||||
continue
|
||||
}
|
||||
@@ -380,12 +381,12 @@ func copyToStream(fd uint32, snd msgStream, r io.Reader) error {
|
||||
})
|
||||
}
|
||||
|
||||
var sigToName = map[syscall.Signal]string{}
|
||||
|
||||
func init() {
|
||||
func signalNames() map[syscall.Signal]string {
|
||||
m := make(map[syscall.Signal]string, len(signal.SignalMap))
|
||||
for name, value := range signal.SignalMap {
|
||||
sigToName[value] = name
|
||||
m[value] = name
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
type debugStream struct {
|
||||
|
@@ -7,6 +7,9 @@ variable "DOCS_FORMATS" {
|
||||
variable "DESTDIR" {
|
||||
default = "./bin"
|
||||
}
|
||||
variable "TEST_COVERAGE" {
|
||||
default = null
|
||||
}
|
||||
variable "GOLANGCI_LINT_MULTIPLATFORM" {
|
||||
default = ""
|
||||
}
|
||||
@@ -28,7 +31,7 @@ group "default" {
|
||||
}
|
||||
|
||||
group "validate" {
|
||||
targets = ["lint", "lint-gopls", "validate-vendor", "validate-docs"]
|
||||
targets = ["lint", "lint-gopls", "validate-golangci", "validate-vendor", "validate-docs"]
|
||||
}
|
||||
|
||||
target "lint" {
|
||||
@@ -48,6 +51,14 @@ target "lint" {
|
||||
] : []
|
||||
}
|
||||
|
||||
target "validate-golangci" {
|
||||
description = "Validate .golangci.yml schema (does not run Go linter)"
|
||||
inherits = ["_common"]
|
||||
dockerfile = "./hack/dockerfiles/lint.Dockerfile"
|
||||
target = "validate-golangci"
|
||||
output = ["type=cacheonly"]
|
||||
}
|
||||
|
||||
target "lint-gopls" {
|
||||
inherits = ["lint"]
|
||||
target = "gopls-analyze"
|
||||
@@ -192,6 +203,7 @@ variable "TEST_BUILDKIT_TAG" {
|
||||
target "integration-test-base" {
|
||||
inherits = ["_common"]
|
||||
args = {
|
||||
GO_EXTRA_FLAGS = TEST_COVERAGE == "1" ? "-cover" : null
|
||||
HTTP_PROXY = HTTP_PROXY
|
||||
HTTPS_PROXY = HTTPS_PROXY
|
||||
NO_PROXY = NO_PROXY
|
||||
@@ -205,3 +217,18 @@ target "integration-test" {
|
||||
inherits = ["integration-test-base"]
|
||||
target = "integration-test"
|
||||
}
|
||||
|
||||
variable "GOVULNCHECK_FORMAT" {
|
||||
default = null
|
||||
}
|
||||
|
||||
target "govulncheck" {
|
||||
inherits = ["_common"]
|
||||
dockerfile = "./hack/dockerfiles/govulncheck.Dockerfile"
|
||||
target = "output"
|
||||
args = {
|
||||
FORMAT = GOVULNCHECK_FORMAT
|
||||
}
|
||||
no-cache-filter = ["run"]
|
||||
output = ["${DESTDIR}"]
|
||||
}
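
As a usage sketch (assuming this `docker-bake.hcl` sits in the working directory and that Bake picks the variables up from the environment), the new `TEST_COVERAGE` variable and the `govulncheck` target could be exercised like this; the exact report format accepted by the Dockerfile is an assumption here:

```console
# build the integration test image with Go coverage instrumentation enabled
$ TEST_COVERAGE=1 docker buildx bake integration-test

# run govulncheck and write its report to ./bin (the default DESTDIR);
# "json" assumes the Dockerfile forwards FORMAT to govulncheck's format flag
$ GOVULNCHECK_FORMAT=json docker buildx bake govulncheck
```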
|
||||
|
@@ -443,8 +443,7 @@ COPY --from=src . .
|
||||
|
||||
#### Use another target as base
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> [!NOTE]
|
||||
> You should prefer to use regular multi-stage builds over this option. You can
|
||||
> use this feature when you have multiple Dockerfiles that can't be easily
|
||||
> merged into one.
|
||||
@@ -506,6 +505,25 @@ $ docker buildx bake --print -f - <<< 'target "default" {}'
|
||||
}
|
||||
```
|
||||
|
||||
### `target.entitlements`
|
||||
|
||||
Entitlements are permissions that the build process requires to run.
|
||||
|
||||
Currently supported entitlements are:
|
||||
|
||||
- `network.host`: Allows the build to use commands that access the host network. In Dockerfile, use [`RUN --network=host`](https://docs.docker.com/reference/dockerfile/#run---networkhost) to run a command with host network enabled.
|
||||
|
||||
- `security.insecure`: Allows the build to run commands in privileged containers that are not limited by the default security sandbox. Such container may potentially access and modify system resources. In Dockerfile, use [`RUN --security=insecure`](https://docs.docker.com/reference/dockerfile/#run---security) to run a command in a privileged container.
|
||||
|
||||
```hcl
|
||||
target "integration-tests" {
|
||||
# this target requires privileged containers to run nested containers
|
||||
entitlements = ["security.insecure"]
|
||||
}
|
||||
```
|
||||
|
||||
Entitlements are enabled with a two-step process. First, a target must declare the entitlements it requires. Secondly, when invoking the `bake` command, the user must grant the entitlements by passing the `--allow` flag or confirming the entitlements when prompted in an interactive terminal. This is to ensure that the user is aware of the possibly insecure permissions they are granting to the build process.
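
For example, building the `integration-tests` target shown above and granting its declared entitlement in one non-interactive invocation could look like this:

```console
# grant the security.insecure entitlement that the target declares
$ docker buildx bake --allow=security.insecure integration-tests
```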
|
||||
|
||||
### `target.inherits`
|
||||
|
||||
A target can inherit attributes from other targets.
|
||||
@@ -750,6 +768,27 @@ target "app" {
|
||||
}
|
||||
```
|
||||
|
||||
### `target.network`
|
||||
|
||||
Specify the network mode for the whole build request. This will override the default network mode
|
||||
for all the `RUN` instructions in the Dockerfile. Accepted values are `default`, `host`, and `none`.
|
||||
|
||||
Usually, a better approach to set the network mode for your build steps is to instead use `RUN --network=<value>`
|
||||
in your Dockerfile. This way, you can set the network mode for individual build steps and everyone building
|
||||
the Dockerfile gets consistent behavior without needing to pass additional flags to the build command.
|
||||
|
||||
If you set network mode to `host` in your Bake file, you must also grant `network.host` entitlement when
|
||||
invoking the `bake` command. This is because `host` network mode requires elevated privileges and can be a security risk.
|
||||
You can pass `--allow=network.host` to the `docker buildx bake` command to grant the entitlement, or you can
|
||||
confirm the entitlement when prompted if you are using an interactive terminal.
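
As a minimal sketch, a target that sets `network = "host"` could be built non-interactively by granting the entitlement up front; the `e2e` target name here is illustrative:

```console
# grant host networking for a hypothetical "e2e" target that sets network = "host"
$ docker buildx bake --allow=network.host e2e
```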
|
||||
|
||||
```hcl
|
||||
target "app" {
|
||||
# make sure this build does not access internet
|
||||
network = "none"
|
||||
}
|
||||
```
|
||||
|
||||
### `target.no-cache-filter`
|
||||
|
||||
Don't use build cache for the specified stages.
|
||||
@@ -832,8 +871,8 @@ This lets you [mount the secret][run_mount_secret] in your Dockerfile.
|
||||
```dockerfile
|
||||
RUN --mount=type=secret,id=aws,target=/root/.aws/credentials \
|
||||
aws cloudfront create-invalidation ...
|
||||
RUN --mount=type=secret,id=KUBECONFIG \
|
||||
KUBECONFIG=$(cat /run/secrets/KUBECONFIG) helm upgrade --install
|
||||
RUN --mount=type=secret,id=KUBECONFIG,env=KUBECONFIG \
|
||||
helm upgrade --install
|
||||
```
|
||||
|
||||
### `target.shm-size`
|
||||
@@ -853,8 +892,7 @@ target "default" {
|
||||
}
|
||||
```
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> [!NOTE]
|
||||
> In most cases, it is recommended to let the builder automatically determine
|
||||
> the appropriate configurations. Manual adjustments should only be considered
|
||||
> when specific performance tuning is required for complex build scenarios.
|
||||
@@ -919,14 +957,12 @@ target "app" {
|
||||
}
|
||||
```
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> [!NOTE]
|
||||
> If you do not provide a `hard limit`, the `soft limit` is used
|
||||
> for both values. If no `ulimits` are set, they are inherited from
|
||||
> the default `ulimits` set on the daemon.
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> [!NOTE]
|
||||
> In most cases, it is recommended to let the builder automatically determine
|
||||
> the appropriate configurations. Manual adjustments should only be considered
|
||||
> when specific performance tuning is required for complex build scenarios.
|
||||
@@ -1114,8 +1150,7 @@ target "webapp-dev" {
|
||||
}
|
||||
```
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> [!NOTE]
|
||||
> See [User defined HCL functions][hcl-funcs] page for more details.
|
||||
|
||||
<!-- external links -->
|
||||
|
@@ -4,8 +4,7 @@ To assist with creating and debugging complex builds, Buildx provides a
|
||||
debugger to help you step through the build process and easily inspect the
|
||||
state of the build environment at any point.
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> [!NOTE]
|
||||
> The debug monitor is a new experimental feature in recent versions of Buildx.
|
||||
> There are rough edges, known bugs, and missing features. Please try it out
|
||||
> and let us know what you think!
|
||||
|
@@ -32,6 +32,7 @@ Extended build capabilities with BuildKit
|
||||
| Name | Type | Default | Description |
|
||||
|:------------------------|:---------|:--------|:-----------------------------------------|
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
@@ -13,20 +13,24 @@ Build from a file
|
||||
|
||||
### Options
|
||||
|
||||
| Name | Type | Default | Description |
|
||||
|:------------------------------------|:--------------|:--------|:-----------------------------------------------------------------------------------------|
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
| [`-f`](#file), [`--file`](#file) | `stringArray` | | Build definition file |
|
||||
| `--load` | | | Shorthand for `--set=*.output=type=docker` |
|
||||
| [`--metadata-file`](#metadata-file) | `string` | | Write build result metadata to a file |
|
||||
| [`--no-cache`](#no-cache) | | | Do not use cache when building the image |
|
||||
| [`--print`](#print) | | | Print the options without building |
|
||||
| [`--progress`](#progress) | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
|
||||
| [`--provenance`](#provenance) | `string` | | Shorthand for `--set=*.attest=type=provenance` |
|
||||
| [`--pull`](#pull) | | | Always attempt to pull all referenced images |
|
||||
| `--push` | | | Shorthand for `--set=*.output=type=registry` |
|
||||
| [`--sbom`](#sbom) | `string` | | Shorthand for `--set=*.attest=type=sbom` |
|
||||
| [`--set`](#set) | `stringArray` | | Override target value (e.g., `targetpattern.key=value`) |
|
||||
| Name | Type | Default | Description |
|
||||
|:------------------------------------|:--------------|:--------|:----------------------------------------------------------------------------------------------------|
|
||||
| `--allow` | `stringArray` | | Allow build to access specified resources |
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
| [`--call`](#call) | `string` | `build` | Set method for evaluating build (`check`, `outline`, `targets`) |
|
||||
| [`--check`](#check) | `bool` | | Shorthand for `--call=check` |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| [`-f`](#file), [`--file`](#file) | `stringArray` | | Build definition file |
|
||||
| `--load` | `bool` | | Shorthand for `--set=*.output=type=docker` |
|
||||
| [`--metadata-file`](#metadata-file) | `string` | | Write build result metadata to a file |
|
||||
| [`--no-cache`](#no-cache) | `bool` | | Do not use cache when building the image |
|
||||
| [`--print`](#print) | `bool` | | Print the options without building |
|
||||
| [`--progress`](#progress) | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use plain to show container output |
|
||||
| [`--provenance`](#provenance) | `string` | | Shorthand for `--set=*.attest=type=provenance` |
|
||||
| [`--pull`](#pull) | `bool` | | Always attempt to pull all referenced images |
|
||||
| `--push` | `bool` | | Shorthand for `--set=*.output=type=registry` |
|
||||
| [`--sbom`](#sbom) | `string` | | Shorthand for `--set=*.attest=type=sbom` |
|
||||
| [`--set`](#set) | `stringArray` | | Override target value (e.g., `targetpattern.key=value`) |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
@@ -39,8 +43,7 @@ as part of the build.
|
||||
Read [High-level build options with Bake](https://docs.docker.com/build/bake/)
|
||||
guide for introduction to writing bake files.
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> [!NOTE]
|
||||
> `buildx bake` command may receive backwards incompatible features in the future
|
||||
> if needed. We are looking for feedback on improving the command and extending
|
||||
> the functionality further.
|
||||
@@ -51,6 +54,14 @@ guide for introduction to writing bake files.
|
||||
|
||||
Same as [`buildx --builder`](buildx.md#builder).
|
||||
|
||||
### <a name="call"></a> Invoke a frontend method (--call)
|
||||
|
||||
Same as [`build --call`](buildx_build.md#call).
|
||||
|
||||
#### <a name="check"></a> Call: check (--check)
|
||||
|
||||
Same as [`build --check`](buildx_build.md#check).
|
||||
|
||||
### <a name="file"></a> Specify a build definition file (-f, --file)
|
||||
|
||||
Use the `-f` / `--file` option to specify the build definition file to use.
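
For instance, a definition file other than the default could be selected like this; the file and target names are illustrative:

```console
# read targets from a custom Bake file and build only the "webapp" target
$ docker buildx bake -f docker-bake.dev.hcl webapp
```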
|
||||
@@ -119,6 +130,7 @@ $ cat metadata.json
|
||||
|
||||
```json
|
||||
{
|
||||
"buildx.build.warnings": {},
|
||||
"db": {
|
||||
"buildx.build.provenance": {},
|
||||
"buildx.build.ref": "mybuilder/mybuilder0/0fjb6ubs52xx3vygf6fgdl611",
|
||||
@@ -152,15 +164,19 @@ $ cat metadata.json
|
||||
}
|
||||
```
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> Build record [provenance](https://docs.docker.com/build/attestations/slsa-provenance/#provenance-attestation-example)
|
||||
> [!NOTE]
|
||||
> Build record [provenance](https://docs.docker.com/build/metadata/attestations/slsa-provenance/#provenance-attestation-example)
|
||||
> (`buildx.build.provenance`) includes minimal provenance by default. Set the
|
||||
> `BUILDX_METADATA_PROVENANCE` environment variable to customize this behavior:
|
||||
> * `min` sets minimal provenance (default).
|
||||
> * `max` sets full provenance.
|
||||
> * `disabled`, `false` or `0` does not set any provenance.
|
||||
|
||||
> [!NOTE]
|
||||
> Build warnings (`buildx.build.warnings`) are not included by default. Set the
|
||||
> `BUILDX_METADATA_WARNINGS` environment variable to `1` or `true` to
|
||||
> include them.
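
A minimal sketch of combining these variables with `--metadata-file`; the resulting file contents depend on the build:

```console
# request full provenance and include build warnings in the metadata file
$ BUILDX_METADATA_PROVENANCE=max BUILDX_METADATA_WARNINGS=1 \
    docker buildx bake --metadata-file metadata.json
```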
|
||||
|
||||
### <a name="no-cache"></a> Don't use cache when building the image (--no-cache)
|
||||
|
||||
Same as `build --no-cache`. Don't use cache when building the image.
|
||||
|
@@ -9,49 +9,50 @@ Start a build
|
||||
|
||||
### Aliases
|
||||
|
||||
`docker buildx build`, `docker buildx b`
|
||||
`docker build`, `docker builder build`, `docker image build`, `docker buildx b`
|
||||
|
||||
### Options
|
||||
|
||||
| Name | Type | Default | Description |
|
||||
|:---------------------------------------------------------------------------------------------------------------------------------------------------|:--------------|:----------|:----------------------------------------------------------------------------------------------------|
|
||||
| [`--add-host`](https://docs.docker.com/reference/cli/docker/image/build/#add-host) | `stringSlice` | | Add a custom host-to-IP mapping (format: `host:ip`) |
|
||||
| [`--allow`](#allow) | `stringSlice` | | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
|
||||
| [`--annotation`](#annotation) | `stringArray` | | Add annotation to the image |
|
||||
| [`--attest`](#attest) | `stringArray` | | Attestation parameters (format: `type=sbom,generator=image`) |
|
||||
| [`--build-arg`](#build-arg) | `stringArray` | | Set build-time variables |
|
||||
| [`--build-context`](#build-context) | `stringArray` | | Additional build contexts (e.g., name=path) |
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
| [`--cache-from`](#cache-from) | `stringArray` | | External cache sources (e.g., `user/app:cache`, `type=local,src=path/to/dir`) |
|
||||
| [`--cache-to`](#cache-to) | `stringArray` | | Cache export destinations (e.g., `user/app:cache`, `type=local,dest=path/to/dir`) |
|
||||
| `--call` | `string` | `build` | Set method for evaluating build (`check`, `outline`, `targets`) |
|
||||
| [`--cgroup-parent`](https://docs.docker.com/reference/cli/docker/image/build/#cgroup-parent) | `string` | | Set the parent cgroup for the `RUN` instructions during build |
|
||||
| `--check` | | | Shorthand for `--call=check` |
|
||||
| `--detach` | | | Detach buildx server (supported only on linux) (EXPERIMENTAL) |
|
||||
| [`-f`](https://docs.docker.com/reference/cli/docker/image/build/#file), [`--file`](https://docs.docker.com/reference/cli/docker/image/build/#file) | `string` | | Name of the Dockerfile (default: `PATH/Dockerfile`) |
|
||||
| `--iidfile` | `string` | | Write the image ID to a file |
|
||||
| `--label` | `stringArray` | | Set metadata for an image |
|
||||
| [`--load`](#load) | | | Shorthand for `--output=type=docker` |
|
||||
| [`--metadata-file`](#metadata-file) | `string` | | Write build result metadata to a file |
|
||||
| `--network` | `string` | `default` | Set the networking mode for the `RUN` instructions during build |
|
||||
| `--no-cache` | | | Do not use cache when building the image |
|
||||
| [`--no-cache-filter`](#no-cache-filter) | `stringArray` | | Do not cache specified stages |
|
||||
| [`-o`](#output), [`--output`](#output) | `stringArray` | | Output destination (format: `type=local,dest=path`) |
|
||||
| [`--platform`](#platform) | `stringArray` | | Set target platform for build |
|
||||
| [`--progress`](#progress) | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
|
||||
| [`--provenance`](#provenance) | `string` | | Shorthand for `--attest=type=provenance` |
|
||||
| `--pull` | | | Always attempt to pull all referenced images |
|
||||
| [`--push`](#push) | | | Shorthand for `--output=type=registry` |
|
||||
| `-q`, `--quiet` | | | Suppress the build output and print image ID on success |
|
||||
| `--root` | `string` | | Specify root directory of server to connect (EXPERIMENTAL) |
|
||||
| [`--sbom`](#sbom) | `string` | | Shorthand for `--attest=type=sbom` |
|
||||
| [`--secret`](#secret) | `stringArray` | | Secret to expose to the build (format: `id=mysecret[,src=/local/secret]`) |
|
||||
| `--server-config` | `string` | | Specify buildx server config file (used only when launching new server) (EXPERIMENTAL) |
|
||||
| [`--shm-size`](#shm-size) | `bytes` | `0` | Shared memory size for build containers |
|
||||
| [`--ssh`](#ssh) | `stringArray` | | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
|
||||
| [`-t`](https://docs.docker.com/reference/cli/docker/image/build/#tag), [`--tag`](https://docs.docker.com/reference/cli/docker/image/build/#tag) | `stringArray` | | Name and optionally a tag (format: `name:tag`) |
|
||||
| [`--target`](https://docs.docker.com/reference/cli/docker/image/build/#target) | `string` | | Set the target build stage to build |
|
||||
| [`--ulimit`](#ulimit) | `ulimit` | | Ulimit options |
|
||||
| Name | Type | Default | Description |
|
||||
|:----------------------------------------|:--------------|:----------|:----------------------------------------------------------------------------------------------------|
|
||||
| [`--add-host`](#add-host) | `stringSlice` | | Add a custom host-to-IP mapping (format: `host:ip`) |
|
||||
| [`--allow`](#allow) | `stringSlice` | | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
|
||||
| [`--annotation`](#annotation) | `stringArray` | | Add annotation to the image |
|
||||
| [`--attest`](#attest) | `stringArray` | | Attestation parameters (format: `type=sbom,generator=image`) |
|
||||
| [`--build-arg`](#build-arg) | `stringArray` | | Set build-time variables |
|
||||
| [`--build-context`](#build-context) | `stringArray` | | Additional build contexts (e.g., name=path) |
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
| [`--cache-from`](#cache-from) | `stringArray` | | External cache sources (e.g., `user/app:cache`, `type=local,src=path/to/dir`) |
|
||||
| [`--cache-to`](#cache-to) | `stringArray` | | Cache export destinations (e.g., `user/app:cache`, `type=local,dest=path/to/dir`) |
|
||||
| [`--call`](#call) | `string` | `build` | Set method for evaluating build (`check`, `outline`, `targets`) |
|
||||
| [`--cgroup-parent`](#cgroup-parent) | `string` | | Set the parent cgroup for the `RUN` instructions during build |
|
||||
| [`--check`](#check) | `bool` | | Shorthand for `--call=check` |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| `--detach` | `bool` | | Detach buildx server (supported only on linux) (EXPERIMENTAL) |
|
||||
| [`-f`](#file), [`--file`](#file) | `string` | | Name of the Dockerfile (default: `PATH/Dockerfile`) |
|
||||
| `--iidfile` | `string` | | Write the image ID to a file |
|
||||
| `--label` | `stringArray` | | Set metadata for an image |
|
||||
| [`--load`](#load) | `bool` | | Shorthand for `--output=type=docker` |
|
||||
| [`--metadata-file`](#metadata-file) | `string` | | Write build result metadata to a file |
|
||||
| [`--network`](#network) | `string` | `default` | Set the networking mode for the `RUN` instructions during build |
|
||||
| `--no-cache` | `bool` | | Do not use cache when building the image |
|
||||
| [`--no-cache-filter`](#no-cache-filter) | `stringArray` | | Do not cache specified stages |
|
||||
| [`-o`](#output), [`--output`](#output) | `stringArray` | | Output destination (format: `type=local,dest=path`) |
|
||||
| [`--platform`](#platform) | `stringArray` | | Set target platform for build |
|
||||
| [`--progress`](#progress) | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use plain to show container output |
|
||||
| [`--provenance`](#provenance) | `string` | | Shorthand for `--attest=type=provenance` |
|
||||
| `--pull` | `bool` | | Always attempt to pull all referenced images |
|
||||
| [`--push`](#push) | `bool` | | Shorthand for `--output=type=registry` |
|
||||
| `-q`, `--quiet` | `bool` | | Suppress the build output and print image ID on success |
|
||||
| `--root` | `string` | | Specify root directory of server to connect (EXPERIMENTAL) |
|
||||
| [`--sbom`](#sbom) | `string` | | Shorthand for `--attest=type=sbom` |
|
||||
| [`--secret`](#secret) | `stringArray` | | Secret to expose to the build (format: `id=mysecret[,src=/local/secret]`) |
|
||||
| `--server-config` | `string` | | Specify buildx server config file (used only when launching new server) (EXPERIMENTAL) |
|
||||
| [`--shm-size`](#shm-size) | `bytes` | `0` | Shared memory size for build containers |
|
||||
| [`--ssh`](#ssh) | `stringArray` | | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
|
||||
| [`-t`](#tag), [`--tag`](#tag) | `stringArray` | | Name and optionally a tag (format: `name:tag`) |
|
||||
| [`--target`](#target) | `string` | | Set the target build stage to build |
|
||||
| [`--ulimit`](#ulimit) | `ulimit` | | Ulimit options |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
@@ -61,15 +62,36 @@ Flags marked with `[experimental]` need to be explicitly enabled by setting the
|
||||
|
||||
## Description
|
||||
|
||||
The `buildx build` command starts a build using BuildKit. This command is similar
|
||||
to the UI of `docker build` command and takes the same flags and arguments.
|
||||
|
||||
For documentation on most of these flags, refer to the [`docker build`
|
||||
documentation](https://docs.docker.com/reference/cli/docker/image/build/).
|
||||
This page describes a subset of the new flags.
|
||||
The `docker buildx build` command starts a build using BuildKit.
|
||||
|
||||
## Examples
|
||||
|
||||
### <a name="add-host"></a> Add entries to container hosts file (--add-host)
|
||||
|
||||
You can add other hosts into a build container's `/etc/hosts` file by using one
|
||||
or more `--add-host` flags. This example adds static addresses for hosts named
|
||||
`my-hostname` and `my_hostname_v6`:
|
||||
|
||||
```console
|
||||
$ docker buildx build --add-host my_hostname=8.8.8.8 --add-host my_hostname_v6=2001:4860:4860::8888 .
|
||||
```
|
||||
|
||||
If you need your build to connect to services running on the host, you can use
|
||||
the special `host-gateway` value for `--add-host`. In the following example,
|
||||
build containers resolve `host.docker.internal` to the host's gateway IP.
|
||||
|
||||
```console
|
||||
$ docker buildx build --add-host host.docker.internal=host-gateway .
|
||||
```
|
||||
|
||||
You can wrap an IPv6 address in square brackets.
|
||||
`=` and `:` are both valid separators.
|
||||
Both formats in the following example are valid:
|
||||
|
||||
```console
|
||||
$ docker buildx build --add-host my-hostname:10.180.0.1 --add-host my-hostname_v6=[2001:4860:4860::8888] .
|
||||
```
|
||||
|
||||
### <a name="annotation"></a> Create annotations (--annotation)
|
||||
|
||||
```text
|
||||
@@ -123,7 +145,7 @@ For more information about annotations, see
|
||||
--attest=type=provenance,...
|
||||
```
|
||||
|
||||
Create [image attestations](https://docs.docker.com/build/attestations/).
|
||||
Create [image attestations](https://docs.docker.com/build/metadata/attestations/).
|
||||
BuildKit currently supports:
|
||||
|
||||
- `sbom` - Software Bill of Materials.
|
||||
@@ -131,7 +153,7 @@ BuildKit currently supports:
|
||||
Use `--attest=type=sbom` to generate an SBOM for an image at build-time.
|
||||
Alternatively, you can use the [`--sbom` shorthand](#sbom).
|
||||
|
||||
For more information, see [here](https://docs.docker.com/build/attestations/sbom/).
|
||||
For more information, see [here](https://docs.docker.com/build/metadata/attestations/sbom/).
|
||||
|
||||
- `provenance` - SLSA Provenance
|
||||
|
||||
@@ -141,7 +163,7 @@ BuildKit currently supports:
|
||||
By default, a minimal provenance attestation will be created for the build
|
||||
result, which will only be attached for images pushed to registries.
|
||||
|
||||
For more information, see [here](https://docs.docker.com/build/attestations/slsa-provenance/).
|
||||
For more information, see [here](https://docs.docker.com/build/metadata/attestations/slsa-provenance/).
|
||||
|
||||
### <a name="allow"></a> Allow extra privileged entitlement (--allow)
|
||||
|
||||
@@ -165,7 +187,40 @@ $ docker buildx build --allow security.insecure .
|
||||
|
||||
### <a name="build-arg"></a> Set build-time variables (--build-arg)
|
||||
|
||||
Same as [`docker build` command](https://docs.docker.com/reference/cli/docker/image/build/#build-arg).
|
||||
You can use `ENV` instructions in a Dockerfile to define variable values. These
|
||||
values persist in the built image. Often persistence isn't what you want. Users
|
||||
want to specify variables differently depending on which host they build an
|
||||
image on.
|
||||
|
||||
A good example is `http_proxy` or source versions for pulling intermediate
|
||||
files. The `ARG` instruction lets Dockerfile authors define values that users
|
||||
can set at build-time using the `--build-arg` flag:
|
||||
|
||||
```console
|
||||
$ docker buildx build --build-arg HTTP_PROXY=http://10.20.30.2:1234 --build-arg FTP_PROXY=http://40.50.60.5:4567 .
|
||||
```
|
||||
|
||||
This flag allows you to pass the build-time variables that are
|
||||
accessed like regular environment variables in the `RUN` instruction of the
|
||||
Dockerfile. These values don't persist in the intermediate or final images
|
||||
like `ENV` values do. You must add `--build-arg` for each build argument.
|
||||
|
||||
Using this flag doesn't alter the output you see when the build process echoes the `ARG` lines from the
|
||||
Dockerfile.
|
||||
|
||||
For detailed information on using `ARG` and `ENV` instructions, see the
|
||||
[Dockerfile reference](https://docs.docker.com/reference/dockerfile/).
|
||||
|
||||
You can also use the `--build-arg` flag without a value, in which case the daemon
|
||||
propagates the value from the local environment into the Docker container it's building:
|
||||
|
||||
```console
|
||||
$ export HTTP_PROXY=http://10.20.30.2:1234
|
||||
$ docker buildx build --build-arg HTTP_PROXY .
|
||||
```
|
||||
|
||||
This example is similar to how `docker run -e` works. Refer to the [`docker run` documentation](container_run.md#env)
|
||||
for more information.
|
||||
|
||||
There are also useful built-in build arguments, such as:
|
||||
|
||||
@@ -271,6 +326,167 @@ $ docker buildx build --cache-from=type=s3,region=eu-west-1,bucket=mybucket .
|
||||
|
||||
More info about cache exporters and available attributes: https://github.com/moby/buildkit#export-cache
|
||||
|
||||
### <a name="call"></a> Invoke a frontend method (--call)
|
||||
|
||||
```text
|
||||
--call=[build|check|outline|targets]
|
||||
```
|
||||
|
||||
BuildKit frontends can support alternative modes of executions for builds,
|
||||
using frontend methods. Frontend methods are a way to change or extend the
|
||||
behavior of a build invocation, which lets you, for example, inspect, validate,
|
||||
or generate alternative outputs from a build.
|
||||
|
||||
The `--call` flag for `docker buildx build` lets you specify the frontend
|
||||
method that you want to execute. If this flag is unspecified, it defaults to
|
||||
executing the build and evaluating [build checks](https://docs.docker.com/reference/build-checks/).
|
||||
|
||||
For Dockerfiles, the available methods are:
|
||||
|
||||
| Command | Description |
|
||||
| ------------------------------ | ------------------------------------------------------------------------------------------------------------------- |
|
||||
| `build` (default) | Execute the build and evaluate build checks for the current build target. |
|
||||
| `check`                        | Evaluate build checks for either the entire Dockerfile or the selected target, without executing a build.            |
|
||||
| `outline` | Show the build arguments that you can set for a target, and their default values. |
|
||||
| `targets` | List all the build targets in the Dockerfile. |
|
||||
| `subrequests.describe` | List all the frontend methods that the current frontend supports. |
|
||||
|
||||
Note that other frontends may implement these or other methods.
|
||||
To see the list of available methods for the frontend you're using,
|
||||
use `--call=subrequests.describe`.
|
||||
|
||||
```console
|
||||
$ docker buildx build -q --call=subrequests.describe .
|
||||
|
||||
NAME VERSION DESCRIPTION
|
||||
outline 1.0.0 List all parameters current build target supports
|
||||
targets 1.0.0 List all targets current build supports
|
||||
subrequests.describe 1.0.0 List available subrequest types
|
||||
```
|
||||
|
||||
#### Descriptions
|
||||
|
||||
The [`--call=targets`](#call-targets) and [`--call=outline`](#call-outline)
|
||||
methods include descriptions for build targets and arguments, if available.
|
||||
Descriptions are generated from comments in the Dockerfile. A comment on the
|
||||
line before a `FROM` instruction becomes the description of a build target, and
|
||||
a comment before an `ARG` instruction the description of a build argument. The
|
||||
comment must lead with the name of the stage or argument, for example:
|
||||
|
||||
```dockerfile
|
||||
# syntax=docker/dockerfile:1
|
||||
|
||||
# GO_VERSION sets the Go version for the build
|
||||
ARG GO_VERSION=1.22
|
||||
|
||||
# base-builder is the base stage for building the project
|
||||
FROM golang:${GO_VERSION} AS base-builder
|
||||
```
|
||||
|
||||
When you run `docker buildx build --call=outline`, the output includes the
|
||||
descriptions, as follows:
|
||||
|
||||
```console
|
||||
$ docker buildx build -q --call=outline .
|
||||
|
||||
TARGET: base-builder
|
||||
DESCRIPTION: is the base stage for building the project
|
||||
|
||||
BUILD ARG VALUE DESCRIPTION
|
||||
GO_VERSION 1.22 sets the Go version for the build
|
||||
```
|
||||
|
||||
For more examples on how to write Dockerfile docstrings,
|
||||
check out [the Dockerfile for Docker docs](https://github.com/docker/docs/blob/main/Dockerfile).
|
||||
|
||||
#### <a name="check"></a> Call: check (--check)
|
||||
|
||||
The `check` method evaluates build checks without executing the build. The
|
||||
`--check` flag is a convenient shorthand for `--call=check`. Use the `check`
|
||||
method to validate the build configuration before starting the build.
|
||||
|
||||
```console
|
||||
$ docker buildx build -q --check https://github.com/docker/docs.git
|
||||
|
||||
WARNING: InvalidBaseImagePlatform
|
||||
Base image wjdp/htmltest:v0.17.0 was pulled with platform "linux/amd64", expected "linux/arm64" for current build
|
||||
Dockerfile:43
|
||||
--------------------
|
||||
41 | "#content/desktop/previous-versions/*.md"
|
||||
42 |
|
||||
43 | >>> FROM wjdp/htmltest:v${HTMLTEST_VERSION} AS test
|
||||
44 | WORKDIR /test
|
||||
45 | COPY --from=build /out ./public
|
||||
--------------------
|
||||
```
|
||||
|
||||
Using `--check` without specifying a target evaluates the entire Dockerfile.
|
||||
If you want to evaluate a specific target, use the `--target` flag.
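
For example, assuming a stage named `base-builder` like the one in the Dockerfile snippet shown earlier, the checks can be scoped to that stage:

```console
# evaluate build checks only for the base-builder stage
$ docker buildx build -q --check --target base-builder .
```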
|
||||
|
||||
#### Call: outline
|
||||
|
||||
The `outline` method prints the name of the specified target (or the default
|
||||
target, if `--target` isn't specified), and the build arguments that the target
|
||||
consumes, along with their default values, if set.
|
||||
|
||||
The following example shows the default target `release` and its build arguments:
|
||||
|
||||
```console
|
||||
$ docker buildx build -q --call=outline https://github.com/docker/docs.git
|
||||
|
||||
TARGET: release
|
||||
DESCRIPTION: is an empty scratch image with only compiled assets
|
||||
|
||||
BUILD ARG VALUE DESCRIPTION
|
||||
GO_VERSION 1.22 sets the Go version for the base stage
|
||||
HUGO_VERSION 0.127.0
|
||||
HUGO_ENV sets the hugo.Environment (production, development, preview)
|
||||
DOCS_URL sets the base URL for the site
|
||||
PAGEFIND_VERSION 1.1.0
|
||||
```
|
||||
|
||||
This means that the `release` target is configurable using these build arguments:
|
||||
|
||||
```console
|
||||
$ docker buildx build \
|
||||
--build-arg GO_VERSION=1.22 \
|
||||
--build-arg HUGO_VERSION=0.127.0 \
|
||||
--build-arg HUGO_ENV=production \
|
||||
--build-arg DOCS_URL=https://example.com \
|
||||
--build-arg PAGEFIND_VERSION=1.1.0 \
|
||||
--target release https://github.com/docker/docs.git
|
||||
```
|
||||
|
||||
#### Call: targets
|
||||
|
||||
The `targets` method lists all the build targets in the Dockerfile. These are
|
||||
the stages that you can build using the `--target` flag. It also indicates the
|
||||
default target, which is the target that will be built when you don't specify a
|
||||
target.
|
||||
|
||||
```console
|
||||
$ docker buildx build -q --call=targets https://github.com/docker/docs.git
|
||||
|
||||
TARGET DESCRIPTION
|
||||
base is the base stage with build dependencies
|
||||
node installs Node.js dependencies
|
||||
hugo downloads and extracts the Hugo binary
|
||||
build-base is the base stage for building the site
|
||||
dev is for local development with Docker Compose
|
||||
build creates production builds with Hugo
|
||||
lint lints markdown files
|
||||
test validates HTML output and checks for broken links
|
||||
update-modules downloads and vendors Hugo modules
|
||||
vendor is an empty stage with only vendored Hugo modules
|
||||
build-upstream builds an upstream project with a replacement module
|
||||
validate-upstream validates HTML output for upstream builds
|
||||
unused-media checks for unused graphics and other media
|
||||
pagefind installs the Pagefind runtime
|
||||
index generates a Pagefind index
|
||||
test-go-redirects checks that the /go/ redirects are valid
|
||||
release (default) is an empty scratch image with only compiled assets
|
||||
```
|
||||
|
||||
### <a name="cache-to"></a> Export build cache to an external cache destination (--cache-to)
|
||||
|
||||
```text
|
||||
@@ -310,6 +526,27 @@ $ docker buildx build --cache-to=type=s3,region=eu-west-1,bucket=mybucket .
|
||||
|
||||
More info about cache exporters and available attributes: https://github.com/moby/buildkit#export-cache
|
||||
|
||||
### <a name="cgroup-parent"></a> Use a custom parent cgroup (--cgroup-parent)
|
||||
|
||||
When you run `docker buildx build` with the `--cgroup-parent` option,
|
||||
the daemon runs the containers used in the build with the
|
||||
[corresponding `docker run` flag](container_run.md#cgroup-parent).
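
A brief sketch, with a made-up parent cgroup name:

```console
# run the build's RUN containers under a custom parent cgroup (name is illustrative)
$ docker buildx build --cgroup-parent=my-build.slice .
```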
|
||||
|
||||
### <a name="file"></a> Specify a Dockerfile (-f, --file)
|
||||
|
||||
```console
|
||||
$ docker buildx build -f <filepath> .
|
||||
```
|
||||
|
||||
Specifies the filepath of the Dockerfile to use.
|
||||
If unspecified, a file named `Dockerfile` at the root of the build context is used by default.
|
||||
|
||||
To read a Dockerfile from stdin, you can use `-` as the argument for `--file`.
|
||||
|
||||
```console
|
||||
$ cat Dockerfile | docker buildx build -f - .
|
||||
```
|
||||
|
||||
### <a name="load"></a> Load the single-platform build result to `docker images` (--load)
|
||||
|
||||
Shorthand for [`--output=type=docker`](#docker). Will automatically load the
|
||||
@@ -330,6 +567,7 @@ $ cat metadata.json
|
||||
{
|
||||
"buildx.build.provenance": {},
|
||||
"buildx.build.ref": "mybuilder/mybuilder0/0fjb6ubs52xx3vygf6fgdl611",
|
||||
"buildx.build.warnings": {},
|
||||
"containerimage.config.digest": "sha256:2937f66a9722f7f4a2df583de2f8cb97fc9196059a410e7f00072fc918930e66",
|
||||
"containerimage.descriptor": {
|
||||
"annotations": {
|
||||
@@ -344,14 +582,29 @@ $ cat metadata.json
|
||||
}
|
||||
```
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> Build record [provenance](https://docs.docker.com/build/attestations/slsa-provenance/#provenance-attestation-example)
|
||||
> [!NOTE]
|
||||
> Build record [provenance](https://docs.docker.com/build/metadata/attestations/slsa-provenance/#provenance-attestation-example)
|
||||
> (`buildx.build.provenance`) includes minimal provenance by default. Set the
|
||||
> `BUILDX_METADATA_PROVENANCE` environment variable to customize this behavior:
|
||||
> * `min` sets minimal provenance (default).
|
||||
> * `max` sets full provenance.
|
||||
> * `disabled`, `false` or `0` does not set any provenance.
|
||||
>
|
||||
> - `min` sets minimal provenance (default).
|
||||
> - `max` sets full provenance.
|
||||
> - `disabled`, `false` or `0` doesn't set any provenance.
|
||||
|
||||
> [!NOTE]
|
||||
> Build warnings (`buildx.build.warnings`) are not included by default. Set the
|
||||
> `BUILDX_METADATA_WARNINGS` environment variable to `1` or `true` to
|
||||
> include them.
|
||||
|
||||
### <a name="network"></a> Set the networking mode for the RUN instructions during build (--network)
|
||||
|
||||
Available options for the networking mode are:
|
||||
|
||||
- `default` (default): Run in the default network.
|
||||
- `none`: Run with no network access.
|
||||
- `host`: Run in the host’s network environment.
|
||||
|
||||
Find more details in the [Dockerfile reference](https://docs.docker.com/reference/dockerfile/#run---network).
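
For example, to make sure no `RUN` instruction can reach the network during the build:

```console
# disable network access for all RUN instructions in this build
$ docker buildx build --network=none .
```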
|
||||
|
||||
### <a name="no-cache-filter"></a> Ignore build cache for specific stages (--no-cache-filter)
|
||||
|
||||
@@ -414,17 +667,19 @@ The arguments for the `--no-cache-filter` flag must be names of stages.
|
||||
-o, --output=[PATH,-,type=TYPE[,KEY=VALUE]
|
||||
```
|
||||
|
||||
Sets the export action for the build result. In `docker build` all builds finish
|
||||
by creating a container image and exporting it to `docker images`. `buildx` makes
|
||||
this step configurable allowing results to be exported directly to the client,
|
||||
OCI image tarballs, registry etc.
|
||||
Sets the export action for the build result. The default output, when using the
|
||||
`docker` [build driver](https://docs.docker.com/build/builders/drivers/), is a container
|
||||
image exported to the local image store. The `--output` flag makes this step
|
||||
configurable, allowing export of results directly to the client's filesystem, an
|
||||
OCI image tarball, a registry, and more.
|
||||
|
||||
Buildx with `docker` driver currently only supports local, tarball exporter and
|
||||
image exporter. `docker-container` driver supports all the exporters.
|
||||
Buildx with `docker` driver only supports the local, tarball, and image
|
||||
[exporters](https://docs.docker.com/build/exporters/). The `docker-container`
|
||||
driver supports all exporters.
|
||||
|
||||
If just the path is specified as a value, `buildx` will use the local exporter
|
||||
with this path as the destination. If the value is "-", `buildx` will use `tar`
|
||||
exporter and write to `stdout`.
|
||||
If you only specify a filepath as the argument to `--output`, Buildx uses the
|
||||
local exporter. If the value is `-`, Buildx uses the `tar` exporter and writes
|
||||
the output to stdout.
|
||||
|
||||
```console
|
||||
$ docker buildx build -o . .
|
||||
@@ -435,12 +690,17 @@ $ docker buildx build -o type=docker,dest=- . > myimage.tar
|
||||
$ docker buildx build -t tonistiigi/foo -o type=registry
|
||||
```
|
||||
|
||||
> **Note **
|
||||
>
|
||||
> Since BuildKit v0.13.0 multiple outputs can be specified by repeating the flag.
|
||||
You can export multiple outputs by repeating the flag.
|
||||
|
||||
Supported exported types are:
|
||||
|
||||
- [`local`](#local)
|
||||
- [`tar`](#tar)
|
||||
- [`oci`](#oci)
|
||||
- [`docker`](#docker)
|
||||
- [`image`](#image)
|
||||
- [`registry`](#registry)
|
||||
|
||||
#### `local`

The `local` export type writes all result files to a directory on the client. The
@@ -451,6 +711,9 @@ Attribute key:

- `dest` - destination directory where files will be written

For more information, see
[Local and tar exporters](https://docs.docker.com/build/exporters/local-tar/).

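For example, to write the result files to a local directory (the path below is illustrative):

```console
$ docker buildx build -o type=local,dest=./build-output .
```
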
#### `tar`

The `tar` export type writes all result files as a single tarball on the client.
@@ -460,6 +723,9 @@ Attribute key:

- `dest` - destination path where tarball will be written. “-” writes to stdout.

For more information, see
[Local and tar exporters](https://docs.docker.com/build/exporters/local-tar/).

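For example, to collect the result files into a single tarball (filename is illustrative):

```console
$ docker buildx build -o type=tar,dest=result.tar .
```
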
#### `oci`

The `oci` export type writes the result image or manifest list as an [OCI image
@@ -470,6 +736,9 @@ Attribute key:

- `dest` - destination path where tarball will be written. “-” writes to stdout.

For more information, see
[OCI and Docker exporters](https://docs.docker.com/build/exporters/oci-docker/).

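For example, to write the image as an OCI layout tarball (filename is illustrative):

```console
$ docker buildx build -o type=oci,dest=image-oci.tar .
```
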
#### `docker`

The `docker` export type writes the single-platform result image as a [Docker image
@@ -486,6 +755,9 @@ Attribute keys:
the tar will be loaded automatically to the local image store.
- `context` - name for the Docker context where to import the result

For more information, see
[OCI and Docker exporters](https://docs.docker.com/build/exporters/oci-docker/).

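For example, to load a single-platform image into the local image store under an illustrative name:

```console
$ docker buildx build -t myimage:latest -o type=docker .
```
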
#### `image`

The `image` exporter writes the build result as an image or a manifest list. When
@@ -497,10 +769,16 @@ Attribute keys:

- `name` - name (references) for the new image.
- `push` - Boolean to automatically push the image.

For more information, see
[Image and registry exporters](https://docs.docker.com/build/exporters/image-registry/).

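For example, to build, name, and push an image in one step (the registry and repository below are placeholders):

```console
$ docker buildx build -o type=image,name=registry.example.com/myapp:latest,push=true .
```
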
#### `registry`

The `registry` exporter is a shortcut for `type=image,push=true`.

For more information, see
[Image and registry exporters](https://docs.docker.com/build/exporters/image-registry/).

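Equivalently, using the `registry` shortcut (placeholder image name):

```console
$ docker buildx build -t registry.example.com/myapp:latest -o type=registry .
```
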
### <a name="platform"></a> Set the target platforms for the build (--platform)

```text
@@ -527,13 +805,12 @@ support for the specified platform. In a clean setup, you can only execute `RUN`
commands for your system architecture.
If your kernel supports [`binfmt_misc`](https://en.wikipedia.org/wiki/Binfmt_misc)
launchers for secondary architectures, buildx will pick them up automatically.
Docker desktop releases come with `binfmt_misc` automatically configured for `arm64`
Docker Desktop releases come with `binfmt_misc` automatically configured for `arm64`
and `arm` architectures. You can see what runtime platforms your current builder
instance supports by running `docker buildx inspect --bootstrap`.

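For example, a multi-platform build for two architectures might look like the following sketch; pushing to a registry (placeholder name) is one common way to store the multi-platform result:

```console
$ docker buildx build --platform=linux/amd64,linux/arm64 \
  -t registry.example.com/myapp:latest --push .
```
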
Inside a `Dockerfile`, you can access the current platform value through
`TARGETPLATFORM` build argument. Refer to the [`docker build`
documentation](https://docs.docker.com/reference/dockerfile/#automatic-platform-args-in-the-global-scope)
`TARGETPLATFORM` build argument. Refer to the [Dockerfile reference](https://docs.docker.com/reference/dockerfile/#automatic-platform-args-in-the-global-scope)
for the full description of automatic platform argument variants.

You can find the formatting definition for the platform specifier in the
@@ -551,11 +828,10 @@ $ docker buildx build --platform=darwin .
--progress=VALUE
```

Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container
output (default "auto").
Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use `plain` to show container
output (default `auto`).

> **Note**
>
> [!NOTE]
> You can also use the `BUILDKIT_PROGRESS` environment variable to set its value.

The following example uses `plain` output during the build:
@@ -573,11 +849,13 @@ $ docker buildx build --load --progress=plain .
...
```

> **Note**
>
> [!NOTE]
> Check also the [`BUILDKIT_COLORS`](https://docs.docker.com/build/building/variables/#buildkit_colors)
> environment variable for modifying the colors of the terminal output.

The `rawjson` output marshals the solve status events from BuildKit to JSON lines.
This mode is designed to be read by an external program.

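As a minimal sketch, assuming progress output is written to stderr as with the other modes, the JSON lines can be captured to a file for later processing:

```console
$ docker buildx build --progress=rawjson . 2> build-status.jsonl
```
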
### <a name="provenance"></a> Create provenance attestations (--provenance)

Shorthand for [`--attest=type=provenance`](#attest), used to configure
@@ -596,7 +874,7 @@ to a registry if you use the default image store. Alternatively, you can switch
to using the containerd image store.

For more information about provenance attestations, see
[here](https://docs.docker.com/build/attestations/slsa-provenance/).
[here](https://docs.docker.com/build/metadata/attestations/slsa-provenance/).

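For example, to attach full provenance and push the result (image name is a placeholder):

```console
$ docker buildx build --provenance=mode=max -t registry.example.com/myapp:latest --push .
```
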
### <a name="push"></a> Push the build result to a registry (--push)

@@ -618,7 +896,7 @@ attestations. Provenance attestations only persist for images pushed directly
to a registry if you use the default image store. Alternatively, you can switch
to using the containerd image store.

For more information, see [here](https://docs.docker.com/build/attestations/sbom/).
For more information, see [here](https://docs.docker.com/build/metadata/attestations/sbom/).

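For example, to generate an SBOM attestation with the `--sbom` shorthand while pushing (image name is a placeholder):

```console
$ docker buildx build --sbom=true -t registry.example.com/myapp:latest --push .
```
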
### <a name="secret"></a> Secret to expose to the build (--secret)

@@ -669,8 +947,8 @@ Attribute keys:
# syntax=docker/dockerfile:1
FROM node:alpine
RUN --mount=type=bind,target=. \
  --mount=type=secret,id=SECRET_TOKEN \
  SECRET_TOKEN=$(cat /run/secrets/SECRET_TOKEN) yarn run test
  --mount=type=secret,id=SECRET_TOKEN,env=SECRET_TOKEN \
  yarn run test
```

```console
@@ -686,8 +964,7 @@ The format is `<number><unit>`. `number` must be greater than `0`. Unit is
optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g`
(gigabytes). If you omit the unit, the system uses bytes.

> **Note**
>
> [!NOTE]
> In most cases, it is recommended to let the builder automatically determine
> the appropriate configurations. Manual adjustments should only be considered
> when specific performance tuning is required for complex build scenarios.

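For example, to raise the shared memory size to 512 megabytes for the build containers:

```console
$ docker buildx build --shm-size=512m .
```
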
@@ -723,6 +1000,46 @@ $ ssh-add ~/.ssh/id_rsa
$ docker buildx build --ssh default=$SSH_AUTH_SOCK .
```

### <a name="tag"></a> Tag an image (-t, --tag)

```console
$ docker buildx build -t docker/apache:2.0 .
```

This example builds in the same way as the previous example, but it then tags the resulting
image. The repository name will be `docker/apache` and the tag `2.0`.

[Read more about valid tags](https://docs.docker.com/reference/cli/docker/image/tag/).

You can apply multiple tags to an image. For example, you can apply the `latest`
tag to a newly built image and add another tag that references a specific
version.

For example, to tag an image both as `docker/fedora-jboss:latest` and
`docker/fedora-jboss:v2.1`, use the following:

```console
$ docker buildx build -t docker/fedora-jboss:latest -t docker/fedora-jboss:v2.1 .
```

### <a name="target"></a> Specifying target build stage (--target)

When building a Dockerfile with multiple build stages, use the `--target`
option to specify an intermediate build stage by name as a final stage for the
resulting image. The builder skips commands after the target stage.

```dockerfile
FROM debian AS build-env
# ...

FROM alpine AS production-env
# ...
```

```console
$ docker buildx build -t mybuildimage --target build-env .
```

### <a name="ulimit"></a> Set ulimits (--ulimit)

`--ulimit` overrides the default ulimits of build's containers when using `RUN`
@@ -733,14 +1050,12 @@ instructions and are specified with a soft and hard limit as such:
$ docker buildx build --ulimit nofile=1024:1024 .
```

> **Note**
>
> [!NOTE]
> If you don't provide a `hard limit`, the `soft limit` is used
> for both values. If no `ulimits` are set, they're inherited from
> the default `ulimits` set on the daemon.

> **Note**
>
> [!NOTE]
> In most cases, it is recommended to let the builder automatically determine
> the appropriate configurations. Manual adjustments should only be considered
> when specific performance tuning is required for complex build scenarios.

@@ -11,17 +11,18 @@ Create a new builder instance
|
||||
|
||||
| Name | Type | Default | Description |
|
||||
|:------------------------------------------|:--------------|:--------|:----------------------------------------------------------------------|
|
||||
| [`--append`](#append) | | | Append a node to builder instead of changing it |
|
||||
| `--bootstrap` | | | Boot builder after creation |
|
||||
| [`--append`](#append) | `bool` | | Append a node to builder instead of changing it |
|
||||
| `--bootstrap` | `bool` | | Boot builder after creation |
|
||||
| [`--buildkitd-config`](#buildkitd-config) | `string` | | BuildKit daemon config file |
|
||||
| [`--buildkitd-flags`](#buildkitd-flags) | `string` | | BuildKit daemon flags |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| [`--driver`](#driver) | `string` | | Driver to use (available: `docker-container`, `kubernetes`, `remote`) |
|
||||
| [`--driver-opt`](#driver-opt) | `stringArray` | | Options for the driver |
|
||||
| [`--leave`](#leave) | | | Remove a node from builder instead of changing it |
|
||||
| [`--leave`](#leave) | `bool` | | Remove a node from builder instead of changing it |
|
||||
| [`--name`](#name) | `string` | | Builder instance name |
|
||||
| [`--node`](#node) | `string` | | Create/modify node with given name |
|
||||
| [`--platform`](#platform) | `stringArray` | | Fixed platforms for current node |
|
||||
| [`--use`](#use) | | | Set the current builder instance |
|
||||
| [`--use`](#use) | `bool` | | Set the current builder instance |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
@@ -101,8 +102,7 @@ value is `auto` and can be one of `bridge`, `cni`, `host`:
--buildkitd-flags '--oci-worker-net bridge'
```

> **Note**
>
> [!NOTE]
> Network mode "bridge" is supported since BuildKit v0.13 and will become the
> default in next v0.14.

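A full builder-creation command using this worker network setting might look like the following sketch (no builder name is given, so one is generated):

```console
$ docker buildx create --use --driver docker-container \
  --buildkitd-flags '--oci-worker-net bridge'
```
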
@@ -120,7 +120,7 @@ backend. Buildx supports the following drivers:
|
||||
* `kubernetes`
|
||||
* `remote`
|
||||
|
||||
For more information about build drivers, see [here](https://docs.docker.com/build/drivers/).
|
||||
For more information about build drivers, see [here](https://docs.docker.com/build/builders/drivers/).
|
||||
|
||||
#### `docker` driver
|
||||
|
||||
@@ -167,10 +167,10 @@ Passes additional driver-specific options.
|
||||
For information about available driver options, refer to the detailed
|
||||
documentation for the specific driver:
|
||||
|
||||
* [`docker` driver](https://docs.docker.com/build/drivers/docker/)
|
||||
* [`docker-container` driver](https://docs.docker.com/build/drivers/docker-container/)
|
||||
* [`kubernetes` driver](https://docs.docker.com/build/drivers/kubernetes/)
|
||||
* [`remote` driver](https://docs.docker.com/build/drivers/remote/)
|
||||
* [`docker` driver](https://docs.docker.com/build/builders/drivers/docker/)
|
||||
* [`docker-container` driver](https://docs.docker.com/build/builders/drivers/docker-container/)
|
||||
* [`kubernetes` driver](https://docs.docker.com/build/builders/drivers/kubernetes/)
|
||||
* [`remote` driver](https://docs.docker.com/build/builders/drivers/remote/)
|
||||
|
||||
### <a name="leave"></a> Remove a node from a builder (--leave)
|
||||
|
||||
|
@@ -12,15 +12,16 @@ Start debugger (EXPERIMENTAL)
|
||||
|
||||
### Options
|
||||
|
||||
| Name | Type | Default | Description |
|
||||
|:------------------|:---------|:--------|:---------------------------------------------------------------------------------------------------------|
|
||||
| `--builder` | `string` | | Override the configured builder instance |
|
||||
| `--detach` | `bool` | `true` | Detach buildx server for the monitor (supported only on linux) (EXPERIMENTAL) |
|
||||
| `--invoke` | `string` | | Launch a monitor with executing specified command (EXPERIMENTAL) |
|
||||
| `--on` | `string` | `error` | When to launch the monitor ([always, error]) (EXPERIMENTAL) |
|
||||
| `--progress` | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`) for the monitor. Use plain to show container output |
|
||||
| `--root` | `string` | | Specify root directory of server to connect for the monitor (EXPERIMENTAL) |
|
||||
| `--server-config` | `string` | | Specify buildx server config file for the monitor (used only when launching new server) (EXPERIMENTAL) |
|
||||
| Name | Type | Default | Description |
|
||||
|:------------------|:---------|:--------|:--------------------------------------------------------------------------------------------------------------------|
|
||||
| `--builder` | `string` | | Override the configured builder instance |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| `--detach` | `bool` | `true` | Detach buildx server for the monitor (supported only on linux) (EXPERIMENTAL) |
|
||||
| `--invoke` | `string` | | Launch a monitor with executing specified command (EXPERIMENTAL) |
|
||||
| `--on` | `string` | `error` | When to launch the monitor ([always, error]) (EXPERIMENTAL) |
|
||||
| `--progress` | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`) for the monitor. Use plain to show container output |
|
||||
| `--root` | `string` | | Specify root directory of server to connect for the monitor (EXPERIMENTAL) |
|
||||
| `--server-config` | `string` | | Specify buildx server config file for the monitor (used only when launching new server) (EXPERIMENTAL) |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
@@ -5,49 +5,50 @@ Start a build
|
||||
|
||||
### Aliases
|
||||
|
||||
`docker buildx debug build`, `docker buildx debug b`
|
||||
`docker build`, `docker builder build`, `docker image build`, `docker buildx b`
|
||||
|
||||
### Options
|
||||
|
||||
| Name | Type | Default | Description |
|
||||
|:---------------------------------------------------------------------------------------------------------------------------------------------------|:--------------|:----------|:----------------------------------------------------------------------------------------------------|
|
||||
| [`--add-host`](https://docs.docker.com/reference/cli/docker/image/build/#add-host) | `stringSlice` | | Add a custom host-to-IP mapping (format: `host:ip`) |
|
||||
| `--allow` | `stringSlice` | | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
|
||||
| `--annotation` | `stringArray` | | Add annotation to the image |
|
||||
| `--attest` | `stringArray` | | Attestation parameters (format: `type=sbom,generator=image`) |
|
||||
| `--build-arg` | `stringArray` | | Set build-time variables |
|
||||
| `--build-context` | `stringArray` | | Additional build contexts (e.g., name=path) |
|
||||
| `--builder` | `string` | | Override the configured builder instance |
|
||||
| `--cache-from` | `stringArray` | | External cache sources (e.g., `user/app:cache`, `type=local,src=path/to/dir`) |
|
||||
| `--cache-to` | `stringArray` | | Cache export destinations (e.g., `user/app:cache`, `type=local,dest=path/to/dir`) |
|
||||
| `--call` | `string` | `build` | Set method for evaluating build (`check`, `outline`, `targets`) |
|
||||
| [`--cgroup-parent`](https://docs.docker.com/reference/cli/docker/image/build/#cgroup-parent) | `string` | | Set the parent cgroup for the `RUN` instructions during build |
|
||||
| `--check` | | | Shorthand for `--call=check` |
|
||||
| `--detach` | | | Detach buildx server (supported only on linux) (EXPERIMENTAL) |
|
||||
| [`-f`](https://docs.docker.com/reference/cli/docker/image/build/#file), [`--file`](https://docs.docker.com/reference/cli/docker/image/build/#file) | `string` | | Name of the Dockerfile (default: `PATH/Dockerfile`) |
|
||||
| `--iidfile` | `string` | | Write the image ID to a file |
|
||||
| `--label` | `stringArray` | | Set metadata for an image |
|
||||
| `--load` | | | Shorthand for `--output=type=docker` |
|
||||
| `--metadata-file` | `string` | | Write build result metadata to a file |
|
||||
| `--network` | `string` | `default` | Set the networking mode for the `RUN` instructions during build |
|
||||
| `--no-cache` | | | Do not use cache when building the image |
|
||||
| `--no-cache-filter` | `stringArray` | | Do not cache specified stages |
|
||||
| `-o`, `--output` | `stringArray` | | Output destination (format: `type=local,dest=path`) |
|
||||
| `--platform` | `stringArray` | | Set target platform for build |
|
||||
| `--progress` | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
|
||||
| `--provenance` | `string` | | Shorthand for `--attest=type=provenance` |
|
||||
| `--pull` | | | Always attempt to pull all referenced images |
|
||||
| `--push` | | | Shorthand for `--output=type=registry` |
|
||||
| `-q`, `--quiet` | | | Suppress the build output and print image ID on success |
|
||||
| `--root` | `string` | | Specify root directory of server to connect (EXPERIMENTAL) |
|
||||
| `--sbom` | `string` | | Shorthand for `--attest=type=sbom` |
|
||||
| `--secret` | `stringArray` | | Secret to expose to the build (format: `id=mysecret[,src=/local/secret]`) |
|
||||
| `--server-config` | `string` | | Specify buildx server config file (used only when launching new server) (EXPERIMENTAL) |
|
||||
| `--shm-size` | `bytes` | `0` | Shared memory size for build containers |
|
||||
| `--ssh` | `stringArray` | | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
|
||||
| [`-t`](https://docs.docker.com/reference/cli/docker/image/build/#tag), [`--tag`](https://docs.docker.com/reference/cli/docker/image/build/#tag) | `stringArray` | | Name and optionally a tag (format: `name:tag`) |
|
||||
| [`--target`](https://docs.docker.com/reference/cli/docker/image/build/#target) | `string` | | Set the target build stage to build |
|
||||
| `--ulimit` | `ulimit` | | Ulimit options |
|
||||
| Name | Type | Default | Description |
|
||||
|:--------------------|:--------------|:----------|:----------------------------------------------------------------------------------------------------|
|
||||
| `--add-host` | `stringSlice` | | Add a custom host-to-IP mapping (format: `host:ip`) |
|
||||
| `--allow` | `stringSlice` | | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
|
||||
| `--annotation` | `stringArray` | | Add annotation to the image |
|
||||
| `--attest` | `stringArray` | | Attestation parameters (format: `type=sbom,generator=image`) |
|
||||
| `--build-arg` | `stringArray` | | Set build-time variables |
|
||||
| `--build-context` | `stringArray` | | Additional build contexts (e.g., name=path) |
|
||||
| `--builder` | `string` | | Override the configured builder instance |
|
||||
| `--cache-from` | `stringArray` | | External cache sources (e.g., `user/app:cache`, `type=local,src=path/to/dir`) |
|
||||
| `--cache-to` | `stringArray` | | Cache export destinations (e.g., `user/app:cache`, `type=local,dest=path/to/dir`) |
|
||||
| `--call` | `string` | `build` | Set method for evaluating build (`check`, `outline`, `targets`) |
|
||||
| `--cgroup-parent` | `string` | | Set the parent cgroup for the `RUN` instructions during build |
|
||||
| `--check` | `bool` | | Shorthand for `--call=check` |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| `--detach` | `bool` | | Detach buildx server (supported only on linux) (EXPERIMENTAL) |
|
||||
| `-f`, `--file` | `string` | | Name of the Dockerfile (default: `PATH/Dockerfile`) |
|
||||
| `--iidfile` | `string` | | Write the image ID to a file |
|
||||
| `--label` | `stringArray` | | Set metadata for an image |
|
||||
| `--load` | `bool` | | Shorthand for `--output=type=docker` |
|
||||
| `--metadata-file` | `string` | | Write build result metadata to a file |
|
||||
| `--network` | `string` | `default` | Set the networking mode for the `RUN` instructions during build |
|
||||
| `--no-cache` | `bool` | | Do not use cache when building the image |
|
||||
| `--no-cache-filter` | `stringArray` | | Do not cache specified stages |
|
||||
| `-o`, `--output` | `stringArray` | | Output destination (format: `type=local,dest=path`) |
|
||||
| `--platform` | `stringArray` | | Set target platform for build |
|
||||
| `--progress` | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use plain to show container output |
|
||||
| `--provenance` | `string` | | Shorthand for `--attest=type=provenance` |
|
||||
| `--pull` | `bool` | | Always attempt to pull all referenced images |
|
||||
| `--push` | `bool` | | Shorthand for `--output=type=registry` |
|
||||
| `-q`, `--quiet` | `bool` | | Suppress the build output and print image ID on success |
|
||||
| `--root` | `string` | | Specify root directory of server to connect (EXPERIMENTAL) |
|
||||
| `--sbom` | `string` | | Shorthand for `--attest=type=sbom` |
|
||||
| `--secret` | `stringArray` | | Secret to expose to the build (format: `id=mysecret[,src=/local/secret]`) |
|
||||
| `--server-config` | `string` | | Specify buildx server config file (used only when launching new server) (EXPERIMENTAL) |
|
||||
| `--shm-size` | `bytes` | `0` | Shared memory size for build containers |
|
||||
| `--ssh` | `stringArray` | | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
|
||||
| `-t`, `--tag` | `stringArray` | | Name and optionally a tag (format: `name:tag`) |
|
||||
| `--target` | `string` | | Set the target build stage to build |
|
||||
| `--ulimit` | `ulimit` | | Ulimit options |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
@@ -5,11 +5,12 @@ Proxy current stdio streams to builder instance
|
||||
|
||||
### Options
|
||||
|
||||
| Name | Type | Default | Description |
|
||||
|:-------------|:---------|:--------|:-------------------------------------------------|
|
||||
| `--builder` | `string` | | Override the configured builder instance |
|
||||
| `--platform` | `string` | | Target platform: this is used for node selection |
|
||||
| `--progress` | `string` | `quiet` | Set type of progress output (auto, plain, tty). |
|
||||
| Name | Type | Default | Description |
|
||||
|:----------------|:---------|:--------|:----------------------------------------------------------------------------------------------------|
|
||||
| `--builder` | `string` | | Override the configured builder instance |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| `--platform` | `string` | | Target platform: this is used for node selection |
|
||||
| `--progress` | `string` | `quiet` | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use plain to show container output |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
@@ -12,8 +12,9 @@ Disk usage
|
||||
| Name | Type | Default | Description |
|
||||
|:------------------------|:---------|:--------|:-----------------------------------------|
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| `--filter` | `filter` | | Provide filter values |
|
||||
| [`--verbose`](#verbose) | | | Provide a more verbose output |
|
||||
| [`--verbose`](#verbose) | `bool` | | Provide a more verbose output |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
@@ -20,6 +20,7 @@ Commands to work on images in registry
|
||||
| Name | Type | Default | Description |
|
||||
|:------------------------|:---------|:--------|:-----------------------------------------|
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
@@ -12,12 +12,13 @@ Create a new image based on source images
|
||||
| Name | Type | Default | Description |
|
||||
|:---------------------------------|:--------------|:--------|:------------------------------------------------------------------------------------------------------------------------------|
|
||||
| [`--annotation`](#annotation) | `stringArray` | | Add annotation to the image |
|
||||
| [`--append`](#append) | | | Append to existing manifest |
|
||||
| [`--append`](#append) | `bool` | | Append to existing manifest |
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
| [`--dry-run`](#dry-run) | | | Show final image instead of pushing |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| [`--dry-run`](#dry-run) | `bool` | | Show final image instead of pushing |
|
||||
| [`-f`](#file), [`--file`](#file) | `stringArray` | | Read source descriptor from file |
|
||||
| `--prefer-index` | `bool` | `true` | When only a single source is specified, prefer outputting an image index or manifest list instead of performing a carbon copy |
|
||||
| `--progress` | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
|
||||
| `--progress` | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use plain to show container output |
|
||||
| [`-t`](#tag), [`--tag`](#tag) | `stringArray` | | Set reference for new image |
|
||||
|
||||
|
||||
@@ -52,8 +53,7 @@ $ docker buildx imagetools create \
|
||||
foo/bar:alpha foo/bar:beta foo/bar:gamma
|
||||
```
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> [!NOTE]
|
||||
> The `imagetools create` command supports adding annotations to the image
|
||||
> index and descriptor, using the following type prefixes:
|
||||
>
|
||||
|
@@ -12,8 +12,9 @@ Show details of an image in the registry
|
||||
| Name | Type | Default | Description |
|
||||
|:------------------------|:---------|:----------------|:----------------------------------------------|
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| [`--format`](#format) | `string` | `{{.Manifest}}` | Format the output using the given Go template |
|
||||
| [`--raw`](#raw) | | | Show original, unformatted JSON manifest |
|
||||
| [`--raw`](#raw) | `bool` | | Show original, unformatted JSON manifest |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
@@ -11,8 +11,9 @@ Inspect current builder instance
|
||||
|
||||
| Name | Type | Default | Description |
|
||||
|:----------------------------|:---------|:--------|:--------------------------------------------|
|
||||
| [`--bootstrap`](#bootstrap) | | | Ensure builder has booted before inspecting |
|
||||
| [`--bootstrap`](#bootstrap) | `bool` | | Ensure builder has booted before inspecting |
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
@@ -43,8 +44,7 @@ name of the builder to inspect to get information about that builder.
|
||||
The following example shows information about a builder instance named
|
||||
`elated_tesla`:
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> [!NOTE]
|
||||
> The asterisk (`*`) next to node build platform(s) indicate they have been
|
||||
> manually set during `buildx create`. Otherwise the platforms were
|
||||
> automatically detected.
|
||||
|
@@ -9,9 +9,10 @@ List builder instances
|
||||
|
||||
### Options
|
||||
|
||||
| Name | Type | Default | Description |
|
||||
|:----------------------|:---------|:--------|:------------------|
|
||||
| [`--format`](#format) | `string` | `table` | Format the output |
|
||||
| Name | Type | Default | Description |
|
||||
|:----------------------|:---------|:--------|:---------------------|
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| [`--format`](#format) | `string` | `table` | Format the output |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
@@ -11,12 +11,13 @@ Remove build cache
|
||||
|
||||
| Name | Type | Default | Description |
|
||||
|:------------------------|:---------|:--------|:------------------------------------------|
|
||||
| `-a`, `--all` | | | Include internal/frontend images |
|
||||
| `-a`, `--all` | `bool` | | Include internal/frontend images |
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| `--filter` | `filter` | | Provide filter values (e.g., `until=24h`) |
|
||||
| `-f`, `--force` | | | Do not prompt for confirmation |
|
||||
| `-f`, `--force` | `bool` | | Do not prompt for confirmation |
|
||||
| `--keep-storage` | `bytes` | `0` | Amount of disk space to keep for cache |
|
||||
| `--verbose` | | | Provide a more verbose output |
|
||||
| `--verbose` | `bool` | | Provide a more verbose output |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
@@ -11,11 +11,12 @@ Remove one or more builder instances
|
||||
|
||||
| Name | Type | Default | Description |
|
||||
|:------------------------------------|:---------|:--------|:-----------------------------------------|
|
||||
| [`--all-inactive`](#all-inactive) | | | Remove all inactive builders |
|
||||
| [`--all-inactive`](#all-inactive) | `bool` | | Remove all inactive builders |
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
| [`-f`](#force), [`--force`](#force) | | | Do not prompt for confirmation |
|
||||
| [`--keep-daemon`](#keep-daemon) | | | Keep the BuildKit daemon running |
|
||||
| [`--keep-state`](#keep-state) | | | Keep BuildKit state |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| [`-f`](#force), [`--force`](#force) | `bool` | | Do not prompt for confirmation |
|
||||
| [`--keep-daemon`](#keep-daemon) | `bool` | | Keep the BuildKit daemon running |
|
||||
| [`--keep-state`](#keep-state) | `bool` | | Keep BuildKit state |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
@@ -12,6 +12,7 @@ Stop builder instance
|
||||
| Name | Type | Default | Description |
|
||||
|:------------------------|:---------|:--------|:-----------------------------------------|
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
@@ -12,8 +12,9 @@ Set the current builder instance
|
||||
| Name | Type | Default | Description |
|
||||
|:------------------------|:---------|:--------|:-------------------------------------------|
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
| `--default` | | | Set builder as default for current context |
|
||||
| `--global` | | | Builder persists context changes |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| `--default` | `bool` | | Set builder as default for current context |
|
||||
| `--global` | `bool` | | Builder persists context changes |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
@@ -7,6 +7,12 @@ docker buildx version
|
||||
<!---MARKER_GEN_START-->
|
||||
Show buildx version information
|
||||
|
||||
### Options
|
||||
|
||||
| Name | Type | Default | Description |
|
||||
|:----------------|:-------|:--------|:---------------------|
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
||||
|
@@ -18,9 +18,8 @@ import (
|
||||
"github.com/docker/buildx/util/imagetools"
|
||||
"github.com/docker/buildx/util/progress"
|
||||
"github.com/docker/cli/opts"
|
||||
dockertypes "github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
imagetypes "github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/api/types/system"
|
||||
@@ -96,7 +95,7 @@ func (d *Driver) create(ctx context.Context, l progress.SubLogger) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rc, err := d.DockerAPI.ImageCreate(ctx, imageName, imagetypes.CreateOptions{
|
||||
rc, err := d.DockerAPI.ImageCreate(ctx, imageName, image.CreateOptions{
|
||||
RegistryAuth: ra,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -256,17 +255,16 @@ func (d *Driver) copyToContainer(ctx context.Context, files map[string][]byte) e
|
||||
defer srcArchive.Close()
|
||||
|
||||
baseDir := path.Dir(confutil.DefaultBuildKitConfigDir)
|
||||
return d.DockerAPI.CopyToContainer(ctx, d.Name, baseDir, srcArchive, dockertypes.CopyToContainerOptions{})
|
||||
return d.DockerAPI.CopyToContainer(ctx, d.Name, baseDir, srcArchive, container.CopyToContainerOptions{})
|
||||
}
|
||||
|
||||
func (d *Driver) exec(ctx context.Context, cmd []string) (string, net.Conn, error) {
|
||||
execConfig := dockertypes.ExecConfig{
|
||||
response, err := d.DockerAPI.ContainerExecCreate(ctx, d.Name, container.ExecOptions{
|
||||
Cmd: cmd,
|
||||
AttachStdin: true,
|
||||
AttachStdout: true,
|
||||
AttachStderr: true,
|
||||
}
|
||||
response, err := d.DockerAPI.ContainerExecCreate(ctx, d.Name, execConfig)
|
||||
})
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
@@ -276,7 +274,7 @@ func (d *Driver) exec(ctx context.Context, cmd []string) (string, net.Conn, erro
|
||||
return "", nil, errors.New("exec ID empty")
|
||||
}
|
||||
|
||||
resp, err := d.DockerAPI.ContainerExecAttach(ctx, execID, dockertypes.ExecStartCheck{})
|
||||
resp, err := d.DockerAPI.ContainerExecAttach(ctx, execID, container.ExecStartOptions{})
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
@@ -29,7 +29,7 @@ func (d *Driver) Bootstrap(ctx context.Context, l progress.Logger) error {
|
||||
func (d *Driver) Info(ctx context.Context) (*driver.Info, error) {
|
||||
_, err := d.DockerAPI.ServerVersion(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(driver.ErrNotConnecting, err.Error())
|
||||
return nil, errors.Wrapf(driver.ErrNotConnecting{}, err.Error())
|
||||
}
|
||||
return &driver.Info{
|
||||
Status: driver.Running,
|
||||
@@ -39,7 +39,7 @@ func (d *Driver) Info(ctx context.Context) (*driver.Info, error) {
|
||||
func (d *Driver) Version(ctx context.Context) (string, error) {
|
||||
v, err := d.DockerAPI.ServerVersion(ctx)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(driver.ErrNotConnecting, err.Error())
|
||||
return "", errors.Wrapf(driver.ErrNotConnecting{}, err.Error())
|
||||
}
|
||||
if bkversion, _ := resolveBuildKitVersion(v.Version); bkversion != "" {
|
||||
return bkversion, nil
|
||||
|
@@ -14,8 +14,17 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var ErrNotRunning = errors.Errorf("driver not running")
|
||||
var ErrNotConnecting = errors.Errorf("driver not connecting")
|
||||
type ErrNotRunning struct{}
|
||||
|
||||
func (ErrNotRunning) Error() string {
|
||||
return "driver not running"
|
||||
}
|
||||
|
||||
type ErrNotConnecting struct{}
|
||||
|
||||
func (ErrNotConnecting) Error() string {
|
||||
return "driver not connecting"
|
||||
}
|
||||
|
||||
type Status int
|
||||
|
||||
@@ -105,7 +114,7 @@ func Boot(ctx, clientContext context.Context, d *DriverHandle, pw progress.Write
|
||||
|
||||
c, err := d.Client(clientContext)
|
||||
if err != nil {
|
||||
if errors.Cause(err) == ErrNotRunning && try <= 2 {
|
||||
if errors.Is(err, ErrNotRunning{}) && try <= 2 {
|
||||
continue
|
||||
}
|
||||
return nil, err
|
||||
|
@@ -9,8 +9,8 @@ contexts:
|
||||
cluster: test-cluster
|
||||
user: test-user
|
||||
namespace: zoinx
|
||||
name: test
|
||||
current-context: test
|
||||
name: k3s
|
||||
current-context: k3s
|
||||
kind: Config
|
||||
preferences: {}
|
||||
users:
|
||||
|
@@ -167,11 +167,12 @@ func NewKubernetesConfig(configPath string) clientcmd.ClientConfig {
|
||||
// ConfigFromEndpoint loads kubernetes config from endpoint
|
||||
func ConfigFromEndpoint(endpointName string, s store.Reader) (clientcmd.ClientConfig, error) {
|
||||
if strings.HasPrefix(endpointName, "kubernetes://") {
|
||||
rules := clientcmd.NewDefaultClientConfigLoadingRules()
|
||||
u, _ := url.Parse(endpointName)
|
||||
if kubeconfig := u.Query().Get("kubeconfig"); kubeconfig != "" {
|
||||
_ = os.Setenv(clientcmd.RecommendedConfigPathEnvVar, kubeconfig)
|
||||
rules.Precedence = append(rules.Precedence, kubeconfig)
|
||||
rules.ExplicitPath = kubeconfig
|
||||
}
|
||||
rules := clientcmd.NewDefaultClientConfigLoadingRules()
|
||||
apiConfig, err := rules.Load()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@@ -1,20 +1,35 @@
|
||||
package context
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/config"
|
||||
"github.com/docker/cli/cli/context/store"
|
||||
cliflags "github.com/docker/cli/cli/flags"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestDefaultContextInitializer(t *testing.T) {
|
||||
os.Setenv("KUBECONFIG", "./fixtures/test-kubeconfig")
|
||||
defer os.Unsetenv("KUBECONFIG")
|
||||
t.Setenv("KUBECONFIG", "./fixtures/test-kubeconfig")
|
||||
ctx, err := command.ResolveDefaultContext(&cliflags.ClientOptions{}, command.DefaultContextStoreConfig())
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "default", ctx.Meta.Name)
|
||||
assert.Equal(t, "zoinx", ctx.Meta.Endpoints[KubernetesEndpoint].(EndpointMeta).DefaultNamespace)
|
||||
}
|
||||
|
||||
func TestConfigFromEndpoint(t *testing.T) {
|
||||
t.Setenv("KUBECONFIG", "./fixtures/test-kubeconfig")
|
||||
cfg, err := ConfigFromEndpoint(
|
||||
"kubernetes:///buildx-test-4c972a3f9d369614b40f28a281790c7e?deployment=buildkit-4c2ed3ed-970f-4f3d-a6df-a4fcbab4d5cf-d9d73&kubeconfig=.%2Ffixtures%2Fk3s-kubeconfig",
|
||||
store.New(config.ContextStoreDir(), command.DefaultContextStoreConfig()),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
rawcfg, err := cfg.RawConfig()
|
||||
require.NoError(t, err)
|
||||
ctxcfg := "k3s"
|
||||
if _, ok := rawcfg.Contexts[ctxcfg]; !ok {
|
||||
t.Errorf("Context config %q not found", ctxcfg)
|
||||
}
|
||||
}
|
||||
|
@@ -38,7 +38,8 @@ const (
|
||||
|
||||
type Driver struct {
|
||||
driver.InitConfig
|
||||
factory driver.Factory
|
||||
factory driver.Factory
|
||||
clientConfig ClientConfig
|
||||
|
||||
// if you add fields, remember to update docs:
|
||||
// https://github.com/docker/docs/blob/main/content/build/drivers/kubernetes.md
|
||||
@@ -198,7 +199,7 @@ func (d *Driver) Rm(ctx context.Context, force, rmVolume, rmDaemon bool) error {
|
||||
|
||||
func (d *Driver) Dial(ctx context.Context) (net.Conn, error) {
|
||||
restClient := d.clientset.CoreV1().RESTClient()
|
||||
restClientConfig, err := d.KubeClientConfig.ClientConfig()
|
||||
restClientConfig, err := d.clientConfig.ClientConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@@ -2,19 +2,22 @@ package kubernetes
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
|
||||
"github.com/docker/buildx/driver"
|
||||
"github.com/docker/buildx/driver/bkimage"
|
||||
ctxkube "github.com/docker/buildx/driver/kubernetes/context"
|
||||
"github.com/docker/buildx/driver/kubernetes/manifest"
|
||||
"github.com/docker/buildx/driver/kubernetes/podchooser"
|
||||
dockerclient "github.com/docker/docker/client"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -23,11 +26,31 @@ const (
|
||||
defaultTimeout = 120 * time.Second
|
||||
)
|
||||
|
||||
type ClientConfig interface {
|
||||
ClientConfig() (*rest.Config, error)
|
||||
Namespace() (string, bool, error)
|
||||
}
|
||||
|
||||
type ClientConfigInCluster struct{}
|
||||
|
||||
func (k ClientConfigInCluster) ClientConfig() (*rest.Config, error) {
|
||||
return rest.InClusterConfig()
|
||||
}
|
||||
|
||||
func (k ClientConfigInCluster) Namespace() (string, bool, error) {
|
||||
namespace, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
|
||||
if err != nil {
|
||||
return "", false, err
|
||||
}
|
||||
return strings.TrimSpace(string(namespace)), true, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
driver.Register(&factory{})
|
||||
}
|
||||
|
||||
type factory struct {
|
||||
cc ClientConfig // used for testing
|
||||
}
|
||||
|
||||
func (*factory) Name() string {
|
||||
@@ -46,18 +69,50 @@ func (*factory) Priority(ctx context.Context, endpoint string, api dockerclient.
|
||||
}
|
||||
|
||||
func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver, error) {
|
||||
if cfg.KubeClientConfig == nil {
|
||||
return nil, errors.Errorf("%s driver requires kubernetes API access", DriverName)
|
||||
var err error
|
||||
var cc ClientConfig
|
||||
if f.cc != nil {
|
||||
cc = f.cc
|
||||
} else {
|
||||
cc, err = ctxkube.ConfigFromEndpoint(cfg.EndpointAddr, cfg.ContextStore)
|
||||
if err != nil {
|
||||
// err is returned if cfg.EndpointAddr is non-context name like "unix:///var/run/docker.sock".
|
||||
// try again with name="default".
|
||||
// FIXME(@AkihiroSuda): cfg should retain real context name.
|
||||
cc, err = ctxkube.ConfigFromEndpoint("default", cfg.ContextStore)
|
||||
if err != nil {
|
||||
logrus.Error(err)
|
||||
}
|
||||
}
|
||||
tryToUseConfigInCluster := false
|
||||
if cc == nil {
|
||||
tryToUseConfigInCluster = true
|
||||
} else {
|
||||
if _, err := cc.ClientConfig(); err != nil {
|
||||
tryToUseConfigInCluster = true
|
||||
}
|
||||
}
|
||||
if tryToUseConfigInCluster {
|
||||
ccInCluster := ClientConfigInCluster{}
|
||||
if _, err := ccInCluster.ClientConfig(); err == nil {
|
||||
logrus.Debug("using kube config in cluster")
|
||||
cc = ccInCluster
|
||||
}
|
||||
}
|
||||
if cc == nil {
|
||||
return nil, errors.Errorf("%s driver requires kubernetes API access", DriverName)
|
||||
}
|
||||
}
|
||||
|
||||
deploymentName, err := buildxNameToDeploymentName(cfg.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
namespace, _, err := cfg.KubeClientConfig.Namespace()
|
||||
namespace, _, err := cc.Namespace()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "cannot determine Kubernetes namespace, specify manually")
|
||||
}
|
||||
restClientConfig, err := cfg.KubeClientConfig.ClientConfig()
|
||||
restClientConfig, err := cc.ClientConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -67,9 +122,10 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver
|
||||
}
|
||||
|
||||
d := &Driver{
|
||||
factory: f,
|
||||
InitConfig: cfg,
|
||||
clientset: clientset,
|
||||
factory: f,
|
||||
clientConfig: cc,
|
||||
InitConfig: cfg,
|
||||
clientset: clientset,
|
||||
}
|
||||
|
||||
deploymentOpt, loadbalance, namespace, defaultLoad, timeout, err := f.processDriverOpts(deploymentName, namespace, cfg)
|
||||
|
@@ -11,29 +11,28 @@ import (
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
type mockKubeClientConfig struct {
|
||||
type mockClientConfig struct {
|
||||
clientConfig *rest.Config
|
||||
namespace string
|
||||
}
|
||||
|
||||
func (r *mockKubeClientConfig) ClientConfig() (*rest.Config, error) {
|
||||
func (r *mockClientConfig) ClientConfig() (*rest.Config, error) {
|
||||
return r.clientConfig, nil
|
||||
}
|
||||
|
||||
func (r *mockKubeClientConfig) Namespace() (string, bool, error) {
|
||||
func (r *mockClientConfig) Namespace() (string, bool, error) {
|
||||
return r.namespace, true, nil
|
||||
}
|
||||
|
||||
func TestFactory_processDriverOpts(t *testing.T) {
|
||||
kcc := mockKubeClientConfig{
|
||||
clientConfig: &rest.Config{},
|
||||
}
|
||||
|
||||
cfg := driver.InitConfig{
|
||||
Name: driver.BuilderName("test"),
|
||||
KubeClientConfig: &kcc,
|
||||
Name: driver.BuilderName("test"),
|
||||
}
|
||||
f := factory{
|
||||
cc: &mockClientConfig{
|
||||
clientConfig: &rest.Config{},
|
||||
},
|
||||
}
|
||||
f := factory{}
|
||||
|
||||
t.Run(
|
||||
"ValidOptions", func(t *testing.T) {
|
||||
|
@@ -7,7 +7,6 @@ import (
|
||||
|
||||
"github.com/docker/buildx/util/platformutil"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
@@ -53,10 +52,17 @@ const (
|
||||
LabelApp = "app"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrReservedAnnotationPlatform = errors.Errorf("the annotation \"%s\" is reserved and cannot be customized", AnnotationPlatform)
|
||||
ErrReservedLabelApp = errors.Errorf("the label \"%s\" is reserved and cannot be customized", LabelApp)
|
||||
)
|
||||
type ErrReservedAnnotationPlatform struct{}
|
||||
|
||||
func (ErrReservedAnnotationPlatform) Error() string {
|
||||
return fmt.Sprintf("the annotation %q is reserved and cannot be customized", AnnotationPlatform)
|
||||
}
|
||||
|
||||
type ErrReservedLabelApp struct{}
|
||||
|
||||
func (ErrReservedLabelApp) Error() string {
|
||||
return fmt.Sprintf("the label %q is reserved and cannot be customized", LabelApp)
|
||||
}
|
||||
|
||||
func NewDeployment(opt *DeploymentOpt) (d *appsv1.Deployment, c []*corev1.ConfigMap, err error) {
|
||||
labels := map[string]string{
|
||||
@@ -73,14 +79,14 @@ func NewDeployment(opt *DeploymentOpt) (d *appsv1.Deployment, c []*corev1.Config
|
||||
|
||||
for k, v := range opt.CustomAnnotations {
|
||||
if k == AnnotationPlatform {
|
||||
return nil, nil, ErrReservedAnnotationPlatform
|
||||
return nil, nil, ErrReservedAnnotationPlatform{}
|
||||
}
|
||||
annotations[k] = v
|
||||
}
|
||||
|
||||
for k, v := range opt.CustomLabels {
|
||||
if k == LabelApp {
|
||||
return nil, nil, ErrReservedLabelApp
|
||||
return nil, nil, ErrReservedLabelApp{}
|
||||
}
|
||||
labels[k] = v
|
||||
}
|
||||
|
@@ -2,17 +2,15 @@ package driver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/docker/cli/cli/context/store"
|
||||
dockerclient "github.com/docker/docker/client"
|
||||
"github.com/moby/buildkit/client"
|
||||
"github.com/moby/buildkit/util/tracing/delegated"
|
||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
type Factory interface {
|
||||
@@ -28,38 +26,18 @@ type BuildkitConfig struct {
|
||||
// Rootless bool
|
||||
}
|
||||
|
||||
type KubeClientConfig interface {
|
||||
ClientConfig() (*rest.Config, error)
|
||||
Namespace() (string, bool, error)
|
||||
}
|
||||
|
||||
type KubeClientConfigInCluster struct{}
|
||||
|
||||
func (k KubeClientConfigInCluster) ClientConfig() (*rest.Config, error) {
|
||||
return rest.InClusterConfig()
|
||||
}
|
||||
|
||||
func (k KubeClientConfigInCluster) Namespace() (string, bool, error) {
|
||||
namespace, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
|
||||
if err != nil {
|
||||
return "", false, err
|
||||
}
|
||||
return strings.TrimSpace(string(namespace)), true, nil
|
||||
}
|
||||
|
||||
type InitConfig struct {
|
||||
// This object needs updates to be generic for different drivers
|
||||
Name string
|
||||
EndpointAddr string
|
||||
DockerAPI dockerclient.APIClient
|
||||
KubeClientConfig KubeClientConfig
|
||||
BuildkitdFlags []string
|
||||
Files map[string][]byte
|
||||
DriverOpts map[string]string
|
||||
Auth Auth
|
||||
Platforms []specs.Platform
|
||||
ContextPathHash string // can be used for determining pods in the driver instance
|
||||
DialMeta map[string][]string
|
||||
Name string
|
||||
EndpointAddr string
|
||||
DockerAPI dockerclient.APIClient
|
||||
ContextStore store.Reader
|
||||
BuildkitdFlags []string
|
||||
Files map[string][]byte
|
||||
DriverOpts map[string]string
|
||||
Auth Auth
|
||||
Platforms []specs.Platform
|
||||
ContextPathHash string
|
||||
DialMeta map[string][]string
|
||||
}
|
||||
|
||||
var drivers map[string]Factory
|
||||
@@ -104,28 +82,15 @@ func GetFactory(name string, instanceRequired bool) (Factory, error) {
|
||||
return nil, errors.Errorf("failed to find driver %q", name)
|
||||
}
|
||||
|
||||
func GetDriver(ctx context.Context, name string, f Factory, endpointAddr string, api dockerclient.APIClient, auth Auth, kcc KubeClientConfig, buildkitdFlags []string, files map[string][]byte, do map[string]string, platforms []specs.Platform, contextPathHash string, dialMeta map[string][]string) (*DriverHandle, error) {
|
||||
ic := InitConfig{
|
||||
EndpointAddr: endpointAddr,
|
||||
DockerAPI: api,
|
||||
KubeClientConfig: kcc,
|
||||
Name: name,
|
||||
BuildkitdFlags: buildkitdFlags,
|
||||
DriverOpts: do,
|
||||
Auth: auth,
|
||||
Platforms: platforms,
|
||||
ContextPathHash: contextPathHash,
|
||||
DialMeta: dialMeta,
|
||||
Files: files,
|
||||
}
|
||||
func GetDriver(ctx context.Context, f Factory, cfg InitConfig) (*DriverHandle, error) {
|
||||
if f == nil {
|
||||
var err error
|
||||
f, err = GetDefaultFactory(ctx, endpointAddr, api, false, dialMeta)
|
||||
f, err = GetDefaultFactory(ctx, cfg.EndpointAddr, cfg.DockerAPI, false, cfg.DialMeta)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
d, err := f.New(ctx, ic)
|
||||
d, err := f.New(ctx, cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -155,9 +120,9 @@ type DriverHandle struct {
|
||||
historyAPISupported bool
|
||||
}
|
||||
|
||||
func (d *DriverHandle) Client(ctx context.Context) (*client.Client, error) {
|
||||
func (d *DriverHandle) Client(ctx context.Context, opt ...client.ClientOpt) (*client.Client, error) {
|
||||
d.once.Do(func() {
|
||||
d.client, d.err = d.Driver.Client(ctx, d.getClientOptions()...)
|
||||
d.client, d.err = d.Driver.Client(ctx, append(d.getClientOptions(), opt...)...)
|
||||
})
|
||||
return d.client, d.err
|
||||
}
|
||||
|
@@ -7,6 +7,7 @@ import (
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/docker/buildx/driver"
|
||||
@@ -14,6 +15,7 @@ import (
|
||||
"github.com/docker/buildx/util/progress"
|
||||
"github.com/moby/buildkit/client"
|
||||
"github.com/moby/buildkit/client/connhelper"
|
||||
"github.com/moby/buildkit/util/tracing/delegated"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
@@ -25,6 +27,11 @@ type Driver struct {
|
||||
// https://github.com/docker/docs/blob/main/content/build/drivers/remote.md
|
||||
*tlsOpts
|
||||
defaultLoad bool
|
||||
|
||||
// remote driver caches the client because its Bootstap/Info methods reuse it internally
|
||||
clientOnce sync.Once
|
||||
client *client.Client
|
||||
err error
|
||||
}
|
||||
|
||||
type tlsOpts struct {
|
||||
@@ -78,12 +85,18 @@ func (d *Driver) Rm(ctx context.Context, force, rmVolume, rmDaemon bool) error {
|
||||
}
|
||||
|
||||
func (d *Driver) Client(ctx context.Context, opts ...client.ClientOpt) (*client.Client, error) {
|
||||
opts = append([]client.ClientOpt{
|
||||
client.WithContextDialer(func(ctx context.Context, _ string) (net.Conn, error) {
|
||||
return d.Dial(ctx)
|
||||
}),
|
||||
}, opts...)
|
||||
return client.New(ctx, "", opts...)
|
||||
d.clientOnce.Do(func() {
|
||||
opts = append([]client.ClientOpt{
|
||||
client.WithContextDialer(func(ctx context.Context, _ string) (net.Conn, error) {
|
||||
return d.Dial(ctx)
|
||||
}),
|
||||
client.WithTracerDelegate(delegated.DefaultExporter),
|
||||
}, opts...)
|
||||
c, err := client.New(ctx, "", opts...)
|
||||
d.client = c
|
||||
d.err = err
|
||||
})
|
||||
return d.client, d.err
|
||||
}
|
||||
|
||||
func (d *Driver) Dial(ctx context.Context) (net.Conn, error) {
|
||||
|
@@ -7,15 +7,15 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
// import connhelpers for special url schemes
|
||||
_ "github.com/moby/buildkit/client/connhelper/dockercontainer"
|
||||
_ "github.com/moby/buildkit/client/connhelper/kubepod"
|
||||
_ "github.com/moby/buildkit/client/connhelper/ssh"
|
||||
|
||||
"github.com/docker/buildx/driver"
|
||||
util "github.com/docker/buildx/driver/remote/util"
|
||||
dockerclient "github.com/docker/docker/client"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
// import connhelpers for special url schemes
|
||||
_ "github.com/moby/buildkit/client/connhelper/dockercontainer"
|
||||
_ "github.com/moby/buildkit/client/connhelper/kubepod"
|
||||
_ "github.com/moby/buildkit/client/connhelper/ssh"
|
||||
)
|
||||
|
||||
const prioritySupported = 20
|
||||
|
@@ -1,7 +1,7 @@
|
||||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
package remote
|
||||
package remoteutil
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
@@ -1,4 +1,4 @@
|
||||
package remote
|
||||
package remoteutil
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
@@ -1,18 +1,19 @@
|
||||
package remote
|
||||
package remoteutil
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"slices"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var schemes = map[string]struct{}{
|
||||
"tcp": {},
|
||||
"unix": {},
|
||||
"ssh": {},
|
||||
"docker-container": {},
|
||||
"kube-pod": {},
|
||||
"npipe": {},
|
||||
var schemes = []string{
|
||||
"docker-container",
|
||||
"kube-pod",
|
||||
"npipe",
|
||||
"ssh",
|
||||
"tcp",
|
||||
"unix",
|
||||
}
|
||||
|
||||
func IsValidEndpoint(ep string) error {
|
||||
@@ -20,7 +21,7 @@ func IsValidEndpoint(ep string) error {
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to parse endpoint %s", ep)
|
||||
}
|
||||
if _, ok := schemes[endpoint.Scheme]; !ok {
|
||||
if _, ok := slices.BinarySearch(schemes, endpoint.Scheme); !ok {
|
||||
return errors.Errorf("unrecognized url scheme %s", endpoint.Scheme)
|
||||
}
|
||||
return nil
|
||||
|
12
driver/remote/util/endpoint_test.go
Normal file
12
driver/remote/util/endpoint_test.go
Normal file
@@ -0,0 +1,12 @@
|
||||
package remoteutil
|
||||
|
||||
import (
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestSchemes(t *testing.T) {
|
||||
require.True(t, slices.IsSorted(schemes))
|
||||
}
|
89  go.mod

@@ -1,24 +1,26 @@
module github.com/docker/buildx

go 1.21
go 1.21.0

require (
github.com/Masterminds/semver/v3 v3.2.1
github.com/Microsoft/go-winio v0.6.2
github.com/aws/aws-sdk-go-v2/config v1.26.6
github.com/compose-spec/compose-go/v2 v2.1.1
github.com/compose-spec/compose-go/v2 v2.1.6
github.com/containerd/console v1.0.4
github.com/containerd/containerd v1.7.17
github.com/containerd/containerd v1.7.21
github.com/containerd/continuity v0.4.3
github.com/containerd/errdefs v0.1.0
github.com/containerd/log v0.1.0
github.com/containerd/typeurl/v2 v2.1.1
github.com/creack/pty v1.1.18
github.com/distribution/reference v0.5.0
github.com/docker/cli v26.1.3+incompatible
github.com/docker/cli-docs-tool v0.7.0
github.com/docker/docker v26.1.3+incompatible
github.com/containerd/platforms v0.2.1
github.com/containerd/typeurl/v2 v2.2.0
github.com/creack/pty v1.1.21
github.com/distribution/reference v0.6.0
github.com/docker/cli v27.2.0+incompatible
github.com/docker/cli-docs-tool v0.8.0
github.com/docker/docker v27.2.0+incompatible
github.com/docker/go-units v0.5.0
github.com/gofrs/flock v0.8.1
github.com/gofrs/flock v0.12.1
github.com/gogo/protobuf v1.3.2
github.com/golang/protobuf v1.5.4
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
@@ -26,9 +28,10 @@ require (
github.com/hashicorp/go-cty-funcs v0.0.0-20230405223818-a090f58aa992
github.com/hashicorp/hcl/v2 v2.20.1
github.com/in-toto/in-toto-golang v0.5.0
github.com/moby/buildkit v0.14.0-rc1
github.com/moby/sys/mountinfo v0.7.1
github.com/moby/sys/signal v0.7.0
github.com/mitchellh/hashstructure/v2 v2.0.2
github.com/moby/buildkit v0.16.0-rc2
github.com/moby/sys/mountinfo v0.7.2
github.com/moby/sys/signal v0.7.1
github.com/morikuni/aec v1.0.0
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.1.0
@@ -36,20 +39,22 @@ require (
github.com/pkg/errors v0.9.1
github.com/serialx/hashring v0.0.0-20200727003509-22c0c7ab6b1b
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.8.0
github.com/spf13/cobra v1.8.1
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.8.4
github.com/stretchr/testify v1.9.0
github.com/tonistiigi/fsutil v0.0.0-20240424095704-91a3fc46842c
github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4
github.com/zclconf/go-cty v1.14.4
go.opentelemetry.io/otel v1.21.0
go.opentelemetry.io/otel/metric v1.21.0
go.opentelemetry.io/otel/sdk v1.21.0
go.opentelemetry.io/otel/trace v1.21.0
golang.org/x/mod v0.14.0
golang.org/x/sync v0.6.0
golang.org/x/sys v0.18.0
golang.org/x/term v0.18.0
google.golang.org/grpc v1.59.0
golang.org/x/mod v0.17.0
golang.org/x/sync v0.7.0
golang.org/x/sys v0.22.0
golang.org/x/term v0.20.0
golang.org/x/text v0.15.0
google.golang.org/grpc v1.62.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.29.2
k8s.io/apimachinery v0.29.2
@@ -59,7 +64,6 @@ require (
require (
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Microsoft/hcsshim v0.11.5 // indirect
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d // indirect
github.com/agext/levenshtein v1.2.3 // indirect
github.com/apparentlymart/go-cidr v1.0.1 // indirect
@@ -79,26 +83,29 @@ require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/containerd/ttrpc v1.2.4 // indirect
github.com/containerd/containerd/api v1.7.19 // indirect
github.com/containerd/ttrpc v1.2.5 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/distribution v2.8.2+incompatible // indirect
github.com/docker/docker-credential-helpers v0.8.0 // indirect
github.com/docker/distribution v2.8.3+incompatible // indirect
github.com/docker/docker-credential-helpers v0.8.2 // indirect
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fvbommel/sortorder v1.0.1 // indirect
github.com/go-logr/logr v1.3.0 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/go-viper/mapstructure/v2 v2.0.0 // indirect
github.com/gogo/googleapis v1.4.1 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/gorilla/mux v1.8.1 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
@@ -108,22 +115,21 @@ require (
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.4 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mattn/go-shellwords v1.0.12 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/locker v1.0.1 // indirect
github.com/moby/patternmatcher v0.6.0 // indirect
github.com/moby/spdystream v0.2.0 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
github.com/moby/sys/user v0.1.0 // indirect
github.com/moby/sys/user v0.3.0 // indirect
github.com/moby/sys/userns v0.1.0 // indirect
github.com/moby/term v0.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
@@ -133,8 +139,9 @@ require (
github.com/prometheus/client_golang v1.17.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
github.com/shibumi/go-pathspec v1.3.0 // indirect
github.com/theupdateframework/notary v0.7.0 // indirect
@@ -146,25 +153,23 @@ require (
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.46.1 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.21.0 // indirect
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
golang.org/x/crypto v0.21.0 // indirect
golang.org/x/crypto v0.23.0 // indirect
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect
golang.org/x/net v0.23.0 // indirect
golang.org/x/oauth2 v0.11.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/net v0.25.0 // indirect
golang.org/x/oauth2 v0.16.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.17.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect
google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect

228  go.sum
@@ -1,6 +1,6 @@
|
||||
cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME=
|
||||
cloud.google.com/go/compute v1.23.1 h1:V97tBoDaZHb6leicZ1G6DLK2BAaZLJ/7+9BB/En3hR0=
|
||||
cloud.google.com/go/compute v1.23.1/go.mod h1:CqB3xpmPKKt3OJpW2ndFIXnA9A4xAy/F3Xp1ixncW78=
|
||||
cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
|
||||
cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
|
||||
cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI=
|
||||
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
|
||||
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
|
||||
@@ -15,8 +15,8 @@ github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0
|
||||
github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
|
||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
github.com/Microsoft/hcsshim v0.11.5 h1:haEcLNpj9Ka1gd3B3tAEs9CpE0c+1IhoL59w/exYU38=
|
||||
github.com/Microsoft/hcsshim v0.11.5/go.mod h1:MV8xMfmECjl5HdO7U/3/hFVnkmSBjAjmA09d4bExKcU=
|
||||
github.com/Microsoft/hcsshim v0.11.7 h1:vl/nj3Bar/CvJSYo7gIQPyRWc9f3c6IeSNavBTSZNZQ=
|
||||
github.com/Microsoft/hcsshim v0.11.7/go.mod h1:MV8xMfmECjl5HdO7U/3/hFVnkmSBjAjmA09d4bExKcU=
|
||||
github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
|
||||
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=
|
||||
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
|
||||
@@ -80,54 +80,61 @@ github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004 h1:lkAMpLVBDaj17e85keuznYcH5rqI438v41pKcBl4ZxQ=
|
||||
github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
|
||||
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
|
||||
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
|
||||
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
|
||||
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
|
||||
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
|
||||
github.com/compose-spec/compose-go/v2 v2.1.1 h1:tKuYJwAVgxIryRrsvWJSf1kNviVOQVVqwyHsV6YoIUc=
|
||||
github.com/compose-spec/compose-go/v2 v2.1.1/go.mod h1:bEPizBkIojlQ20pi2vNluBa58tevvj0Y18oUSHPyfdc=
|
||||
github.com/compose-spec/compose-go/v2 v2.1.6 h1:d0Cs0DffmOwmSzs0YPHwKCskknGq2jfGg4uGowlEpps=
|
||||
github.com/compose-spec/compose-go/v2 v2.1.6/go.mod h1:lFN0DrMxIncJGYAXTfWuajfwj5haBJqrBkarHcnjJKc=
|
||||
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
|
||||
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
|
||||
github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro=
|
||||
github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
|
||||
github.com/containerd/containerd v1.7.17 h1:KjNnn0+tAVQHAoaWRjmdak9WlvnFR/8rU1CHHy8Rm2A=
|
||||
github.com/containerd/containerd v1.7.17/go.mod h1:vK+hhT4TIv2uejlcDlbVIc8+h/BqtKLIyNrtCZol8lI=
|
||||
github.com/containerd/containerd v1.7.21 h1:USGXRK1eOC/SX0L195YgxTHb0a00anxajOzgfN0qrCA=
|
||||
github.com/containerd/containerd v1.7.21/go.mod h1:e3Jz1rYRUZ2Lt51YrH9Rz0zPyJBOlSvB3ghr2jbVD8g=
|
||||
github.com/containerd/containerd/api v1.7.19 h1:VWbJL+8Ap4Ju2mx9c9qS1uFSB1OVYr5JJrW2yT5vFoA=
|
||||
github.com/containerd/containerd/api v1.7.19/go.mod h1:fwGavl3LNwAV5ilJ0sbrABL44AQxmNjDRcwheXDb6Ig=
|
||||
github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8=
|
||||
github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
|
||||
github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM=
|
||||
github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0=
|
||||
github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY=
|
||||
github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o=
|
||||
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
||||
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
|
||||
github.com/containerd/nydus-snapshotter v0.13.7 h1:x7DHvGnzJOu1ZPwPYkeOPk5MjZZYbdddygEjaSDoFTk=
|
||||
github.com/containerd/nydus-snapshotter v0.13.7/go.mod h1:VPVKQ3jmHFIcUIV2yiQ1kImZuBFS3GXDohKs9mRABVE=
|
||||
github.com/containerd/nydus-snapshotter v0.14.0 h1:6/eAi6d7MjaeLLuMO8Udfe5GVsDudmrDNO4SGETMBco=
|
||||
github.com/containerd/nydus-snapshotter v0.14.0/go.mod h1:TT4jv2SnIDxEBu4H2YOvWQHPOap031ydTaHTuvc5VQk=
|
||||
github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
|
||||
github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
|
||||
github.com/containerd/stargz-snapshotter v0.15.1 h1:fpsP4kf/Z4n2EYnU0WT8ZCE3eiKDwikDhL6VwxIlgeA=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
|
||||
github.com/containerd/ttrpc v1.2.4 h1:eQCQK4h9dxDmpOb9QOOMh2NHTfzroH1IkmHiKZi05Oo=
|
||||
github.com/containerd/ttrpc v1.2.4/go.mod h1:ojvb8SJBSch0XkqNO0L0YX/5NxR3UnVk2LzFKBK0upc=
|
||||
github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4=
|
||||
github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/containerd/ttrpc v1.2.5 h1:IFckT1EFQoFBMG4c3sMdT8EP3/aKfumK1msY+Ze4oLU=
|
||||
github.com/containerd/ttrpc v1.2.5/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o=
|
||||
github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsPEmzLso=
|
||||
github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
|
||||
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
|
||||
github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0=
|
||||
github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
|
||||
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
|
||||
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/docker/cli v26.1.3+incompatible h1:bUpXT/N0kDE3VUHI2r5VMsYQgi38kYuoC0oL9yt3lqc=
|
||||
github.com/docker/cli v26.1.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/cli-docs-tool v0.7.0 h1:M2Da98Unz2kz3A5d4yeSGbhyOge2mfYSNjAFt01Rw0M=
|
||||
github.com/docker/cli-docs-tool v0.7.0/go.mod h1:zMjqTFCU361PRh8apiXzeAZ1Q/xupbIwTusYpzCXS/o=
|
||||
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/docker/cli v27.2.0+incompatible h1:yHD1QEB1/0vr5eBNpu8tncu8gWxg8EydFPOSKHzXSMM=
|
||||
github.com/docker/cli v27.2.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/cli-docs-tool v0.8.0 h1:YcDWl7rQJC3lJ7WVZRwSs3bc9nka97QLWfyJQli8yJU=
|
||||
github.com/docker/cli-docs-tool v0.8.0/go.mod h1:8TQQ3E7mOXoYUs811LiPdUnAhXrcVsBIrW21a5pUbdk=
|
||||
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
|
||||
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
|
||||
github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8=
|
||||
github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40=
|
||||
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
|
||||
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v27.2.0+incompatible h1:Rk9nIVdfH3+Vz4cyI/uhbINhEZ/oLmc+CBXmH6fbNk4=
|
||||
github.com/docker/docker v27.2.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
|
||||
github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
|
||||
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0=
|
||||
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c/go.mod h1:CADgU4DSXK5QUlFslkQu2yW2TKzFZcXq/leZfM0UH5Q=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
@@ -145,8 +152,8 @@ github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNE
|
||||
github.com/dvsekhvalnov/jose2go v0.0.0-20170216131308-f21a8cedbbae/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
|
||||
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
@@ -157,8 +164,9 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
|
||||
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
|
||||
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
|
||||
@@ -174,8 +182,10 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEe
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
|
||||
github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
|
||||
github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
|
||||
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
|
||||
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
|
||||
github.com/go-viper/mapstructure/v2 v2.0.0 h1:dhn8MZ1gZ0mzeodTG3jt5Vj/o87xZKuNAprG2mQfMfc=
|
||||
github.com/go-viper/mapstructure/v2 v2.0.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
||||
github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=
|
||||
github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
|
||||
github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0=
|
||||
github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4=
|
||||
github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
@@ -183,8 +193,8 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
|
||||
github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
|
||||
github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
|
||||
github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
|
||||
github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
@@ -193,6 +203,7 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93 h1:jc2UWq7CbdszqeH6qu1ougXMIUBfSy8Pbh/anURYbGI=
|
||||
@@ -208,16 +219,16 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/pprof v0.0.0-20230323073829-e72429f035bd h1:r8yyd+DJDmsUhGrRBxH5Pj7KeFK5l+Y3FsgT8keqKtk=
|
||||
github.com/google/pprof v0.0.0-20230323073829-e72429f035bd/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk=
|
||||
github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg=
|
||||
github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
|
||||
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
|
||||
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
@@ -259,8 +270,8 @@ github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVE
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
|
||||
github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
|
||||
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
|
||||
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
@@ -288,18 +299,16 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfr
|
||||
github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
|
||||
github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
|
||||
github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
|
||||
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
|
||||
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM=
|
||||
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
|
||||
github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
|
||||
github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20150613213606-2caf8efc9366/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
|
||||
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
|
||||
github.com/moby/buildkit v0.14.0-rc1 h1:pdnGXXT0Wu480PMQzyFUBN/LgtfIwlYoLlT7jba1IVQ=
|
||||
github.com/moby/buildkit v0.14.0-rc1/go.mod h1:J3TW5gH3fc2BVrLnqxnGGQauChFwCzhv6FPkBKTGciU=
|
||||
github.com/moby/buildkit v0.16.0-rc2 h1:5uFWrGujJOWHu0duIcz0tBagS97xddogZ3OxNEfezrU=
|
||||
github.com/moby/buildkit v0.16.0-rc2/go.mod h1:9STuyLZDJNGenp/smTiR01mnvqlO5u5ZW/0/aWHQcio=
|
||||
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
|
||||
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
|
||||
github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
|
||||
@@ -308,14 +317,16 @@ github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkV
|
||||
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
|
||||
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
|
||||
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
|
||||
github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g=
|
||||
github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
|
||||
github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
|
||||
github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
|
||||
github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
|
||||
github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
|
||||
github.com/moby/sys/signal v0.7.0 h1:25RW3d5TnQEoKvRbEKUGay6DCQ46IxAVTT9CUMgmsSI=
|
||||
github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
|
||||
github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg=
|
||||
github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU=
|
||||
github.com/moby/sys/signal v0.7.1 h1:PrQxdvxcGijdo6UXXo/lU/TvHUWyPhj7UOpSo8tuvk0=
|
||||
github.com/moby/sys/signal v0.7.1/go.mod h1:Se1VGehYokAkrSQwL4tDzHvETwUZlnY7S5XtQ50mQp8=
|
||||
github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo=
|
||||
github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
|
||||
github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
|
||||
github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
|
||||
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
|
||||
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
@@ -348,8 +359,8 @@ github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3I
|
||||
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
|
||||
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
|
||||
github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg=
|
||||
github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
|
||||
github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
|
||||
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
|
||||
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
|
||||
@@ -382,12 +393,13 @@ github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
|
||||
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
|
||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
|
||||
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs=
|
||||
@@ -405,8 +417,8 @@ github.com/spdx/tools-golang v0.5.3/go.mod h1:/ETOahiAo96Ob0/RAIBmFZw6XN0yTnyr/u
|
||||
github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94 h1:JmfC365KywYwHB946TTiQWEb8kqPY+pybPLoGE9GgVk=
|
||||
github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg=
|
||||
github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
|
||||
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
|
||||
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
|
||||
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
|
||||
github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431 h1:XTHrT015sxHyJ5FnQ0AeemSspZWaDq7DoTRW0EVsDCE=
|
||||
github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
@@ -427,12 +439,14 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4DzbAiAiEL3c=
|
||||
github.com/theupdateframework/notary v0.7.0/go.mod h1:c9DRxcmhHmVLDay4/2fUYdISnHqbFDGRSlXPO0AhYWw=
|
||||
github.com/tonistiigi/fsutil v0.0.0-20240424095704-91a3fc46842c h1:+6wg/4ORAbnSoGDzg2Q1i3CeMcT/jjhye/ZfnBHy7/M=
|
||||
github.com/tonistiigi/fsutil v0.0.0-20240424095704-91a3fc46842c/go.mod h1:vbbYqJlnswsbJqWUcJN8fKtBhnEgldDrcagTgnBVKKM=
|
||||
github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4 h1:7I5c2Ig/5FgqkYOh/N87NzoyI9U15qUPXhDD8uCupv8=
|
||||
github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4/go.mod h1:278M4p8WsNh3n4a1eqiFcV2FGk7wE5fwUpUom9mK9lE=
|
||||
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea h1:SXhTLE6pb6eld/v/cCndK0AMpt1wiVFb/YYmqB3/QG0=
|
||||
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea/go.mod h1:WPnis/6cRcDZSUvVmezrxJPkiO87ThFYsoUiMwWNDJk=
|
||||
github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab h1:H6aJ0yKQ0gF49Qb2z5hI1UHxSQt4JMyxebFR15KnApw=
|
||||
@@ -449,6 +463,7 @@ github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17
|
||||
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
github.com/zclconf/go-cty v1.4.0/go.mod h1:nHzOclRkoj++EU9ZjSrZvRG0BXIWt8c7loYc0qXAFGQ=
|
||||
github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8=
|
||||
github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
|
||||
@@ -464,12 +479,10 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJ
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo=
|
||||
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
|
||||
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0 h1:ZtfnDL+tUrs1F0Pzfwbg2d59Gru9NCH3bgSHBM6LDwU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0/go.mod h1:hG4Fj/y8TR/tlEDREo8tWstl9fO9gcFkn4xrx0Io8xU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0 h1:NmnYCiR0qNufkldjVvyQfZTHSdzeHoZ41zggMsdMcLM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0/go.mod h1:UVAO61+umUsHLtYb8KXXRoHtxUkdOPkYidzW3gipRLQ=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0 h1:wNMDy/LVGLj2h3p6zg4d0gypKfWKSWI14E1C4smOgl8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0/go.mod h1:YfbDdXAAkemWJK3H/DshvlrxqFB2rtW4rY6ky/3x/H0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0 h1:jd0+5t/YynESZqsSyPz+7PAFdEop0dlN0+PkyHYo8oI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0/go.mod h1:U707O40ee1FpQGyhvqnzmCJm1Wh6OX6GGBVn0E6Uyyk=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0 h1:bflGWrfYyuulcdxf14V6n9+CoQcu5SAAdHmDPAJnlps=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0/go.mod h1:qcTO4xHAxZLaLxPd60TdE88rxtItPHgHWqOhOGRr0as=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk=
|
||||
@@ -496,34 +509,38 @@ golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
|
||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o=
|
||||
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
|
||||
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
|
||||
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
|
||||
golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU=
|
||||
golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
|
||||
golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
|
||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -533,27 +550,34 @@ golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
|
||||
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
|
||||
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
|
||||
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=
|
||||
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
|
||||
golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
|
||||
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
|
||||
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -561,19 +585,19 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b h1:+YaDE2r2OG8t/z5qmsh7Y+XXwCbvadxxZ0YY6mTdrVA=
|
||||
google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:CgAqfJo+Xmu0GwA0411Ht3OU3OntXwsGmrmjI8ioGXI=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b h1:CIC2YMXmIhYw6evmhPxBKJ4fmLbOFtXQN/GV3XOZR8k=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b h1:ZlWIi1wSK56/8hn4QcBp/j9M7Gt3U/3hZw3mC7vDICo=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc=
|
||||
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
|
||||
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
|
||||
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
|
||||
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s=
|
||||
google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
|
||||
google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
|
||||
google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk=
|
||||
google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
||||
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
|
||||
|
@@ -1,20 +1,21 @@
|
||||
# syntax=docker/dockerfile:1
|
||||
|
||||
FROM alpine:3.14 AS gen
|
||||
ARG ALPINE_VERSION=3.20
|
||||
|
||||
FROM alpine:${ALPINE_VERSION} AS gen
|
||||
RUN apk add --no-cache git
|
||||
WORKDIR /src
|
||||
RUN --mount=type=bind,target=. <<EOT
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
mkdir /out
|
||||
# see also ".mailmap" for how email addresses and names are deduplicated
|
||||
{
|
||||
echo "# This file lists all individuals having contributed content to the repository."
|
||||
echo "# For how it is generated, see hack/dockerfiles/authors.Dockerfile."
|
||||
echo
|
||||
git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf
|
||||
} > /out/AUTHORS
|
||||
cat /out/AUTHORS
|
||||
set -e
|
||||
mkdir /out
|
||||
# see also ".mailmap" for how email addresses and names are deduplicated
|
||||
{
|
||||
echo "# This file lists all individuals having contributed content to the repository."
|
||||
echo "# For how it is generated, see hack/dockerfiles/authors.Dockerfile."
|
||||
echo
|
||||
git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf
|
||||
} > /out/AUTHORS
|
||||
cat /out/AUTHORS
|
||||
EOT
|
||||
|
||||
FROM scratch AS update
|
||||
@@ -22,12 +23,12 @@ COPY --from=gen /out /
|
||||
|
||||
FROM gen AS validate
|
||||
RUN --mount=type=bind,target=.,rw <<EOT
|
||||
set -e
|
||||
git add -A
|
||||
cp -rf /out/* .
|
||||
if [ -n "$(git status --porcelain -- AUTHORS)" ]; then
|
||||
echo >&2 'ERROR: Authors result differs. Please update with "make authors"'
|
||||
git status --porcelain -- AUTHORS
|
||||
exit 1
|
||||
fi
|
||||
set -e
|
||||
git add -A
|
||||
cp -rf /out/* .
|
||||
if [ -n "$(git status --porcelain -- AUTHORS)" ]; then
|
||||
echo >&2 'ERROR: Authors result differs. Please update with "make authors"'
|
||||
git status --porcelain -- AUTHORS
|
||||
exit 1
|
||||
fi
|
||||
EOT
|
||||
|
@@ -1,6 +1,6 @@
|
||||
# syntax=docker/dockerfile:1
|
||||
|
||||
ARG GO_VERSION=1.21
|
||||
ARG GO_VERSION=1.22
|
||||
ARG FORMATS=md,yaml
|
||||
|
||||
FROM golang:${GO_VERSION}-alpine AS docsgen
|
||||
|
@@ -5,7 +5,7 @@
# Copyright The Buildx Authors.
# Licensed under the Apache License, Version 2.0

ARG GO_VERSION="1.21"
ARG GO_VERSION="1.22"
ARG PROTOC_VERSION="3.11.4"

# protoc is dynamically linked to glibc so can't use alpine base
hack/dockerfiles/govulncheck.Dockerfile (new file, 30 lines)
@@ -0,0 +1,30 @@
# syntax=docker/dockerfile:1

ARG GO_VERSION="1.22"
ARG GOVULNCHECK_VERSION="v1.1.3"
ARG FORMAT="text"

FROM golang:${GO_VERSION}-alpine AS base
WORKDIR /go/src/github.com/docker/buildx
RUN apk add --no-cache jq moreutils
ARG GOVULNCHECK_VERSION
RUN --mount=type=cache,target=/root/.cache \
--mount=type=cache,target=/go/pkg/mod \
go install golang.org/x/vuln/cmd/govulncheck@$GOVULNCHECK_VERSION

FROM base AS run
ARG FORMAT
RUN --mount=type=bind,target=. <<EOT
set -ex
mkdir /out
govulncheck -format ${FORMAT} ./... | tee /out/govulncheck.out
if [ "${FORMAT}" = "sarif" ]; then
# Make sure "results" field is defined in SARIF output otherwise GitHub Code Scanning
# will fail when uploading report with "Invalid SARIF. Missing 'results' array in run."
# Relates to https://github.com/golang/vuln/blob/ffdef74cc44d7eb71931d8d414c478b966812488/internal/sarif/sarif.go#L69
jq '(.runs[] | select(.results == null) | .results) |= []' /out/govulncheck.out | tee >(sponge /out/govulncheck.out)
fi
EOT

FROM scratch AS output
COPY --from=run /out /
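(Aside, not part of the diff: a minimal sketch of running the new govulncheck stage on its own, assuming the file above is checked in at hack/dockerfiles/govulncheck.Dockerfile; building with FORMAT=sarif exercises the jq fix-up and the report lands under ./bin via the scratch "output" stage.)

docker buildx build \
  --file hack/dockerfiles/govulncheck.Dockerfile \
  --build-arg FORMAT=sarif \
  --target output \
  --output type=local,dest=./bin \
  .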
@@ -1,6 +1,6 @@
# syntax=docker/dockerfile:1

ARG GO_VERSION=1.21
ARG GO_VERSION=1.22
ARG XX_VERSION=1.3.0
ARG GOLANGCI_LINT_VERSION=1.57.2
ARG GOPLS_VERSION=v0.20.0
@@ -13,18 +13,24 @@ FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS golang-base
RUN apk add --no-cache git gcc musl-dev

FROM golang-base AS lint
FROM golang-base AS lint-base
ENV GOFLAGS="-buildvcs=false"
ARG GOLANGCI_LINT_VERSION
RUN wget -O- -nv https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v${GOLANGCI_LINT_VERSION}
COPY --link --from=xx / /
WORKDIR /go/src/github.com/docker/buildx
ARG TARGETPLATFORM

FROM lint-base AS lint
RUN --mount=target=/go/src/github.com/docker/buildx \
--mount=target=/root/.cache,type=cache,id=lint-cache-$TARGETPLATFORM \
xx-go --wrap && \
golangci-lint run

FROM lint-base AS validate-golangci
RUN --mount=target=/go/src/github.com/docker/buildx \
golangci-lint config verify

FROM golang-base AS gopls
RUN apk add --no-cache git
ARG GOPLS_VERSION
@@ -1,7 +1,7 @@
# syntax=docker/dockerfile:1

ARG GO_VERSION=1.21
ARG MODOUTDATED_VERSION=v0.8.0
ARG GO_VERSION=1.22
ARG MODOUTDATED_VERSION=v0.9.0

FROM golang:${GO_VERSION}-alpine AS base
RUN apk add --no-cache git rsync
@@ -41,5 +41,5 @@ FROM psampaz/go-mod-outdated:${MODOUTDATED_VERSION} AS go-mod-outdated
FROM base AS outdated
RUN --mount=target=.,ro \
--mount=target=/go/pkg/mod,type=cache \
--mount=from=go-mod-outdated,source=/home/go-mod-outdated,target=/usr/bin/go-mod-outdated \
--mount=from=go-mod-outdated,source=/usr/bin/go-mod-outdated,target=/usr/bin/go-mod-outdated \
go list -mod=readonly -u -m -json all | go-mod-outdated -update -direct
hack/test (23 lines changed)
@@ -4,6 +4,7 @@ set -eu -o pipefail

: "${BUILDX_CMD=docker buildx}"

: "${TEST_COVERAGE=}"
: "${TEST_IMAGE_BUILD=1}"
: "${TEST_IMAGE_ID=buildx-tests}"

@@ -22,7 +23,7 @@ testReportsDir="$(pwd)/bin/testreports"
mkdir -p "$testReportsDir"
testReportsVol="-v $testReportsDir:/testreports"
gotestsumArgs="--format=standard-verbose --jsonfile=/testreports/go-test-report$TEST_REPORT_SUFFIX.json --junitfile=/testreports/junit-report$TEST_REPORT_SUFFIX.xml"
gotestArgs="-mod=vendor -coverprofile=/testreports/coverage-report$TEST_REPORT_SUFFIX.txt -covermode=atomic"
gotestArgs="-mod=vendor"

cacheVolume="buildx-test-cache"
if ! docker container inspect "$cacheVolume" >/dev/null 2>/dev/null; then
@@ -32,5 +33,23 @@ if [ "$TEST_KEEP_CACHE" != "1" ]; then
trap 'docker rm -v $cacheVolume' EXIT
fi

cid=$(docker create --rm -v /tmp $testReportsVol --volumes-from=$cacheVolume -e GITHUB_REF -e TEST_DOCKERD -e TEST_BUILDKIT_IMAGE -e TEST_BUILDKIT_TAG -e TEST_BUILDX_EXPERIMENTAL -e SKIP_INTEGRATION_TESTS -e GOTESTSUM_FORMAT ${BUILDKIT_INTEGRATION_SNAPSHOTTER:+"-eBUILDKIT_INTEGRATION_SNAPSHOTTER"} -e BUILDKIT_REGISTRY_MIRROR_DIR=/root/.cache/registry --privileged $TEST_IMAGE_ID gotestsum $gotestsumArgs --packages="${TESTPKGS:-./...}" -- $gotestArgs ${TESTFLAGS:--v})
if [ "$TEST_COVERAGE" = "1" ]; then
export GO_TEST_COVERPROFILE="/testreports/coverage-report$TEST_REPORT_SUFFIX.txt"
fi

cid=$(docker create --rm --privileged \
-v /tmp $testReportsVol \
--volumes-from=$cacheVolume \
-e GITHUB_REF \
-e TEST_DOCKERD \
-e TEST_BUILDKIT_IMAGE \
-e TEST_BUILDKIT_TAG \
-e TEST_BUILDX_EXPERIMENTAL \
-e SKIP_INTEGRATION_TESTS \
-e GOTESTSUM_FORMAT \
-e GO_TEST_COVERPROFILE \
${BUILDKIT_INTEGRATION_SNAPSHOTTER:+"-eBUILDKIT_INTEGRATION_SNAPSHOTTER"} \
-e BUILDKIT_REGISTRY_MIRROR_DIR=/root/.cache/registry \
$TEST_IMAGE_ID gotestsumandcover $gotestsumArgs --packages="${TESTPKGS:-./...}" -- $gotestArgs ${TESTFLAGS:--v})

docker start -a -i $cid
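(Aside, not part of the diff: after this change the coverage profile is only produced when TEST_COVERAGE=1, which exports GO_TEST_COVERPROFILE into the test container; a sketch of an opt-in run, with TESTPKGS narrowing the packages as the script already allows.)

TEST_COVERAGE=1 TESTPKGS=./tests ./hack/test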
@@ -109,21 +109,21 @@ buildxCmd inspect --bootstrap --builder="${builderName}"

# create dockerfile
cat > "${dockerfile}" <<EOL
FROM busybox as build
fRom busybox as build
ARG TARGETPLATFORM
ARG BUILDPLATFORM
RUN echo "I am running on \$BUILDPLATFORM, building for \$TARGETPLATFORM" > /log

FROM busybox AS log
FROM busybox As log
COPY --from=build /log /log
RUN cat /log
RUN uname -a

FROM busybox AS hello
FROm busybox AS hello
RUN echo hello > /hello

FROM scratch
COPY --from=log /log /log
CoPY --from=log /log /log
COPY --from=hello /hello /hello
EOL
@@ -21,9 +21,10 @@ const (
type State struct {
// Target is the name of the invoked target (default if empty)
Target string
// LocalPath is the absolute path to the context
// LocalPath is the absolute path to the context or remote context
LocalPath string
// DockerfilePath is the absolute path to the Dockerfile
// DockerfilePath is the absolute path to the Dockerfile or relative if
// context is remote
DockerfilePath string
// GroupRef is the ref of the state group that this ref belongs to
GroupRef string `json:",omitempty"`
@@ -4,7 +4,7 @@ import (
"fmt"
"time"

"github.com/containerd/containerd/platforms"
"github.com/containerd/platforms"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/platformutil"
specs "github.com/opencontainers/image-spec/specs-go/v1"
tests/bake.go (592 lines changed)
@@ -1,14 +1,19 @@
package tests

import (
"bytes"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"testing"

"github.com/containerd/continuity/fs/fstest"
"github.com/docker/buildx/bake"
"github.com/docker/buildx/util/gitutil"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/frontend/subrequests/lint"
"github.com/moby/buildkit/identity"
provenancetypes "github.com/moby/buildkit/solver/llbsolver/provenance/types"
"github.com/moby/buildkit/util/contentutil"
@@ -26,6 +31,7 @@ func bakeCmd(sb integration.Sandbox, opts ...cmdOpt) (string, error) {
}

var bakeTests = []func(t *testing.T, sb integration.Sandbox){
testBakePrint,
testBakeLocal,
testBakeLocalMulti,
testBakeRemote,
@@ -42,9 +48,61 @@ var bakeTests = []func(t *testing.T, sb integration.Sandbox){
testBakeEmpty,
testBakeShmSize,
testBakeUlimits,
testBakeMetadata,
testBakeMetadataProvenance,
testBakeMetadataWarnings,
testBakeMetadataWarningsDedup,
testBakeMultiExporters,
testBakeLoadPush,
testListTargets,
testListVariables,
testBakeCallCheck,
testBakeCallCheckFlag,
testBakeCallMetadata,
testBakeMultiPlatform,
testBakeCheckCallOutput,
}

func testBakePrint(t *testing.T, sb integration.Sandbox) {
dockerfile := []byte(`
FROM busybox
ARG HELLO
RUN echo "Hello ${HELLO}"
`)
bakefile := []byte(`
target "build" {
args = {
HELLO = "foo"
}
}
`)
dir := tmpdir(
t,
fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)

cmd := buildxCmd(sb, withDir(dir), withArgs("bake", "--print", "build"))
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
cmd.Stdout = &stdout
cmd.Stderr = &stderr
require.NoError(t, cmd.Run(), stdout.String(), stderr.String())

var def struct {
Group map[string]*bake.Group `json:"group,omitempty"`
Target map[string]*bake.Target `json:"target"`
}
require.NoError(t, json.Unmarshal(stdout.Bytes(), &def))

require.Len(t, def.Group, 1)
require.Contains(t, def.Group, "default")

require.Equal(t, []string{"build"}, def.Group["default"].Targets)
require.Len(t, def.Target, 1)
require.Contains(t, def.Target, "build")
require.Equal(t, ".", *def.Target["build"].Context)
require.Equal(t, "Dockerfile", *def.Target["build"].Dockerfile)
require.Equal(t, map[string]*string{"HELLO": ptrstr("foo")}, def.Target["build"].Args)
}
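(Aside, not part of the diff: testBakePrint pins down the JSON that "bake --print" emits; from a directory containing the same Dockerfile and docker-bake.hcl the test writes, the output looks roughly like the sketch below, per the assertions above.)

docker buildx bake --print build
# {
#   "group":  { "default": { "targets": ["build"] } },
#   "target": { "build": { "context": ".", "dockerfile": "Dockerfile", "args": { "HELLO": "foo" } } }
# }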
func testBakeLocal(t *testing.T, sb integration.Sandbox) {
@@ -633,19 +691,22 @@ target "default" {
require.Contains(t, string(dt), `1024`)
}

func testBakeMetadata(t *testing.T, sb integration.Sandbox) {
func testBakeMetadataProvenance(t *testing.T, sb integration.Sandbox) {
t.Run("default", func(t *testing.T) {
bakeMetadataProvenance(t, sb, "")
})
t.Run("max", func(t *testing.T) {
bakeMetadata(t, sb, "max")
bakeMetadataProvenance(t, sb, "max")
})
t.Run("min", func(t *testing.T) {
bakeMetadata(t, sb, "min")
bakeMetadataProvenance(t, sb, "min")
})
t.Run("disabled", func(t *testing.T) {
bakeMetadata(t, sb, "disabled")
bakeMetadataProvenance(t, sb, "disabled")
})
}

func bakeMetadata(t *testing.T, sb integration.Sandbox, metadataMode string) {
func bakeMetadataProvenance(t *testing.T, sb integration.Sandbox, metadataMode string) {
dockerfile := []byte(`
FROM scratch
COPY foo /foo
@@ -676,7 +737,7 @@ target "default" {
withEnv("BUILDX_METADATA_PROVENANCE="+metadataMode),
)
out, err := cmd.CombinedOutput()
require.NoError(t, err, out)
require.NoError(t, err, string(out))

dt, err := os.ReadFile(filepath.Join(dirDest, "md.json"))
require.NoError(t, err)
@@ -706,6 +767,180 @@ target "default" {
require.Equal(t, provenancetypes.BuildKitBuildType, prv.BuildType)
}
func testBakeMetadataWarnings(t *testing.T, sb integration.Sandbox) {
t.Run("default", func(t *testing.T) {
bakeMetadataWarnings(t, sb, "")
})
t.Run("true", func(t *testing.T) {
bakeMetadataWarnings(t, sb, "true")
})
t.Run("false", func(t *testing.T) {
bakeMetadataWarnings(t, sb, "false")
})
}

func bakeMetadataWarnings(t *testing.T, sb integration.Sandbox, mode string) {
dockerfile := []byte(`
frOM busybox as base
cOpy Dockerfile .
from scratch
COPy --from=base \
/Dockerfile \
/
`)
bakefile := []byte(`
target "default" {
}
`)
dir := tmpdir(
t,
fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)

dirDest := t.TempDir()

cmd := buildxCmd(
sb,
withDir(dir),
withArgs("bake", "--metadata-file", filepath.Join(dirDest, "md.json"), "--set", "*.output=type=cacheonly"),
withEnv("BUILDX_METADATA_WARNINGS="+mode),
)
out, err := cmd.CombinedOutput()
require.NoError(t, err, string(out))

dt, err := os.ReadFile(filepath.Join(dirDest, "md.json"))
require.NoError(t, err)

type mdT struct {
BuildWarnings []client.VertexWarning `json:"buildx.build.warnings"`
Default struct {
BuildRef string `json:"buildx.build.ref"`
} `json:"default"`
}
var md mdT
err = json.Unmarshal(dt, &md)
require.NoError(t, err, string(dt))

require.NotEmpty(t, md.Default.BuildRef, string(dt))
if mode == "" || mode == "false" {
require.Empty(t, md.BuildWarnings, string(dt))
return
}

skipNoCompatBuildKit(t, sb, ">= 0.14.0-0", "lint")
require.Len(t, md.BuildWarnings, 3, string(dt))
}
func testBakeMetadataWarningsDedup(t *testing.T, sb integration.Sandbox) {
dockerfile := []byte(`
frOM busybox as base
cOpy Dockerfile .
from scratch
COPy --from=base \
/Dockerfile \
/
`)
bakefile := []byte(`
group "default" {
targets = ["base", "def"]
}
target "base" {
target = "base"
}
target "def" {
}
`)
dir := tmpdir(
t,
fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)

dirDest := t.TempDir()

cmd := buildxCmd(
sb,
withDir(dir),
withArgs("bake", "--metadata-file", filepath.Join(dirDest, "md.json"), "--set", "*.output=type=cacheonly"),
withEnv("BUILDX_METADATA_WARNINGS=true"),
)
out, err := cmd.CombinedOutput()
require.NoError(t, err, string(out))

dt, err := os.ReadFile(filepath.Join(dirDest, "md.json"))
require.NoError(t, err)

type mdT struct {
BuildWarnings []client.VertexWarning `json:"buildx.build.warnings"`
Base struct {
BuildRef string `json:"buildx.build.ref"`
} `json:"base"`
Def struct {
BuildRef string `json:"buildx.build.ref"`
} `json:"def"`
}
var md mdT
err = json.Unmarshal(dt, &md)
require.NoError(t, err, string(dt))

require.NotEmpty(t, md.Base.BuildRef, string(dt))
require.NotEmpty(t, md.Def.BuildRef, string(dt))

skipNoCompatBuildKit(t, sb, ">= 0.14.0-0", "lint")
require.Len(t, md.BuildWarnings, 3, string(dt))
}
func testBakeMultiPlatform(t *testing.T, sb integration.Sandbox) {
registry, err := sb.NewRegistry()
if errors.Is(err, integration.ErrRequirements) {
t.Skip(err.Error())
}
require.NoError(t, err)
target := registry + "/buildx/registry:latest"

dockerfile := []byte(`
FROM --platform=$BUILDPLATFORM busybox:latest AS base
COPY foo /etc/foo
RUN cp /etc/foo /etc/bar

FROM scratch
COPY --from=base /etc/bar /bar
`)
bakefile := []byte(`
target "default" {
platforms = ["linux/amd64", "linux/arm64"]
}
`)
dir := tmpdir(
t,
fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", []byte("foo"), 0600),
)

cmd := buildxCmd(sb, withDir(dir), withArgs("bake"), withArgs("--set", fmt.Sprintf("*.output=type=image,name=%s,push=true", target)))
out, err := cmd.CombinedOutput()

if !isMobyWorker(sb) {
require.NoError(t, err, string(out))

desc, provider, err := contentutil.ProviderFromRef(target)
require.NoError(t, err)
imgs, err := testutil.ReadImages(sb.Context(), provider, desc)
require.NoError(t, err)

img := imgs.Find("linux/amd64")
require.NotNil(t, img)
img = imgs.Find("linux/arm64")
require.NotNil(t, img)

} else {
require.Error(t, err, string(out))
require.Contains(t, string(out), "Multi-platform build is not supported")
}
}

func testBakeMultiExporters(t *testing.T, sb integration.Sandbox) {
if !isDockerContainerWorker(sb) {
t.Skip("only testing with docker-container worker")
@@ -821,3 +1056,346 @@ target "default" {

// TODO: test metadata file when supported by multi exporters https://github.com/docker/buildx/issues/2181
}
func testListTargets(t *testing.T, sb integration.Sandbox) {
bakefile := []byte(`
target "foo" {
description = "This builds foo"
}
target "abc" {
}
`)
dir := tmpdir(
t,
fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
)

out, err := bakeCmd(
sb,
withDir(dir),
withArgs("--list-targets"),
)
require.NoError(t, err, out)

require.Equal(t, "TARGET\tDESCRIPTION\nabc\t\nfoo\tThis builds foo", strings.TrimSpace(out))
}

func testListVariables(t *testing.T, sb integration.Sandbox) {
bakefile := []byte(`
variable "foo" {
default = "bar"
description = "This is foo"
}
variable "abc" {
default = null
}
variable "def" {
}
target "default" {
}
`)
dir := tmpdir(
t,
fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
)

out, err := bakeCmd(
sb,
withDir(dir),
withArgs("--list-variables"),
)
require.NoError(t, err, out)

require.Equal(t, "VARIABLE\tVALUE\tDESCRIPTION\nabc\t\t<null>\t\ndef\t\t\t\nfoo\t\tbar\tThis is foo", strings.TrimSpace(out))
}
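(Aside, not part of the diff: the two tests above pin the tabular output of the new listing flags; against the same bake files the expected shape, per the assertions, is roughly:)

docker buildx bake --list-targets
# TARGET   DESCRIPTION
# abc
# foo      This builds foo

docker buildx bake --list-variables
# VARIABLE   VALUE    DESCRIPTION
# abc        <null>
# def
# foo        bar      This is foo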
func testBakeCallCheck(t *testing.T, sb integration.Sandbox) {
dockerfile := []byte(`
FROM scratch
COPy foo /foo
`)
bakefile := []byte(`
target "validate" {
call = "check"
}
`)
dir := tmpdir(
t,
fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)

out, err := bakeCmd(
sb,
withDir(dir),
withArgs("validate"),
)
require.Error(t, err, out)

require.Contains(t, out, "validate")
require.Contains(t, out, "ConsistentInstructionCasing")
}

func testBakeCallCheckFlag(t *testing.T, sb integration.Sandbox) {
dockerfile := []byte(`
FROM scratch
COPy foo /foo
`)
dockerfile2 := []byte(`
FROM scratch
COPY foo$BAR /foo
`)
bakefile := []byte(`
target "build" {
dockerfile = "a.Dockerfile"
}

target "another" {
dockerfile = "b.Dockerfile"
}
`)
dir := tmpdir(
t,
fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
fstest.CreateFile("a.Dockerfile", dockerfile, 0600),
fstest.CreateFile("b.Dockerfile", dockerfile2, 0600),
)

out, err := bakeCmd(
sb,
withDir(dir),
withArgs("build", "another", "--check"),
)
require.Error(t, err, out)

require.Contains(t, out, "build")
require.Contains(t, out, "ConsistentInstructionCasing")

require.Contains(t, out, "another")
require.Contains(t, out, "UndefinedVar")

cmd := buildxCmd(
sb,
withDir(dir),
withArgs("bake", "--progress=quiet", "build", "another", "--call", "check,format=json"),
)
outB, err := cmd.Output()
require.Error(t, err, string(outB))

var res map[string]any
err = json.Unmarshal(outB, &res)
require.NoError(t, err, out)

targets, ok := res["target"].(map[string]any)
require.True(t, ok)

build, ok := targets["build"].(map[string]any)
require.True(t, ok)

_, ok = build["build"]
require.True(t, ok)

check, ok := build["check"].(map[string]any)
require.True(t, ok)

warnings, ok := check["warnings"].([]any)
require.True(t, ok)

require.Len(t, warnings, 1)

another, ok := targets["another"].(map[string]any)
require.True(t, ok)

_, ok = another["build"]
require.True(t, ok)

check, ok = another["check"].(map[string]any)
require.True(t, ok)

warnings, ok = check["warnings"].([]any)
require.True(t, ok)

require.Len(t, warnings, 1)
}
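(Aside, not part of the diff: testBakeCallCheckFlag exercises both spellings of the lint entry point; a sketch of the equivalent manual invocations against the same bake file, matching the arguments used above.)

docker buildx bake build another --check
docker buildx bake --progress=quiet build another --call check,format=json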
func testBakeCallMetadata(t *testing.T, sb integration.Sandbox) {
dockerfile := []byte(`
frOM busybox as base
cOpy Dockerfile .
from scratch
COPy --from=base \
/Dockerfile \
/
`)
bakefile := []byte(`
target "default" {}
`)
dir := tmpdir(
t,
fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)

cmd := buildxCmd(
sb,
withDir(dir),
withArgs("bake", "--call", "check,format=json", "--metadata-file", filepath.Join(dir, "md.json")),
)
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
cmd.Stdout = &stdout
cmd.Stderr = &stderr
require.Error(t, cmd.Run(), stdout.String(), stderr.String())

var res map[string]any
require.NoError(t, json.Unmarshal(stdout.Bytes(), &res), stdout.String())
targets, ok := res["target"].(map[string]any)
require.True(t, ok)
def, ok := targets["default"].(map[string]any)
require.True(t, ok)
_, ok = def["build"]
require.True(t, ok)
check, ok := def["check"].(map[string]any)
require.True(t, ok)
warnings, ok := check["warnings"].([]any)
require.True(t, ok)
require.Len(t, warnings, 3)

dt, err := os.ReadFile(filepath.Join(dir, "md.json"))
require.NoError(t, err)

type mdT struct {
Default struct {
BuildRef string `json:"buildx.build.ref"`
ResultJSON lint.LintResults `json:"result.json"`
} `json:"default"`
}
var md mdT
require.NoError(t, json.Unmarshal(dt, &md), dt)
require.Empty(t, md.Default.BuildRef)
require.Len(t, md.Default.ResultJSON.Warnings, 3)
}

func testBakeCheckCallOutput(t *testing.T, sb integration.Sandbox) {
t.Run("check for warning count msg in check without warnings", func(t *testing.T) {
dockerfile := []byte(`
FROM busybox
COPY Dockerfile .
`)
bakefile := []byte(`
target "default" {}
`)
dir := tmpdir(
t,
fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)

cmd := buildxCmd(
sb,
withDir(dir),
withArgs("bake", "--call", "check"),
)
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
cmd.Stdout = &stdout
cmd.Stderr = &stderr
require.NoError(t, cmd.Run(), stdout.String(), stderr.String())
require.Contains(t, stdout.String(), "Check complete, no warnings found.")
})
t.Run("check for warning count msg in check with single warning", func(t *testing.T) {
dockerfile := []byte(`
FROM busybox
copy Dockerfile .
`)
bakefile := []byte(`
target "default" {}
`)
dir := tmpdir(
t,
fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)

cmd := buildxCmd(
sb,
withDir(dir),
withArgs("bake", "--call", "check"),
)
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
cmd.Stdout = &stdout
cmd.Stderr = &stderr
require.Error(t, cmd.Run(), stdout.String(), stderr.String())
require.Contains(t, stdout.String(), "Check complete, 1 warning has been found!")
})
t.Run("check for warning count msg in check with multiple warnings", func(t *testing.T) {
dockerfile := []byte(`
FROM busybox
copy Dockerfile .

FROM busybox as base
COPY Dockerfile .
`)
bakefile := []byte(`
target "default" {}
`)
dir := tmpdir(
t,
fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)

cmd := buildxCmd(
sb,
withDir(dir),
withArgs("bake", "--call", "check"),
)
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
cmd.Stdout = &stdout
cmd.Stderr = &stderr
require.Error(t, cmd.Run(), stdout.String(), stderr.String())
require.Contains(t, stdout.String(), "Check complete, 2 warnings have been found!")
})
t.Run("check for warnings with multiple build targets", func(t *testing.T) {
dockerfile1 := []byte(`
FROM busybox
copy Dockerfile .
`)
dockerfile2 := []byte(`
FROM busybox
copy Dockerfile .

FROM busybox as base
COPY Dockerfile .
`)
bakefile := []byte(`
target "first" {
dockerfile = "Dockerfile.first"
}
target "second" {
dockerfile = "Dockerfile.second"
}
`)
dir := tmpdir(
t,
fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
fstest.CreateFile("Dockerfile.first", dockerfile1, 0600),
fstest.CreateFile("Dockerfile.second", dockerfile2, 0600),
)

cmd := buildxCmd(
sb,
withDir(dir),
withArgs("bake", "--call", "check", "first", "second"),
)
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
cmd.Stdout = &stdout
cmd.Stderr = &stderr
require.Error(t, cmd.Run(), stdout.String(), stderr.String())
require.Contains(t, stdout.String(), "Check complete, 1 warning has been found!")
require.Contains(t, stdout.String(), "Check complete, 2 warnings have been found!")
})
}
Some files were not shown because too many files have changed in this diff.