Mirror of https://gitea.com/Lydanne/buildx.git (synced 2025-08-16 00:35:55 +08:00)

Compare commits

191 Commits
Commit SHA1s:

9e17bc7a4c, e1e8f5c68d, 6ed39b2618, 03019049e8, 23ce21c341, 4dac5295a1, 9a48aca461, 679407862f,
674cfff1a4, 19a241f4ed, 7da31076ae, 384f0565f5, 6df3a09284, e7be640d9b, 2f1be25b8f, a40edbb47b,
2eaea647d8, f3a3d9c26b, 9ba3f77219, 2799ed6dd8, 719a41a4c3, a9807be458, 7a7be2ffa1, ab533b0cb4,
0855cab1bd, 735555ff7b, 67ccbd06f6, c370f90b73, 9730a20f6b, 2e93ac32bc, 19c22136b4, bad5063577,
286c018f84, ac970c03e7, 5398c33937, 1365652a74, a4f0a21468, d55616b22c, 113606a24c, cd38da0244,
cc6547c51d, 26f2e002c6, 372feb38ff, b08d576ec0, 0034cdbffc, a9666e7df1, b7e77af256, d72ff8f88c,
d75c650792, 8c74109330, 9f102b5c34, b4b2dc9664, 2e81e301ae, fb4417e14d, eb74b483bd, db194abdc8,
86eb3be1c4, a05a166f81, cfc9d3a8c9, 5bac0b1197, 0b4e624aaa, b7b5a3a1cc, f8de3c3bdc, fa0c3e3786,
d69301d57b, ee77cdb175, 8fb1157b5f, a34cdff84e, 77139daa4b, 10e3892a63, d80ece5bb3, 1f44971fc9,
a91db7ccc9, df6d36af35, 98c3abb756, 3b824a0e39, b0156cd631, 29614f9734, f1b895196c, 900502b139,
49bd7e4edc, 8f9c25e8b0, 7659798f80, 7b8bf9f801, 8efc528b84, 8593e0397b, 0c0e8eefdf, e114dd09a5,
d25e260d2e, 86e4e77ac1, 534d9fc276, e0c67bfc79, 53e576b306, d3aef6642c, 824cef1b92, a8b0fa8965,
45dfb84361, 13ef01196d, 646df6d4a0, d46c1d8141, c682742de0, 391acba718, db4b96e62c, 882ef0db91,
967fc2a696, 212d598ab1, bf95aa3dfa, 18ccba0720, f5196f1167, ef99381eab, a41c9fa649, 00fdcd38ab,
97f1d47464, 337578242d, 503a8925d2, 0d708c0bc2, 3a7523a117, 5dc1a3308d, eb78253dfd, 5f8b78a113,
67d3ed34e4, b88423be50, c1e2ae5636, 23afb70e40, 812b42b329, d5d3d3d502, e19c729d3e, aefa49c4fa,
7d927ee604, 058c098c8c, 7b7dbe88b1, cadf4a5893, 6cd9fef556, 963b9ca30d, 4636c8051a, e23695d50d,
6eff9b2d51, fcbfc85f42, 9a204c44c3, 4c6eba5acd, fea7459880, e2d52a8465, 48a591b1e1, 128acdb471,
411d3f8cea, 7925a96726, b06bddfee6, fe17ebda89, 4ed1e07f16, f49593ce2c, 4e91fe6507, 921b576f3a,
548c80ab5a, f3a4740d5f, 89917dc696, f7276201ac, beb9f515c0, 4f7d145c0e, ccdf63c644, 9a6b8754b1,
e75ac22ba6, 62f5cc7c80, 6272ae1afa, accfbf6e24, af2d8fe555, 18f4275a92, 221a608b3c, cc0391eba5,
aef388bf7a, 80c16bc28c, 75160643e1, ad18ffc018, 80c3832c94, 7762ab2c38, b973de2dd3, 352ce7e875,
cdfc1ed750, d0d3433b12, b04d39494f, 52f503e806, 79a978484d, f7992033bf, 73f61aa338, faa573f484,
0a4a1babd1, 461bd9e5d1, d6fdf83f45, ef4e9fea83, 0c296fe857, 2dc0350ffe, b85fc5c484
.github/workflows/build.yml (vendored) — 37 lines changed

@@ -54,9 +54,9 @@ jobs:
   - master
   - latest
   - buildx-stable-1
+  - v0.20.2
   - v0.19.0
   - v0.18.2
-  - v0.17.2
  worker:
   - docker-container
   - remote

@@ -76,6 +76,16 @@ jobs:
   - worker: docker+containerd # same as docker, but with containerd snapshotter
     pkg: ./tests
     mode: experimental
+  - worker: "docker@27.5"
+    pkg: ./tests
+  - worker: "docker+containerd@27.5" # same as docker, but with containerd snapshotter
+    pkg: ./tests
+  - worker: "docker@27.5"
+    pkg: ./tests
+    mode: experimental
+  - worker: "docker+containerd@27.5" # same as docker, but with containerd snapshotter
+    pkg: ./tests
+    mode: experimental
   - worker: "docker@26.1"
     pkg: ./tests
   - worker: "docker+containerd@26.1" # same as docker, but with containerd snapshotter

@@ -248,12 +258,20 @@ jobs:
  matrix:
   os:
    - freebsd
+    - netbsd
    - openbsd
+  env:
+    # https://github.com/hashicorp/vagrant/issues/13652
+    VAGRANT_DISABLE_STRICT_DEPENDENCY_ENFORCEMENT: 1
  steps:
   -
    name: Prepare
    run: |
      echo "VAGRANT_FILE=hack/Vagrantfile.${{ matrix.os }}" >> $GITHUB_ENV
+
+      # Sets semver Go version to be able to download tarball during vagrant setup
+      goVersion=$(curl --silent "https://go.dev/dl/?mode=json&include=all" | jq -r '.[].files[].version' | uniq | sed -e 's/go//' | sort -V | grep $GO_VERSION | tail -1)
+      echo "GO_VERSION=$goVersion" >> $GITHUB_ENV
   -
    name: Checkout
    uses: actions/checkout@v4

@@ -396,6 +414,18 @@ jobs:
   - test-unit
  if: ${{ github.event_name != 'pull_request' && github.repository == 'docker/buildx' }}
  steps:
+  -
+   name: Free disk space
+   uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be # v1.3.1
+   with:
+     android: true
+     dotnet: true
+     haskell: true
+     large-packages: true
+     swap-storage: true
+  -
+   name: Checkout
+   uses: actions/checkout@v4
   -
    name: Set up QEMU
    uses: docker/setup-qemu-action@v3

@@ -429,9 +459,10 @@ jobs:
   name: Build and push image
   uses: docker/bake-action@v6
   with:
+    source: .
     files: |
       ./docker-bake.hcl
-      cwd://${{ steps.meta.outputs.bake-file }}
+      ${{ steps.meta.outputs.bake-file }}
     targets: image-cross
     push: ${{ github.event_name != 'pull_request' }}
     sbom: true

@@ -504,7 +535,7 @@ jobs:
  -
   name: GitHub Release
   if: startsWith(github.ref, 'refs/tags/v')
-  uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # v2.2.1
+  uses: softprops/action-gh-release@da05d552573ad5aba039eaac05058a918a7bf631 # v2.2.2
   env:
    GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
   with:
.github/workflows/docs-release.yml (vendored) — 2 lines changed

@@ -77,7 +77,7 @@ jobs:
   VENDOR_MODULE: github.com/docker/buildx@${{ env.RELEASE_NAME }}
  -
   name: Create PR on docs repo
-  uses: peter-evans/create-pull-request@67ccf781d68cd99b580ae25a5c18a1cc84ffff1f # v7.0.6
+  uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
   with:
    token: ${{ secrets.GHPAT_DOCS_DISPATCH }}
    push-to-fork: docker-tools-robot/docker.github.io
.github/workflows/e2e.yml (vendored) — 6 lines changed

@@ -29,7 +29,7 @@ env:
  SETUP_BUILDX_VERSION: "edge"
  SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
  DESTDIR: "./bin"
- K3S_VERSION: "v1.21.2-k3s1"
+ K3S_VERSION: "v1.32.2+k3s1"

 jobs:
  build:

@@ -65,7 +65,7 @@ jobs:
   retention-days: 7

 driver:
-  runs-on: ubuntu-20.04
+  runs-on: ubuntu-24.04
   needs:
    - build
   strategy:

@@ -153,7 +153,7 @@ jobs:
  -
   name: Install k3s
   if: matrix.driver == 'kubernetes'
-  uses: crazy-max/.github/.github/actions/install-k3s@fa6141aedf23596fb8bdcceab9cce8dadaa31bd9
+  uses: crazy-max/.github/.github/actions/install-k3s@7730d1434364d4b9aded32735b078a7ace5ea79a
   with:
    version: ${{ env.K3S_VERSION }}
  -
.github/workflows/pr-assign-author.yml (vendored, new file) — 17 lines added

@@ -0,0 +1,17 @@
+name: pr-assign-author
+
+permissions:
+  contents: read
+
+on:
+  pull_request_target:
+    types:
+      - opened
+      - reopened
+
+jobs:
+  run:
+    uses: crazy-max/.github/.github/workflows/pr-assign-author.yml@c27924b5b93ccfe6dcc0d7b22e779ef3c05f9a92
+    permissions:
+      contents: read
+      pull-requests: write
@@ -1,9 +1,6 @@
 run:
  timeout: 30m
  modules-download-mode: vendor
- # default uses Go version from the go.mod file, fallback on the env var
- # `GOVERSION`, fallback on 1.17: https://golangci-lint.run/usage/configuration/#run-configuration
- go: "1.23"

 linters:
  enable:
Dockerfile — 22 lines changed

@@ -5,20 +5,23 @@ ARG ALPINE_VERSION=3.21
 ARG XX_VERSION=1.6.1

 # for testing
-ARG DOCKER_VERSION=28.0.0-rc.1
+ARG DOCKER_VERSION=28.1.0
+ARG DOCKER_VERSION_ALT_27=27.5.1
 ARG DOCKER_VERSION_ALT_26=26.1.3
 ARG DOCKER_CLI_VERSION=${DOCKER_VERSION}
 ARG GOTESTSUM_VERSION=v1.12.0
-ARG REGISTRY_VERSION=2.8.3
+ARG REGISTRY_VERSION=3.0.0
-ARG BUILDKIT_VERSION=v0.19.0
+ARG BUILDKIT_VERSION=v0.21.0
 ARG UNDOCK_VERSION=0.9.0

 FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
 FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS golatest
 FROM moby/moby-bin:$DOCKER_VERSION AS docker-engine
 FROM dockereng/cli-bin:$DOCKER_CLI_VERSION AS docker-cli
-FROM moby/moby-bin:$DOCKER_VERSION_ALT_26 AS docker-engine-alt
+FROM moby/moby-bin:$DOCKER_VERSION_ALT_27 AS docker-engine-alt27
-FROM dockereng/cli-bin:$DOCKER_VERSION_ALT_26 AS docker-cli-alt
+FROM moby/moby-bin:$DOCKER_VERSION_ALT_26 AS docker-engine-alt26
+FROM dockereng/cli-bin:$DOCKER_VERSION_ALT_27 AS docker-cli-alt27
+FROM dockereng/cli-bin:$DOCKER_VERSION_ALT_26 AS docker-cli-alt26
 FROM registry:$REGISTRY_VERSION AS registry
 FROM moby/buildkit:$BUILDKIT_VERSION AS buildkit
 FROM crazymax/undock:$UNDOCK_VERSION AS undock

@@ -102,6 +105,7 @@ COPY --link --from=buildx-build /usr/bin/docker-buildx /buildx
 FROM binaries-unix AS binaries-darwin
 FROM binaries-unix AS binaries-freebsd
 FROM binaries-unix AS binaries-linux
+FROM binaries-unix AS binaries-netbsd
 FROM binaries-unix AS binaries-openbsd

 FROM scratch AS binaries-windows

@@ -127,13 +131,15 @@ COPY --link --from=gotestsum /out /usr/bin/
 COPY --link --from=registry /bin/registry /usr/bin/
 COPY --link --from=docker-engine / /usr/bin/
 COPY --link --from=docker-cli / /usr/bin/
-COPY --link --from=docker-engine-alt / /opt/docker-alt-26/
+COPY --link --from=docker-engine-alt27 / /opt/docker-alt-27/
-COPY --link --from=docker-cli-alt / /opt/docker-alt-26/
+COPY --link --from=docker-engine-alt26 / /opt/docker-alt-26/
+COPY --link --from=docker-cli-alt27 / /opt/docker-alt-27/
+COPY --link --from=docker-cli-alt26 / /opt/docker-alt-26/
 COPY --link --from=buildkit /usr/bin/buildkitd /usr/bin/
 COPY --link --from=buildkit /usr/bin/buildctl /usr/bin/
 COPY --link --from=undock /usr/local/bin/undock /usr/bin/
 COPY --link --from=binaries /buildx /usr/bin/
-ENV TEST_DOCKER_EXTRA="docker@26.1=/opt/docker-alt-26"
+ENV TEST_DOCKER_EXTRA="docker@27.5=/opt/docker-alt-27,docker@26.1=/opt/docker-alt-26"

 FROM integration-test-base AS integration-test
 COPY . .
@@ -21,7 +21,7 @@
 - [Verify essential information](#verify-essential-information)
 - [Classify the issue](#classify-the-issue)
 - [Prioritization guidelines for `kind/bug`](#prioritization-guidelines-for-kindbug)
-- [Issue lifecyle](#issue-lifecyle)
+- [Issue lifecycle](#issue-lifecycle)
 - [Examples](#examples)
 - [Submitting a bug](#submitting-a-bug)
 - [Pull request review process](#pull-request-review-process)

@@ -308,7 +308,7 @@ Examples:
 - Bugs in non-default configurations
 - Most enhancements

-## Issue lifecyle
+## Issue lifecycle

 ```mermaid
 flowchart LR
bake/bake.go — 136 lines changed

@@ -3,7 +3,9 @@ package bake
 import (
  "context"
  "encoding"
+ "encoding/json"
  "io"
+ "maps"
  "os"
  "path"
  "path/filepath"

@@ -27,7 +29,6 @@ import (
  "github.com/moby/buildkit/client"
  "github.com/moby/buildkit/client/llb"
  "github.com/moby/buildkit/session/auth/authprovider"
- "github.com/moby/buildkit/util/entitlements"
  "github.com/pkg/errors"
  "github.com/zclconf/go-cty/cty"
  "github.com/zclconf/go-cty/cty/convert"

@@ -46,6 +47,7 @@ type File struct {
 type Override struct {
  Value string
  ArrValue []string
+ Append bool
 }

 func defaultFilenames() []string {

@@ -486,10 +488,8 @@ func (c Config) loadLinks(name string, t *Target, m map[string]*Target, o map[st
  if target == name {
   return errors.Errorf("target %s cannot link to itself", target)
  }
- for _, v := range visited {
-  if v == target {
-   return errors.Errorf("infinite loop from %s to %s", name, target)
-  }
- }
+ if slices.Contains(visited, target) {
+  return errors.Errorf("infinite loop from %s to %s", name, target)
+ }
  t2, ok := m[target]
  if !ok {

@@ -529,9 +529,12 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
 m := map[string]map[string]Override{}
 for _, v := range v {
  parts := strings.SplitN(v, "=", 2)
- keys := strings.SplitN(parts[0], ".", 3)
+ skey := strings.TrimSuffix(parts[0], "+")
+ appendTo := strings.HasSuffix(parts[0], "+")
+ keys := strings.SplitN(skey, ".", 3)
  if len(keys) < 2 {
-  return nil, errors.Errorf("invalid override key %s, expected target.name", parts[0])
+  return nil, errors.Errorf("invalid override key %s, expected target.name", skey)
  }

  pattern := keys[0]

@@ -544,8 +547,7 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
   return nil, err
  }

- kk := strings.SplitN(parts[0], ".", 2)
+ okey := strings.Join(keys[1:], ".")

  for _, name := range names {
   t, ok := m[name]
   if !ok {

@@ -553,14 +555,15 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
    m[name] = t
   }

-  o := t[kk[1]]
+  override := t[okey]

   // IMPORTANT: if you add more fields here, do not forget to update
-  // docs/bake-reference.md and https://docs.docker.com/build/bake/overrides/
+  // docs/reference/buildx_bake.md (--set) and https://docs.docker.com/build/bake/overrides/
   switch keys[1] {
-  case "output", "cache-to", "cache-from", "tags", "platform", "secrets", "ssh", "attest", "entitlements", "network":
+  case "output", "cache-to", "cache-from", "tags", "platform", "secrets", "ssh", "attest", "entitlements", "network", "annotations":
    if len(parts) == 2 {
-    o.ArrValue = append(o.ArrValue, parts[1])
+    override.Append = appendTo
+    override.ArrValue = append(override.ArrValue, parts[1])
    }
   case "args":
    if len(keys) != 3 {

@@ -571,7 +574,7 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
    if !ok {
     continue
    }
-   o.Value = v
+   override.Value = v
   }
   fallthrough
  case "contexts":

@@ -581,11 +584,11 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
   fallthrough
  default:
   if len(parts) == 2 {
-   o.Value = parts[1]
+   override.Value = parts[1]
   }
  }

- t[kk[1]] = o
+ t[okey] = override
 }
 }
 return m, nil

@@ -731,6 +734,41 @@ type Target struct {
 linked bool
 }

+func (t *Target) MarshalJSON() ([]byte, error) {
+ tgt := *t
+ esc := func(s string) string {
+  return strings.ReplaceAll(strings.ReplaceAll(s, "${", "$${"), "%{", "%%{")
+ }
+
+ tgt.Annotations = slices.Clone(t.Annotations)
+ for i, v := range tgt.Annotations {
+  tgt.Annotations[i] = esc(v)
+ }
+
+ if tgt.DockerfileInline != nil {
+  escaped := esc(*tgt.DockerfileInline)
+  tgt.DockerfileInline = &escaped
+ }
+
+ tgt.Labels = maps.Clone(t.Labels)
+ for k, v := range t.Labels {
+  if v != nil {
+   escaped := esc(*v)
+   tgt.Labels[k] = &escaped
+  }
+ }
+
+ tgt.Args = maps.Clone(t.Args)
+ for k, v := range t.Args {
+  if v != nil {
+   escaped := esc(*v)
+   tgt.Args[k] = &escaped
+  }
+ }
+
+ return json.Marshal(tgt)
+}
+
 var (
  _ hclparser.WithEvalContexts = &Target{}
  _ hclparser.WithGetName = &Target{}

@@ -897,13 +935,21 @@ func (t *Target) AddOverrides(overrides map[string]Override, ent *EntitlementCon
  }
  t.Labels[keys[1]] = &value
 case "tags":
- t.Tags = o.ArrValue
+ if o.Append {
+  t.Tags = append(t.Tags, o.ArrValue...)
+ } else {
+  t.Tags = o.ArrValue
+ }
 case "cache-from":
  cacheFrom, err := buildflags.ParseCacheEntry(o.ArrValue)
  if err != nil {
   return err
  }
- t.CacheFrom = cacheFrom
+ if o.Append {
+  t.CacheFrom = t.CacheFrom.Merge(cacheFrom)
+ } else {
+  t.CacheFrom = cacheFrom
+ }
  for _, c := range t.CacheFrom {
   if c.Type == "local" {
    if v, ok := c.Attrs["src"]; ok {

@@ -916,7 +962,11 @@ func (t *Target) AddOverrides(overrides map[string]Override, ent *EntitlementCon
  if err != nil {
   return err
  }
- t.CacheTo = cacheTo
+ if o.Append {
+  t.CacheTo = t.CacheTo.Merge(cacheTo)
+ } else {
+  t.CacheTo = cacheTo
+ }
  for _, c := range t.CacheTo {
   if c.Type == "local" {
    if v, ok := c.Attrs["dest"]; ok {

@@ -933,7 +983,11 @@ func (t *Target) AddOverrides(overrides map[string]Override, ent *EntitlementCon
  if err != nil {
   return errors.Wrap(err, "invalid value for outputs")
  }
- t.Secrets = secrets
+ if o.Append {
+  t.Secrets = t.Secrets.Merge(secrets)
+ } else {
+  t.Secrets = secrets
+ }
  for _, s := range t.Secrets {
   if s.FilePath != "" {
    ent.FSRead = append(ent.FSRead, s.FilePath)

@@ -944,18 +998,30 @@ func (t *Target) AddOverrides(overrides map[string]Override, ent *EntitlementCon
  if err != nil {
   return errors.Wrap(err, "invalid value for outputs")
  }
- t.SSH = ssh
+ if o.Append {
+  t.SSH = t.SSH.Merge(ssh)
+ } else {
+  t.SSH = ssh
+ }
  for _, s := range t.SSH {
   ent.FSRead = append(ent.FSRead, s.Paths...)
  }
 case "platform":
- t.Platforms = o.ArrValue
+ if o.Append {
+  t.Platforms = append(t.Platforms, o.ArrValue...)
+ } else {
+  t.Platforms = o.ArrValue
+ }
 case "output":
  outputs, err := parseArrValue[buildflags.ExportEntry](o.ArrValue)
  if err != nil {
   return errors.Wrap(err, "invalid value for outputs")
  }
- t.Outputs = outputs
+ if o.Append {
+  t.Outputs = t.Outputs.Merge(outputs)
+ } else {
+  t.Outputs = outputs
+ }
  for _, o := range t.Outputs {
   if o.Destination != "" {
    ent.FSWrite = append(ent.FSWrite, o.Destination)

@@ -985,11 +1051,19 @@ func (t *Target) AddOverrides(overrides map[string]Override, ent *EntitlementCon
  }
  t.NoCache = &noCache
 case "no-cache-filter":
- t.NoCacheFilter = o.ArrValue
+ if o.Append {
+  t.NoCacheFilter = append(t.NoCacheFilter, o.ArrValue...)
+ } else {
+  t.NoCacheFilter = o.ArrValue
+ }
 case "shm-size":
  t.ShmSize = &value
 case "ulimits":
- t.Ulimits = o.ArrValue
+ if o.Append {
+  t.Ulimits = append(t.Ulimits, o.ArrValue...)
+ } else {
+  t.Ulimits = o.ArrValue
+ }
 case "network":
  t.NetworkMode = &value
 case "pull":

@@ -1067,9 +1141,7 @@ func (t *Target) GetEvalContexts(ectx *hcl.EvalContext, block *hcl.Block, loadDe
 e2 := ectx.NewChild()
 e2.Variables = make(map[string]cty.Value)
 if e != ectx {
- for k, v := range e.Variables {
-  e2.Variables[k] = v
- }
+ maps.Copy(e2.Variables, e.Variables)
 }
 e2.Variables[k] = v
 ectxs2 = append(ectxs2, e2)

@@ -1288,8 +1360,8 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
 if strings.HasPrefix(bi.ContextPath, "cwd://") {
  bi.ContextPath = path.Clean(strings.TrimPrefix(bi.ContextPath, "cwd://"))
 }
-if !build.IsRemoteURL(bi.ContextPath) && bi.ContextState == nil && !path.IsAbs(bi.DockerfilePath) {
- bi.DockerfilePath = path.Join(bi.ContextPath, bi.DockerfilePath)
+if !build.IsRemoteURL(bi.ContextPath) && bi.ContextState == nil && !filepath.IsAbs(bi.DockerfilePath) {
+ bi.DockerfilePath = filepath.Join(bi.ContextPath, bi.DockerfilePath)
 }
 for k, v := range bi.NamedContexts {
  if strings.HasPrefix(v.Path, "cwd://") {

@@ -1434,9 +1506,7 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
 }
 bo.Ulimits = ulimits

-for _, ent := range t.Entitlements {
- bo.Allow = append(bo.Allow, entitlements.Entitlement(ent))
-}
+bo.Allow = append(bo.Allow, t.Entitlements...)

 return bo, nil
 }
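The override parsing above now treats a trailing `+` on a `--set` key (for example `webapp.platform+=linux/arm64`) as "append to the existing list" rather than "replace it". Below is a minimal standalone sketch of that rule; `applyOverride` and the sample values are illustrative only and are not part of the buildx sources.

```go
package main

import (
	"fmt"
	"strings"
)

// applyOverride mimics the append-vs-replace rule introduced above:
// "target.key+=value" appends to the current list, "target.key=value" replaces it.
func applyOverride(current []string, override string) []string {
	parts := strings.SplitN(override, "=", 2) // e.g. ["webapp.platform+", "linux/arm64"]
	if len(parts) != 2 {
		return current // no value given; leave the list untouched
	}
	if strings.HasSuffix(parts[0], "+") {
		return append(current, parts[1])
	}
	return []string{parts[1]}
}

func main() {
	platforms := []string{"linux/amd64"}
	platforms = applyOverride(platforms, "webapp.platform+=linux/arm64")
	fmt.Println(platforms) // [linux/amd64 linux/arm64]
	platforms = applyOverride(platforms, "webapp.platform=linux/riscv64")
	fmt.Println(platforms) // [linux/riscv64]
}
```

The `PlatformAppend` and `SecretsAppend` tests added below exercise this same pair of behaviours through `ReadTargets`.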
@@ -34,6 +34,18 @@ target "webapp" {
 args = {
  VAR_BOTH = "webapp"
 }
+annotations = [
+ "index,manifest:org.opencontainers.image.authors=dvdksn"
+]
+attest = [
+ "type=provenance,mode=max"
+]
+platforms = [
+ "linux/amd64"
+]
+secret = [
+ "id=FOO,env=FOO"
+]
 inherits = ["webDEP"]
 }`),
 }

@@ -115,6 +127,31 @@ target "webapp" {
  })
 })

+t.Run("AnnotationsOverrides", func(t *testing.T) {
+ t.Parallel()
+ m, g, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.annotations=index,manifest:org.opencontainers.image.vendor=docker"}, nil, &EntitlementConf{})
+ require.NoError(t, err)
+ require.Equal(t, []string{"index,manifest:org.opencontainers.image.authors=dvdksn", "index,manifest:org.opencontainers.image.vendor=docker"}, m["webapp"].Annotations)
+ require.Equal(t, 1, len(g))
+ require.Equal(t, []string{"webapp"}, g["default"].Targets)
+})
+
+t.Run("AttestOverride", func(t *testing.T) {
+ m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.attest=type=sbom"}, nil, &EntitlementConf{})
+ require.NoError(t, err)
+ require.Len(t, m["webapp"].Attest, 2)
+ require.Equal(t, "provenance", m["webapp"].Attest[0].Type)
+ require.Equal(t, "sbom", m["webapp"].Attest[1].Type)
+})
+
+t.Run("AttestAppend", func(t *testing.T) {
+ m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.attest+=type=sbom"}, nil, &EntitlementConf{})
+ require.NoError(t, err)
+ require.Len(t, m["webapp"].Attest, 2)
+ require.Equal(t, "provenance", m["webapp"].Attest[0].Type)
+ require.Equal(t, "sbom", m["webapp"].Attest[1].Type)
+})
+
 t.Run("ContextOverride", func(t *testing.T) {
  t.Parallel()
  _, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.context"}, nil, &EntitlementConf{})

@@ -136,6 +173,49 @@ target "webapp" {
  require.Equal(t, []string{"webapp"}, g["default"].Targets)
 })

+t.Run("PlatformOverride", func(t *testing.T) {
+ m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.platform=linux/arm64"}, nil, &EntitlementConf{})
+ require.NoError(t, err)
+ require.Equal(t, []string{"linux/arm64"}, m["webapp"].Platforms)
+})
+
+t.Run("PlatformAppend", func(t *testing.T) {
+ m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.platform+=linux/arm64"}, nil, &EntitlementConf{})
+ require.NoError(t, err)
+ require.Equal(t, []string{"linux/amd64", "linux/arm64"}, m["webapp"].Platforms)
+})
+
+t.Run("PlatformAppendMulti", func(t *testing.T) {
+ m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.platform+=linux/arm64", "webapp.platform+=linux/riscv64"}, nil, &EntitlementConf{})
+ require.NoError(t, err)
+ require.Equal(t, []string{"linux/amd64", "linux/arm64", "linux/riscv64"}, m["webapp"].Platforms)
+})
+
+t.Run("PlatformAppendMultiLastOverride", func(t *testing.T) {
+ m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.platform+=linux/arm64", "webapp.platform=linux/riscv64"}, nil, &EntitlementConf{})
+ require.NoError(t, err)
+ require.Equal(t, []string{"linux/arm64", "linux/riscv64"}, m["webapp"].Platforms)
+})
+
+t.Run("SecretsOverride", func(t *testing.T) {
+ t.Setenv("FOO", "foo")
+ t.Setenv("BAR", "bar")
+ m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.secrets=id=BAR,env=BAR"}, nil, &EntitlementConf{})
+ require.NoError(t, err)
+ require.Len(t, m["webapp"].Secrets, 1)
+ require.Equal(t, "BAR", m["webapp"].Secrets[0].ID)
+})
+
+t.Run("SecretsAppend", func(t *testing.T) {
+ t.Setenv("FOO", "foo")
+ t.Setenv("BAR", "bar")
+ m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.secrets+=id=BAR,env=BAR"}, nil, &EntitlementConf{})
+ require.NoError(t, err)
+ require.Len(t, m["webapp"].Secrets, 2)
+ require.Equal(t, "FOO", m["webapp"].Secrets[0].ID)
+ require.Equal(t, "BAR", m["webapp"].Secrets[1].ID)
+})
+
 t.Run("ShmSizeOverride", func(t *testing.T) {
  m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.shm-size=256m"}, nil, &EntitlementConf{})
  require.NoError(t, err)

@@ -612,7 +692,7 @@ func TestHCLContextCwdPrefix(t *testing.T) {
 require.Contains(t, m, "app")
 assert.Equal(t, "test", *m["app"].Dockerfile)
 assert.Equal(t, "foo", *m["app"].Context)
-assert.Equal(t, "foo/test", bo["app"].Inputs.DockerfilePath)
+assert.Equal(t, filepath.Clean("foo/test"), bo["app"].Inputs.DockerfilePath)
 assert.Equal(t, "foo", bo["app"].Inputs.ContextPath)
 }

@@ -1806,8 +1886,8 @@ func TestHCLEntitlements(t *testing.T) {
 require.Equal(t, "network.host", m["app"].Entitlements[1])

 require.Len(t, bo["app"].Allow, 2)
-require.Equal(t, entitlements.EntitlementSecurityInsecure, bo["app"].Allow[0])
+require.Equal(t, entitlements.EntitlementSecurityInsecure.String(), bo["app"].Allow[0])
-require.Equal(t, entitlements.EntitlementNetworkHost, bo["app"].Allow[1])
+require.Equal(t, entitlements.EntitlementNetworkHost.String(), bo["app"].Allow[1])
 }

 func TestEntitlementsForNetHostCompose(t *testing.T) {

@@ -1846,7 +1926,7 @@ func TestEntitlementsForNetHostCompose(t *testing.T) {
 require.Equal(t, "host", *m["app"].NetworkMode)

 require.Len(t, bo["app"].Allow, 1)
-require.Equal(t, entitlements.EntitlementNetworkHost, bo["app"].Allow[0])
+require.Equal(t, entitlements.EntitlementNetworkHost.String(), bo["app"].Allow[0])
 require.Equal(t, "host", bo["app"].NetworkMode)
 }

@@ -1877,7 +1957,7 @@ func TestEntitlementsForNetHost(t *testing.T) {
 require.Equal(t, "host", *m["app"].NetworkMode)

 require.Len(t, bo["app"].Allow, 1)
-require.Equal(t, entitlements.EntitlementNetworkHost, bo["app"].Allow[0])
+require.Equal(t, entitlements.EntitlementNetworkHost.String(), bo["app"].Allow[0])
 require.Equal(t, "host", bo["app"].NetworkMode)
 }

@@ -2062,6 +2142,73 @@ target "app" {
 })
 }

+func TestVariableValidationConditionNull(t *testing.T) {
+ fp := File{
+  Name: "docker-bake.hcl",
+  Data: []byte(`
+variable "PORT" {
+  default = 3000
+  validation {}
+}
+target "app" {
+  args = {
+    PORT = PORT
+  }
+}
+`),
+ }
+
+ _, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, nil, nil, &EntitlementConf{})
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "Condition expression must return either true or false, not null")
+}
+
+func TestVariableValidationConditionUnknownValue(t *testing.T) {
+ fp := File{
+  Name: "docker-bake.hcl",
+  Data: []byte(`
+variable "PORT" {
+  default = 3000
+  validation {
+    condition = "foo"
+  }
+}
+target "app" {
+  args = {
+    PORT = PORT
+  }
+}
+`),
+ }
+
+ _, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, nil, nil, &EntitlementConf{})
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "Invalid condition result value: a bool is required")
+}
+
+func TestVariableValidationInvalidErrorMessage(t *testing.T) {
+ fp := File{
+  Name: "docker-bake.hcl",
+  Data: []byte(`
+variable "FOO" {
+  default = 0
+  validation {
+    condition = FOO > 5
+  }
+}
+target "app" {
+  args = {
+    FOO = FOO
+  }
+}
+`),
+ }
+
+ _, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, nil, nil, &EntitlementConf{})
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "This check failed, but has an invalid error message")
+}
+
 // https://github.com/docker/buildx/issues/2822
 func TestVariableEmpty(t *testing.T) {
 fp := File{
@@ -92,6 +92,9 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
 if s.Build.AdditionalContexts != nil {
  additionalContexts = map[string]string{}
  for k, v := range s.Build.AdditionalContexts {
+  if strings.HasPrefix(v, "service:") {
+   v = strings.Replace(v, "service:", "target:", 1)
+  }
   additionalContexts[k] = v
  }
 }

@@ -174,6 +177,7 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
 CacheFrom: cacheFrom,
 CacheTo: cacheTo,
 NetworkMode: networkModeP,
+Platforms: s.Build.Platforms,
 SSH: ssh,
 Secrets: secrets,
 ShmSize: shmSize,

@@ -214,7 +218,7 @@ func validateComposeFile(dt []byte, fn string) (bool, error) {
 }

 func validateCompose(dt []byte, envs map[string]string) error {
-_, err := loader.Load(composetypes.ConfigDetails{
+_, err := loader.LoadWithContext(context.Background(), composetypes.ConfigDetails{
 ConfigFiles: []composetypes.ConfigFile{
  {
   Content: dt,

@@ -315,7 +319,7 @@ type (
 stringArray []string
 )

-func (sa *stringArray) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (sa *stringArray) UnmarshalYAML(unmarshal func(any) error) error {
 var multi []string
 err := unmarshal(&multi)
 if err != nil {

@@ -332,7 +336,7 @@ func (sa *stringArray) UnmarshalYAML(unmarshal func(interface{}) error) error {

 // composeExtTarget converts Compose build extension x-bake to bake Target
 // https://github.com/compose-spec/compose-spec/blob/master/spec.md#extension
-func (t *Target) composeExtTarget(exts map[string]interface{}) error {
+func (t *Target) composeExtTarget(exts map[string]any) error {
 var xb xbake

 ext, ok := exts["x-bake"]
@@ -463,6 +463,21 @@ services:
 require.NoError(t, err)
 }

+func TestPlatforms(t *testing.T) {
+ dt := []byte(`
+services:
+  foo:
+    build:
+      context: .
+      platforms:
+        - linux/amd64
+        - linux/arm64
+`)
+ c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
+ require.NoError(t, err)
+ require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[0].Platforms)
+}
+
 func newBool(val bool) *bool {
 b := val
 return &b

@@ -798,6 +813,37 @@ services:
 })
 }

+func TestServiceContext(t *testing.T) {
+ dt := []byte(`
+services:
+  base:
+    build:
+      dockerfile: baseapp.Dockerfile
+    command: ./entrypoint.sh
+  webapp:
+    build:
+      context: ./dir
+      additional_contexts:
+        base: service:base
+`)
+
+ c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
+ require.NoError(t, err)
+
+ require.Equal(t, 1, len(c.Groups))
+ require.Equal(t, "default", c.Groups[0].Name)
+ sort.Strings(c.Groups[0].Targets)
+ require.Equal(t, []string{"base", "webapp"}, c.Groups[0].Targets)
+
+ require.Equal(t, 2, len(c.Targets))
+ sort.Slice(c.Targets, func(i, j int) bool {
+  return c.Targets[i].Name < c.Targets[j].Name
+ })
+
+ require.Equal(t, "webapp", c.Targets[1].Name)
+ require.Equal(t, map[string]string{"base": "target:base"}, c.Targets[1].Contexts)
+}
+
 // chdir changes the current working directory to the named directory,
 // and then restore the original working directory at the end of the test.
 func chdir(t *testing.T, dir string) {
@@ -20,6 +20,7 @@ import (
 "github.com/moby/buildkit/util/entitlements"
 "github.com/pkg/errors"
 "github.com/sirupsen/logrus"
+"github.com/tonistiigi/go-csvvalue"
 )

 type EntitlementKey string

@@ -27,6 +28,7 @@ type EntitlementKey string
 const (
 EntitlementKeyNetworkHost EntitlementKey = "network.host"
 EntitlementKeySecurityInsecure EntitlementKey = "security.insecure"
+EntitlementKeyDevice EntitlementKey = "device"
 EntitlementKeyFSRead EntitlementKey = "fs.read"
 EntitlementKeyFSWrite EntitlementKey = "fs.write"
 EntitlementKeyFS EntitlementKey = "fs"

@@ -39,6 +41,7 @@ const (
 type EntitlementConf struct {
 NetworkHost bool
 SecurityInsecure bool
+Devices *EntitlementsDevicesConf
 FSRead []string
 FSWrite []string
 ImagePush []string

@@ -46,6 +49,11 @@ type EntitlementConf struct {
 SSH bool
 }

+type EntitlementsDevicesConf struct {
+ All bool
+ Devices map[string]struct{}
+}
+
 func ParseEntitlements(in []string) (EntitlementConf, error) {
 var conf EntitlementConf
 for _, e := range in {

@@ -59,6 +67,22 @@ func ParseEntitlements(in []string) (EntitlementConf, error) {
 default:
  k, v, _ := strings.Cut(e, "=")
  switch k {
+ case string(EntitlementKeyDevice):
+  if v == "" {
+   conf.Devices = &EntitlementsDevicesConf{All: true}
+   continue
+  }
+  fields, err := csvvalue.Fields(v, nil)
+  if err != nil {
+   return EntitlementConf{}, errors.Wrapf(err, "failed to parse device entitlement %q", v)
+  }
+  if conf.Devices == nil {
+   conf.Devices = &EntitlementsDevicesConf{}
+  }
+  if conf.Devices.Devices == nil {
+   conf.Devices.Devices = make(map[string]struct{}, 0)
+  }
+  conf.Devices.Devices[fields[0]] = struct{}{}
  case string(EntitlementKeyFSRead):
   conf.FSRead = append(conf.FSRead, v)
  case string(EntitlementKeyFSWrite):

@@ -95,12 +119,34 @@ func (c EntitlementConf) Validate(m map[string]build.Options) (EntitlementConf,

 func (c EntitlementConf) check(bo build.Options, expected *EntitlementConf) error {
 for _, e := range bo.Allow {
+ k, rest, _ := strings.Cut(e, "=")
+ switch k {
+ case entitlements.EntitlementDevice.String():
+  if rest == "" {
+   if c.Devices == nil || !c.Devices.All {
+    expected.Devices = &EntitlementsDevicesConf{All: true}
+   }
+   continue
+  }
+  fields, err := csvvalue.Fields(rest, nil)
+  if err != nil {
+   return errors.Wrapf(err, "failed to parse device entitlement %q", rest)
+  }
+  if expected.Devices == nil {
+   expected.Devices = &EntitlementsDevicesConf{}
+  }
+  if expected.Devices.Devices == nil {
+   expected.Devices.Devices = make(map[string]struct{}, 0)
+  }
+  expected.Devices.Devices[fields[0]] = struct{}{}
+ }
+
 switch e {
-case entitlements.EntitlementNetworkHost:
+case entitlements.EntitlementNetworkHost.String():
  if !c.NetworkHost {
   expected.NetworkHost = true
  }
-case entitlements.EntitlementSecurityInsecure:
+case entitlements.EntitlementSecurityInsecure.String():
  if !c.SecurityInsecure {
   expected.SecurityInsecure = true
  }

@@ -187,6 +233,18 @@ func (c EntitlementConf) Prompt(ctx context.Context, isRemote bool, out io.Write
 flags = append(flags, string(EntitlementKeySecurityInsecure))
 }

+if c.Devices != nil {
+ if c.Devices.All {
+  msgs = append(msgs, " - Access to CDI devices")
+  flags = append(flags, string(EntitlementKeyDevice))
+ } else {
+  for d := range c.Devices.Devices {
+   msgs = append(msgs, fmt.Sprintf(" - Access to device %s", d))
+   flags = append(flags, string(EntitlementKeyDevice)+"="+d)
+  }
+ }
+}
+
 if c.SSH {
 msgsFS = append(msgsFS, " - Forwarding default SSH agent socket")
 flagsFS = append(flagsFS, string(EntitlementKeySSH))

@@ -248,7 +306,7 @@ func (c EntitlementConf) Prompt(ctx context.Context, isRemote bool, out io.Write
 fmt.Fprintf(out, "\nPass %q to grant requested privileges.\n", strings.Join(slices.Concat(flags, flagsFS), " "))
 }

-args := append([]string(nil), os.Args...)
+args := slices.Clone(os.Args)
 if v, ok := os.LookupEnv("DOCKER_CLI_PLUGIN_ORIGINAL_CLI_COMMAND"); ok && v != "" {
 args[0] = v
 }
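The `device` entitlement added above is either a bare `device` (grant access to all CDI devices) or `device=<name>[,option...]`, where the value is split with `csvvalue.Fields` and only the first field names the device. Below is a rough self-contained sketch of that split; the `parseDeviceEntitlements` helper and the sample device name are hypothetical, and plain `strings.Split` stands in for the go-csvvalue dependency.

```go
package main

import (
	"fmt"
	"strings"
)

type devicesConf struct {
	All     bool
	Devices map[string]struct{}
}

// parseDeviceEntitlements sketches the device entitlement handling: a bare
// "device" flips All, otherwise the first comma-separated field of the value
// is recorded as an individually granted device.
func parseDeviceEntitlements(in []string) devicesConf {
	conf := devicesConf{Devices: map[string]struct{}{}}
	for _, e := range in {
		k, v, _ := strings.Cut(e, "=")
		if k != "device" {
			continue
		}
		if v == "" {
			conf.All = true
			continue
		}
		fields := strings.Split(v, ",") // the real code uses csvvalue.Fields for quoting support
		conf.Devices[fields[0]] = struct{}{}
	}
	return conf
}

func main() {
	fmt.Println(parseDeviceEntitlements([]string{"device"}))                  // {true map[]}
	fmt.Println(parseDeviceEntitlements([]string{"device=vendor.com/gpu=0"})) // {false map[vendor.com/gpu=0:{}]}
}
```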
@@ -208,8 +208,8 @@ func TestValidateEntitlements(t *testing.T) {
 {
  name: "NetworkHostMissing",
  opt: build.Options{
-  Allow: []entitlements.Entitlement{
-   entitlements.EntitlementNetworkHost,
+  Allow: []string{
+   entitlements.EntitlementNetworkHost.String(),
   },
  },
  expected: EntitlementConf{

@@ -223,8 +223,8 @@ func TestValidateEntitlements(t *testing.T) {
   NetworkHost: true,
  },
  opt: build.Options{
-  Allow: []entitlements.Entitlement{
-   entitlements.EntitlementNetworkHost,
+  Allow: []string{
+   entitlements.EntitlementNetworkHost.String(),
   },
  },
  expected: EntitlementConf{

@@ -234,9 +234,9 @@ func TestValidateEntitlements(t *testing.T) {
 {
  name: "SecurityAndNetworkHostMissing",
  opt: build.Options{
-  Allow: []entitlements.Entitlement{
-   entitlements.EntitlementNetworkHost,
-   entitlements.EntitlementSecurityInsecure,
+  Allow: []string{
+   entitlements.EntitlementNetworkHost.String(),
+   entitlements.EntitlementSecurityInsecure.String(),
   },
  },
  expected: EntitlementConf{

@@ -251,9 +251,9 @@ func TestValidateEntitlements(t *testing.T) {
   NetworkHost: true,
  },
  opt: build.Options{
-  Allow: []entitlements.Entitlement{
-   entitlements.EntitlementNetworkHost,
-   entitlements.EntitlementSecurityInsecure,
+  Allow: []string{
+   entitlements.EntitlementNetworkHost.String(),
+   entitlements.EntitlementSecurityInsecure.String(),
   },
  },
  expected: EntitlementConf{
@@ -608,7 +608,7 @@ func TestHCLAttrsCapsuleType(t *testing.T) {
 target "app" {
  attest = [
   { type = "provenance", mode = "max" },
-  "type=sbom,disabled=true",
+  "type=sbom,disabled=true,generator=foo,\"ENV1=bar,baz\",ENV2=hello",
  ]

  cache-from = [

@@ -641,7 +641,7 @@ func TestHCLAttrsCapsuleType(t *testing.T) {
 require.NoError(t, err)

 require.Equal(t, 1, len(c.Targets))
-require.Equal(t, []string{"type=provenance,mode=max", "type=sbom,disabled=true"}, stringify(c.Targets[0].Attest))
+require.Equal(t, []string{"type=provenance,mode=max", "type=sbom,disabled=true,\"ENV1=bar,baz\",ENV2=hello,generator=foo"}, stringify(c.Targets[0].Attest))
 require.Equal(t, []string{"type=local,dest=../out", "type=oci,dest=../out.tar"}, stringify(c.Targets[0].Outputs))
 require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheFrom))
 require.Equal(t, []string{"type=local,dest=path/to/cache"}, stringify(c.Targets[0].CacheTo))

@@ -1645,7 +1645,7 @@ func TestHCLIndexOfFunc(t *testing.T) {
 require.Empty(t, c.Targets[1].Tags[1])
 }

-func ptrstr(s interface{}) *string {
+func ptrstr(s any) *string {
 var n *string
 if reflect.ValueOf(s).Kind() == reflect.String {
  ss := s.(string)
@@ -15,11 +15,11 @@ import (

 // DecodeOptions allows customizing sections of the decoding process.
 type DecodeOptions struct {
-ImpliedType func(gv interface{}) (cty.Type, error)
+ImpliedType func(gv any) (cty.Type, error)
 Convert func(in cty.Value, want cty.Type) (cty.Value, error)
 }

-func (o DecodeOptions) DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
+func (o DecodeOptions) DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val any) hcl.Diagnostics {
 o = o.withDefaults()

 rv := reflect.ValueOf(val)
@@ -46,7 +46,7 @@ func (o DecodeOptions) DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val inter
 // are returned then the given value may have been partially-populated but
 // may still be accessed by a careful caller for static analysis and editor
 // integration use-cases.
-func DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
+func DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val any) hcl.Diagnostics {
 return DecodeOptions{}.DecodeBody(body, ctx, val)
 }

@@ -282,7 +282,7 @@ func (o DecodeOptions) decodeBlockToValue(block *hcl.Block, ctx *hcl.EvalContext
 return diags
 }

-func (o DecodeOptions) DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
+func (o DecodeOptions) DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val any) hcl.Diagnostics {
 o = o.withDefaults()

 srcVal, diags := expr.Value(ctx)
@@ -332,7 +332,7 @@ func (o DecodeOptions) DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContex
 // are returned then the given value may have been partially-populated but
 // may still be accessed by a careful caller for static analysis and editor
 // integration use-cases.
-func DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
+func DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val any) hcl.Diagnostics {
 return DecodeOptions{}.DecodeExpression(expr, ctx, val)
 }
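Note: the interface{}-to-any rewrites in the hunks above are purely cosmetic. `any` has been a built-in alias for `interface{}` since Go 1.18, so the two spellings are interchangeable. A minimal standalone illustration (not taken from the diff):

package main

import "fmt"

// printAll accepts values of any type; `any` is exactly interface{}.
func printAll(vals ...any) {
	for _, v := range vals {
		fmt.Printf("%T: %v\n", v, v)
	}
}

func main() {
	printAll(1, "two", 3.0)
}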
@@ -16,8 +16,8 @@ import (
 )

 func TestDecodeBody(t *testing.T) {
-deepEquals := func(other interface{}) func(v interface{}) bool {
+deepEquals := func(other any) func(v any) bool {
-return func(v interface{}) bool {
+return func(v any) bool {
 return reflect.DeepEqual(v, other)
 }
 }
@@ -45,19 +45,19 @@ func TestDecodeBody(t *testing.T) {
 }

 tests := []struct {
-Body map[string]interface{}
+Body map[string]any
-Target func() interface{}
+Target func() any
-Check func(v interface{}) bool
+Check func(v any) bool
 DiagCount int
 }{
 {
-map[string]interface{}{},
+map[string]any{},
 makeInstantiateType(struct{}{}),
 deepEquals(struct{}{}),
 0,
 },
 {
-map[string]interface{}{},
+map[string]any{},
 makeInstantiateType(struct {
 Name string `hcl:"name"`
 }{}),
@@ -67,7 +67,7 @@ func TestDecodeBody(t *testing.T) {
 1, // name is required
 },
 {
-map[string]interface{}{},
+map[string]any{},
 makeInstantiateType(struct {
 Name *string `hcl:"name"`
 }{}),
@@ -77,7 +77,7 @@ func TestDecodeBody(t *testing.T) {
 0,
 }, // name nil
 {
-map[string]interface{}{},
+map[string]any{},
 makeInstantiateType(struct {
 Name string `hcl:"name,optional"`
 }{}),
@@ -87,9 +87,9 @@ func TestDecodeBody(t *testing.T) {
 0,
 }, // name optional
 {
-map[string]interface{}{},
+map[string]any{},
 makeInstantiateType(withNameExpression{}),
-func(v interface{}) bool {
+func(v any) bool {
 if v == nil {
 return false
 }
@@ -109,11 +109,11 @@ func TestDecodeBody(t *testing.T) {
 0,
 },
 {
-map[string]interface{}{
+map[string]any{
 "name": "Ermintrude",
 },
 makeInstantiateType(withNameExpression{}),
-func(v interface{}) bool {
+func(v any) bool {
 if v == nil {
 return false
 }
@@ -133,7 +133,7 @@ func TestDecodeBody(t *testing.T) {
 0,
 },
 {
-map[string]interface{}{
+map[string]any{
 "name": "Ermintrude",
 },
 makeInstantiateType(struct {
@@ -145,7 +145,7 @@ func TestDecodeBody(t *testing.T) {
 0,
 },
 {
-map[string]interface{}{
+map[string]any{
 "name": "Ermintrude",
 "age": 23,
 },
@@ -158,7 +158,7 @@ func TestDecodeBody(t *testing.T) {
 1, // Extraneous "age" property
 },
 {
-map[string]interface{}{
+map[string]any{
 "name": "Ermintrude",
 "age": 50,
 },
@@ -166,7 +166,7 @@ func TestDecodeBody(t *testing.T) {
 Name string `hcl:"name"`
 Attrs hcl.Attributes `hcl:",remain"`
 }{}),
-func(gotI interface{}) bool {
+func(gotI any) bool {
 got := gotI.(struct {
 Name string `hcl:"name"`
 Attrs hcl.Attributes `hcl:",remain"`
@@ -176,7 +176,7 @@ func TestDecodeBody(t *testing.T) {
 0,
 },
 {
-map[string]interface{}{
+map[string]any{
 "name": "Ermintrude",
 "age": 50,
 },
@@ -184,7 +184,7 @@ func TestDecodeBody(t *testing.T) {
 Name string `hcl:"name"`
 Remain hcl.Body `hcl:",remain"`
 }{}),
-func(gotI interface{}) bool {
+func(gotI any) bool {
 got := gotI.(struct {
 Name string `hcl:"name"`
 Remain hcl.Body `hcl:",remain"`
@@ -197,7 +197,7 @@ func TestDecodeBody(t *testing.T) {
 0,
 },
 {
-map[string]interface{}{
+map[string]any{
 "name": "Ermintrude",
 "living": true,
 },
@@ -217,7 +217,7 @@ func TestDecodeBody(t *testing.T) {
 0,
 },
 {
-map[string]interface{}{
+map[string]any{
 "name": "Ermintrude",
 "age": 50,
 },
@@ -226,7 +226,7 @@ func TestDecodeBody(t *testing.T) {
 Body hcl.Body `hcl:",body"`
 Remain hcl.Body `hcl:",remain"`
 }{}),
-func(gotI interface{}) bool {
+func(gotI any) bool {
 got := gotI.(struct {
 Name string `hcl:"name"`
 Body hcl.Body `hcl:",body"`
@@ -241,76 +241,76 @@ func TestDecodeBody(t *testing.T) {
 0,
 },
 {
-map[string]interface{}{
+map[string]any{
-"noodle": map[string]interface{}{},
+"noodle": map[string]any{},
 },
 makeInstantiateType(struct {
 Noodle struct{} `hcl:"noodle,block"`
 }{}),
-func(gotI interface{}) bool {
+func(gotI any) bool {
 // Generating no diagnostics is good enough for this one.
 return true
 },
 0,
 },
 {
-map[string]interface{}{
+map[string]any{
-"noodle": []map[string]interface{}{{}},
+"noodle": []map[string]any{{}},
 },
 makeInstantiateType(struct {
 Noodle struct{} `hcl:"noodle,block"`
 }{}),
-func(gotI interface{}) bool {
+func(gotI any) bool {
 // Generating no diagnostics is good enough for this one.
 return true
 },
 0,
 },
 {
-map[string]interface{}{
+map[string]any{
-"noodle": []map[string]interface{}{{}, {}},
+"noodle": []map[string]any{{}, {}},
 },
 makeInstantiateType(struct {
 Noodle struct{} `hcl:"noodle,block"`
 }{}),
-func(gotI interface{}) bool {
+func(gotI any) bool {
 // Generating one diagnostic is good enough for this one.
 return true
 },
 1,
 },
 {
-map[string]interface{}{},
+map[string]any{},
 makeInstantiateType(struct {
 Noodle struct{} `hcl:"noodle,block"`
 }{}),
-func(gotI interface{}) bool {
+func(gotI any) bool {
 // Generating one diagnostic is good enough for this one.
 return true
 },
 1,
 },
 {
-map[string]interface{}{
+map[string]any{
-"noodle": []map[string]interface{}{},
+"noodle": []map[string]any{},
 },
 makeInstantiateType(struct {
 Noodle struct{} `hcl:"noodle,block"`
 }{}),
-func(gotI interface{}) bool {
+func(gotI any) bool {
 // Generating one diagnostic is good enough for this one.
 return true
 },
 1,
 },
 {
-map[string]interface{}{
+map[string]any{
-"noodle": map[string]interface{}{},
+"noodle": map[string]any{},
 },
 makeInstantiateType(struct {
 Noodle *struct{} `hcl:"noodle,block"`
 }{}),
-func(gotI interface{}) bool {
+func(gotI any) bool {
 return gotI.(struct {
 Noodle *struct{} `hcl:"noodle,block"`
 }).Noodle != nil
@@ -318,13 +318,13 @@ func TestDecodeBody(t *testing.T) {
 0,
 },
 {
-map[string]interface{}{
+map[string]any{
-"noodle": []map[string]interface{}{{}},
+"noodle": []map[string]any{{}},
 },
 makeInstantiateType(struct {
 Noodle *struct{} `hcl:"noodle,block"`
 }{}),
-func(gotI interface{}) bool {
+func(gotI any) bool {
 return gotI.(struct {
 Noodle *struct{} `hcl:"noodle,block"`
 }).Noodle != nil
@@ -332,13 +332,13 @@ func TestDecodeBody(t *testing.T) {
 0,
 },
 {
-map[string]interface{}{
+map[string]any{
-"noodle": []map[string]interface{}{},
+"noodle": []map[string]any{},
 },
 makeInstantiateType(struct {
 Noodle *struct{} `hcl:"noodle,block"`
 }{}),
-func(gotI interface{}) bool {
+func(gotI any) bool {
 return gotI.(struct {
 Noodle *struct{} `hcl:"noodle,block"`
 }).Noodle == nil
@@ -346,26 +346,26 @@ func TestDecodeBody(t *testing.T) {
 0,
 },
 {
-map[string]interface{}{
+map[string]any{
-"noodle": []map[string]interface{}{{}, {}},
+"noodle": []map[string]any{{}, {}},
 },
 makeInstantiateType(struct {
 Noodle *struct{} `hcl:"noodle,block"`
 }{}),
-func(gotI interface{}) bool {
+func(gotI any) bool {
 // Generating one diagnostic is good enough for this one.
 return true
 },
 1,
 },
 {
-map[string]interface{}{
+map[string]any{
-"noodle": []map[string]interface{}{},
+"noodle": []map[string]any{},
 },
 makeInstantiateType(struct {
 Noodle []struct{} `hcl:"noodle,block"`
 }{}),
-func(gotI interface{}) bool {
+func(gotI any) bool {
 noodle := gotI.(struct {
 Noodle []struct{} `hcl:"noodle,block"`
 }).Noodle
@@ -374,13 +374,13 @@ func TestDecodeBody(t *testing.T) {
 0,
 },
 {
-map[string]interface{}{
+map[string]any{
-"noodle": []map[string]interface{}{{}},
+"noodle": []map[string]any{{}},
 },
 makeInstantiateType(struct {
 Noodle []struct{} `hcl:"noodle,block"`
 }{}),
-func(gotI interface{}) bool {
+func(gotI any) bool {
 noodle := gotI.(struct {
 Noodle []struct{} `hcl:"noodle,block"`
 }).Noodle
@@ -389,13 +389,13 @@ func TestDecodeBody(t *testing.T) {
 0,
 },
 {
-map[string]interface{}{
+map[string]any{
-"noodle": []map[string]interface{}{{}, {}},
+"noodle": []map[string]any{{}, {}},
 },
 makeInstantiateType(struct {
 Noodle []struct{} `hcl:"noodle,block"`
 }{}),
-func(gotI interface{}) bool {
+func(gotI any) bool {
 noodle := gotI.(struct {
 Noodle []struct{} `hcl:"noodle,block"`
 }).Noodle
@@ -404,15 +404,15 @@ func TestDecodeBody(t *testing.T) {
 0,
 },
 {
-map[string]interface{}{
+map[string]any{
-"noodle": map[string]interface{}{},
+"noodle": map[string]any{},
 },
 makeInstantiateType(struct {
 Noodle struct {
 Name string `hcl:"name,label"`
 } `hcl:"noodle,block"`
 }{}),
-func(gotI interface{}) bool {
+func(gotI any) bool {
 //nolint:misspell
 // Generating two diagnostics is good enough for this one.
 // (one for the missing noodle block and the other for
@@ -423,9 +423,9 @@ func TestDecodeBody(t *testing.T) {
 2,
 },
 {
-map[string]interface{}{
+map[string]any{
-"noodle": map[string]interface{}{
+"noodle": map[string]any{
-"foo_foo": map[string]interface{}{},
+"foo_foo": map[string]any{},
 },
 },
 makeInstantiateType(struct {
@@ -433,7 +433,7 @@ func TestDecodeBody(t *testing.T) {
 Name string `hcl:"name,label"`
 } `hcl:"noodle,block"`
 }{}),
-func(gotI interface{}) bool {
+func(gotI any) bool {
 noodle := gotI.(struct {
 Noodle struct {
 Name string `hcl:"name,label"`
@@ -444,10 +444,10 @@ func TestDecodeBody(t *testing.T) {
 0,
 },
 {
-map[string]interface{}{
+map[string]any{
-"noodle": map[string]interface{}{
+"noodle": map[string]any{
-"foo_foo": map[string]interface{}{},
+"foo_foo": map[string]any{},
-"bar_baz": map[string]interface{}{},
+"bar_baz": map[string]any{},
 },
 },
 makeInstantiateType(struct {
@@ -455,17 +455,17 @@ func TestDecodeBody(t *testing.T) {
 Name string `hcl:"name,label"`
 } `hcl:"noodle,block"`
 }{}),
-func(gotI interface{}) bool {
+func(gotI any) bool {
 // One diagnostic is enough for this one.
 return true
 },
 1,
 },
 {
-map[string]interface{}{
+map[string]any{
-"noodle": map[string]interface{}{
+"noodle": map[string]any{
-"foo_foo": map[string]interface{}{},
+"foo_foo": map[string]any{},
-"bar_baz": map[string]interface{}{},
+"bar_baz": map[string]any{},
 },
 },
 makeInstantiateType(struct {
@@ -473,7 +473,7 @@ func TestDecodeBody(t *testing.T) {
 Name string `hcl:"name,label"`
 } `hcl:"noodle,block"`
 }{}),
-func(gotI interface{}) bool {
+func(gotI any) bool {
 noodles := gotI.(struct {
 Noodles []struct {
 Name string `hcl:"name,label"`
@@ -484,9 +484,9 @@ func TestDecodeBody(t *testing.T) {
 0,
 },
 {
-map[string]interface{}{
+map[string]any{
-"noodle": map[string]interface{}{
+"noodle": map[string]any{
-"foo_foo": map[string]interface{}{
+"foo_foo": map[string]any{
 "type": "rice",
 },
 },
@@ -497,7 +497,7 @@ func TestDecodeBody(t *testing.T) {
 Type string `hcl:"type"`
 } `hcl:"noodle,block"`
 }{}),
-func(gotI interface{}) bool {
+func(gotI any) bool {
 noodle := gotI.(struct {
 Noodle struct {
 Name string `hcl:"name,label"`
@@ -510,7 +510,7 @@ func TestDecodeBody(t *testing.T) {
 },

 {
-map[string]interface{}{
+map[string]any{
 "name": "Ermintrude",
 "age": 34,
 },
@@ -522,31 +522,31 @@ func TestDecodeBody(t *testing.T) {
 0,
 },
 {
-map[string]interface{}{
+map[string]any{
 "name": "Ermintrude",
 "age": 89,
 },
 makeInstantiateType(map[string]*hcl.Attribute(nil)),
-func(gotI interface{}) bool {
+func(gotI any) bool {
 got := gotI.(map[string]*hcl.Attribute)
 return len(got) == 2 && got["name"] != nil && got["age"] != nil
 },
 0,
 },
 {
-map[string]interface{}{
+map[string]any{
 "name": "Ermintrude",
 "age": 13,
 },
 makeInstantiateType(map[string]hcl.Expression(nil)),
-func(gotI interface{}) bool {
+func(gotI any) bool {
 got := gotI.(map[string]hcl.Expression)
 return len(got) == 2 && got["name"] != nil && got["age"] != nil
 },
 0,
 },
 {
-map[string]interface{}{
+map[string]any{
 "name": "Ermintrude",
 "living": true,
 },
@@ -559,10 +559,10 @@ func TestDecodeBody(t *testing.T) {
 },
 {
 // Retain "nested" block while decoding
-map[string]interface{}{
+map[string]any{
 "plain": "foo",
 },
-func() interface{} {
+func() any {
 return &withNestedBlock{
 Plain: "bar",
 Nested: &withTwoAttributes{
@@ -570,7 +570,7 @@ func TestDecodeBody(t *testing.T) {
 },
 }
 },
-func(gotI interface{}) bool {
+func(gotI any) bool {
 foo := gotI.(withNestedBlock)
 return foo.Plain == "foo" && foo.Nested != nil && foo.Nested.A == "bar"
 },
@@ -578,19 +578,19 @@ func TestDecodeBody(t *testing.T) {
 },
 {
 // Retain values in "nested" block while decoding
-map[string]interface{}{
+map[string]any{
-"nested": map[string]interface{}{
+"nested": map[string]any{
 "a": "foo",
 },
 },
-func() interface{} {
+func() any {
 return &withNestedBlock{
 Nested: &withTwoAttributes{
 B: "bar",
 },
 }
 },
-func(gotI interface{}) bool {
+func(gotI any) bool {
 foo := gotI.(withNestedBlock)
 return foo.Nested.A == "foo" && foo.Nested.B == "bar"
 },
@@ -598,14 +598,14 @@ func TestDecodeBody(t *testing.T) {
 },
 {
 // Retain values in "nested" block list while decoding
-map[string]interface{}{
+map[string]any{
-"nested": []map[string]interface{}{
+"nested": []map[string]any{
 {
 "a": "foo",
 },
 },
 },
-func() interface{} {
+func() any {
 return &withListofNestedBlocks{
 Nested: []*withTwoAttributes{
 {
@@ -614,7 +614,7 @@ func TestDecodeBody(t *testing.T) {
 },
 }
 },
-func(gotI interface{}) bool {
+func(gotI any) bool {
 n := gotI.(withListofNestedBlocks)
 return n.Nested[0].A == "foo" && n.Nested[0].B == "bar"
 },
@@ -622,14 +622,14 @@ func TestDecodeBody(t *testing.T) {
 },
 {
 // Remove additional elements from the list while decoding nested blocks
-map[string]interface{}{
+map[string]any{
-"nested": []map[string]interface{}{
+"nested": []map[string]any{
 {
 "a": "foo",
 },
 },
 },
-func() interface{} {
+func() any {
 return &withListofNestedBlocks{
 Nested: []*withTwoAttributes{
 {
@@ -641,7 +641,7 @@ func TestDecodeBody(t *testing.T) {
 },
 }
 },
-func(gotI interface{}) bool {
+func(gotI any) bool {
 n := gotI.(withListofNestedBlocks)
 return len(n.Nested) == 1
 },
@@ -649,8 +649,8 @@ func TestDecodeBody(t *testing.T) {
 },
 {
 // Make sure decoding value slices works the same as pointer slices.
-map[string]interface{}{
+map[string]any{
-"nested": []map[string]interface{}{
+"nested": []map[string]any{
 {
 "b": "bar",
 },
@@ -659,7 +659,7 @@ func TestDecodeBody(t *testing.T) {
 },
 },
 },
-func() interface{} {
+func() any {
 return &withListofNestedBlocksNoPointers{
 Nested: []withTwoAttributes{
 {
@@ -668,7 +668,7 @@ func TestDecodeBody(t *testing.T) {
 },
 }
 },
-func(gotI interface{}) bool {
+func(gotI any) bool {
 n := gotI.(withListofNestedBlocksNoPointers)
 return n.Nested[0].B == "bar" && len(n.Nested) == 2
 },
@@ -710,8 +710,8 @@ func TestDecodeBody(t *testing.T) {
 func TestDecodeExpression(t *testing.T) {
 tests := []struct {
 Value cty.Value
-Target interface{}
+Target any
-Want interface{}
+Want any
 DiagCount int
 }{
 {
@@ -799,8 +799,8 @@ func (e *fixedExpression) Variables() []hcl.Traversal {
 return nil
 }

-func makeInstantiateType(target interface{}) func() interface{} {
+func makeInstantiateType(target any) func() any {
-return func() interface{} {
+return func() any {
 return reflect.New(reflect.TypeOf(target)).Interface()
 }
 }
@@ -34,9 +34,9 @@ import (
 // The layout of the resulting HCL source is derived from the ordering of
 // the struct fields, with blank lines around nested blocks of different types.
 // Fields representing attributes should usually precede those representing
-// blocks so that the attributes can group togather in the result. For more
+// blocks so that the attributes can group together in the result. For more
 // control, use the hclwrite API directly.
-func EncodeIntoBody(val interface{}, dst *hclwrite.Body) {
+func EncodeIntoBody(val any, dst *hclwrite.Body) {
 rv := reflect.ValueOf(val)
 ty := rv.Type()
 if ty.Kind() == reflect.Ptr {
@@ -60,7 +60,7 @@ func EncodeIntoBody(val interface{}, dst *hclwrite.Body) {
 //
 // This function has the same constraints as EncodeIntoBody and will panic
 // if they are violated.
-func EncodeAsBlock(val interface{}, blockType string) *hclwrite.Block {
+func EncodeAsBlock(val any, blockType string) *hclwrite.Block {
 rv := reflect.ValueOf(val)
 ty := rv.Type()
 if ty.Kind() == reflect.Ptr {
@@ -158,7 +158,7 @@ func populateBody(rv reflect.Value, ty reflect.Type, tags *fieldTags, dst *hclwr

 if isSeq {
 l := fieldVal.Len()
-for i := 0; i < l; i++ {
+for i := range l {
 elemVal := fieldVal.Index(i)
 if !elemVal.IsValid() {
 continue // ignore (elem value is nil pointer)
@@ -22,7 +22,7 @@ import (
 // This uses the tags on the fields of the struct to discover how each
 // field's value should be expressed within configuration. If an invalid
 // mapping is attempted, this function will panic.
-func ImpliedBodySchema(val interface{}) (schema *hcl.BodySchema, partial bool) {
+func ImpliedBodySchema(val any) (schema *hcl.BodySchema, partial bool) {
 ty := reflect.TypeOf(val)

 if ty.Kind() == reflect.Ptr {
@@ -134,7 +134,7 @@ func getFieldTags(ty reflect.Type) *fieldTags {
 }

 ct := ty.NumField()
-for i := 0; i < ct; i++ {
+for i := range ct {
 field := ty.Field(i)
 tag := field.Tag.Get("hcl")
 if tag == "" {
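Note: the `for i := 0; i < n; i++` to `for i := range n` rewrites in the hunks above and below rely on Go 1.22's range-over-int form, which iterates from 0 up to n-1 with identical semantics. A minimal sketch, separate from the diff:

package main

import "fmt"

func main() {
	n := 3
	// Equivalent to: for i := 0; i < n; i++ { ... }
	for i := range n {
		fmt.Println(i) // prints 0, 1, 2
	}
}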
@@ -14,7 +14,7 @@ import (

 func TestImpliedBodySchema(t *testing.T) {
 tests := []struct {
-val interface{}
+val any
 wantSchema *hcl.BodySchema
 wantPartial bool
 }{
@@ -7,6 +7,7 @@ import (
 "math"
 "math/big"
 "reflect"
+"slices"
 "strconv"
 "strings"

@@ -15,6 +16,7 @@ import (
 "github.com/hashicorp/hcl/v2"
 "github.com/pkg/errors"
 "github.com/zclconf/go-cty/cty"
+"github.com/zclconf/go-cty/cty/convert"
 )

 type Opt struct {
@@ -40,7 +42,7 @@ type variableValidation struct {
 type functionDef struct {
 Name string `json:"-" hcl:"name,label"`
 Params *hcl.Attribute `json:"params,omitempty" hcl:"params"`
-Variadic *hcl.Attribute `json:"variadic_param,omitempty" hcl:"variadic_params"`
+Variadic *hcl.Attribute `json:"variadic_params,omitempty" hcl:"variadic_params"`
 Result *hcl.Attribute `json:"result,omitempty" hcl:"result"`
 }

@@ -554,27 +556,57 @@ func (p *parser) resolveBlockNames(block *hcl.Block) ([]string, error) {
 func (p *parser) validateVariables(vars map[string]*variable, ectx *hcl.EvalContext) hcl.Diagnostics {
 var diags hcl.Diagnostics
 for _, v := range vars {
-for _, validation := range v.Validations {
+for _, rule := range v.Validations {
-condition, condDiags := validation.Condition.Value(ectx)
+resultVal, condDiags := rule.Condition.Value(ectx)
 if condDiags.HasErrors() {
 diags = append(diags, condDiags...)
 continue
 }
-if !condition.True() {
-message, msgDiags := validation.ErrorMessage.Value(ectx)
+if resultVal.IsNull() {
+diags = append(diags, &hcl.Diagnostic{
+Severity: hcl.DiagError,
+Summary: "Invalid condition result",
+Detail: "Condition expression must return either true or false, not null.",
+Subject: rule.Condition.Range().Ptr(),
+Expression: rule.Condition,
+})
+continue
+}
+
+var err error
+resultVal, err = convert.Convert(resultVal, cty.Bool)
+if err != nil {
+diags = append(diags, &hcl.Diagnostic{
+Severity: hcl.DiagError,
+Summary: "Invalid condition result",
+Detail: fmt.Sprintf("Invalid condition result value: %s", err),
+Subject: rule.Condition.Range().Ptr(),
+Expression: rule.Condition,
+})
+continue
+}
+
+if !resultVal.True() {
+message, msgDiags := rule.ErrorMessage.Value(ectx)
 if msgDiags.HasErrors() {
 diags = append(diags, msgDiags...)
 continue
 }
+errorMessage := "This check failed, but has an invalid error message."
+if !message.IsNull() {
+errorMessage = message.AsString()
+}
 diags = append(diags, &hcl.Diagnostic{
 Severity: hcl.DiagError,
 Summary: "Validation failed",
-Detail: message.AsString(),
+Detail: errorMessage,
-Subject: validation.Condition.Range().Ptr(),
+Subject: rule.Condition.Range().Ptr(),
 })
 }
 }
 }

 return diags
 }

@@ -589,7 +621,7 @@ type ParseMeta struct {
 AllVariables []*Variable
 }

-func Parse(b hcl.Body, opt Opt, val interface{}) (*ParseMeta, hcl.Diagnostics) {
+func Parse(b hcl.Body, opt Opt, val any) (*ParseMeta, hcl.Diagnostics) {
 reserved := map[string]struct{}{}
 schema, _ := gohcl.ImpliedBodySchema(val)

@@ -763,7 +795,7 @@ func Parse(b hcl.Body, opt Opt, val interface{}) (*ParseMeta, hcl.Diagnostics) {
 types := map[string]field{}
 renamed := map[string]map[string][]string{}
 vt := reflect.ValueOf(val).Elem().Type()
-for i := 0; i < vt.NumField(); i++ {
+for i := range vt.NumField() {
 tags := strings.Split(vt.Field(i).Tag.Get("hcl"), ",")

 p.blockTypes[tags[0]] = vt.Field(i).Type.Elem().Elem()
@@ -831,7 +863,7 @@ func Parse(b hcl.Body, opt Opt, val interface{}) (*ParseMeta, hcl.Diagnostics) {
 oldValue, exists := t.values[lblName]
 if !exists && lblExists {
 if v.Elem().Field(t.idx).Type().Kind() == reflect.Slice {
-for i := 0; i < v.Elem().Field(t.idx).Len(); i++ {
+for i := range v.Elem().Field(t.idx).Len() {
 if lblName == v.Elem().Field(t.idx).Index(i).Elem().Field(lblIndex).String() {
 exists = true
 oldValue = value{Value: v.Elem().Field(t.idx).Index(i), idx: i}
@@ -898,7 +930,7 @@ func wrapErrorDiagnostic(message string, err error, subject *hcl.Range, context

 func setName(v reflect.Value, name string) {
 numFields := v.Elem().Type().NumField()
-for i := 0; i < numFields; i++ {
+for i := range numFields {
 parts := strings.Split(v.Elem().Type().Field(i).Tag.Get("hcl"), ",")
 for _, t := range parts[1:] {
 if t == "label" {
@@ -910,12 +942,10 @@ func setName(v reflect.Value, name string) {

 func getName(v reflect.Value) (string, bool) {
 numFields := v.Elem().Type().NumField()
-for i := 0; i < numFields; i++ {
+for i := range numFields {
 parts := strings.Split(v.Elem().Type().Field(i).Tag.Get("hcl"), ",")
-for _, t := range parts[1:] {
+if slices.Contains(parts[1:], "label") {
-if t == "label" {
+return v.Elem().Field(i).String(), true
-return v.Elem().Field(i).String(), true
-}
 }
 }
 return "", false
@@ -923,12 +953,10 @@ func getName(v reflect.Value) (string, bool) {

 func getNameIndex(v reflect.Value) (int, bool) {
 numFields := v.Elem().Type().NumField()
-for i := 0; i < numFields; i++ {
+for i := range numFields {
 parts := strings.Split(v.Elem().Type().Field(i).Tag.Get("hcl"), ",")
-for _, t := range parts[1:] {
+if slices.Contains(parts[1:], "label") {
-if t == "label" {
+return i, true
-return i, true
-}
 }
 }
 return 0, false
@@ -988,7 +1016,7 @@ func key(ks ...any) uint64 {
 return hash.Sum64()
 }

-func decodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
+func decodeBody(body hcl.Body, ctx *hcl.EvalContext, val any) hcl.Diagnostics {
 dec := gohcl.DecodeOptions{ImpliedType: ImpliedType}
 return dec.DecodeBody(body, ctx, val)
 }
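For context on the validateVariables change above: the new code no longer assumes a validation condition already evaluates to a boolean; it rejects null results and converts other values to cty.Bool before checking them. A rough sketch of that conversion step in isolation, assuming the same go-cty packages (the sample value is made up, not from the diff):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

func main() {
	// A condition expression may evaluate to a non-boolean value such as a string.
	raw := cty.StringVal("true")

	if raw.IsNull() {
		fmt.Println("condition must not be null")
		return
	}

	// convert.Convert reports an error if the value cannot be represented as a bool.
	b, err := convert.Convert(raw, cty.Bool)
	if err != nil {
		fmt.Println("invalid condition result:", err)
		return
	}
	fmt.Println("condition passed:", b.True())
}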
@@ -43,7 +43,7 @@ import (
 // In particular, ImpliedType will never use capsule types in its returned
 // type, because it cannot know the capsule types supported by the calling
 // program.
-func ImpliedType(gv interface{}) (cty.Type, error) {
+func ImpliedType(gv any) (cty.Type, error) {
 rt := reflect.TypeOf(gv)
 var path cty.Path
 return impliedType(rt, path)
@@ -148,7 +148,7 @@ func structTagIndices(st reflect.Type) map[string]int {
 ct := st.NumField()
 ret := make(map[string]int, ct)

-for i := 0; i < ct; i++ {
+for i := range ct {
 field := st.Field(i)
 attrName := field.Tag.Get("cty")
 if attrName != "" {
@@ -8,6 +8,7 @@ import (
 "encoding/json"
 "fmt"
 "io"
+"maps"
 "os"
 "slices"
 "strconv"
@@ -40,7 +41,6 @@ import (
 "github.com/moby/buildkit/solver/errdefs"
 "github.com/moby/buildkit/solver/pb"
 spb "github.com/moby/buildkit/sourcepolicy/pb"
-"github.com/moby/buildkit/util/entitlements"
 "github.com/moby/buildkit/util/progress/progresswriter"
 "github.com/moby/buildkit/util/tracing"
 "github.com/opencontainers/go-digest"
@@ -63,7 +63,7 @@ type Options struct {
 Inputs Inputs

 Ref string
-Allow []entitlements.Entitlement
+Allow []string
 Attests map[string]*string
 BuildArgs map[string]string
 CacheFrom []client.CacheOptionsEntry
@@ -205,15 +205,6 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[
 return nil, err
 }

-defers := make([]func(), 0, 2)
-defer func() {
-if err != nil {
-for _, f := range defers {
-f()
-}
-}
-}()
-
 reqForNodes := make(map[string][]*reqForNode)
 eg, ctx := errgroup.WithContext(ctx)

@@ -243,11 +234,11 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[
 if err != nil {
 return nil, err
 }
+defer release()
 if err := saveLocalState(so, k, opt, np.Node(), cfg); err != nil {
 return nil, err
 }
 addGitAttrs(so)
-defers = append(defers, release)
 reqn = append(reqn, &reqForNode{
 resolvedNode: np,
 so: so,
@@ -432,9 +423,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[
 FrontendInputs: frontendInputs,
 FrontendOpt: make(map[string]string),
 }
-for k, v := range so.FrontendAttrs {
-req.FrontendOpt[k] = v
-}
+maps.Copy(req.FrontendOpt, so.FrontendAttrs)
 so.Frontend = ""
 so.FrontendInputs = nil

@@ -536,11 +525,10 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[
 }
 }
 }

 node := dp.Node().Driver
 if node.IsMobyDriver() {
 for _, e := range so.Exports {
-if e.Type == "moby" && e.Attrs["push"] != "" {
+if e.Type == "moby" && e.Attrs["push"] != "" && !node.Features(ctx)[driver.DirectPush] {
 if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok {
 pushNames = e.Attrs["name"]
 if pushNames == "" {
@@ -572,6 +560,14 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[
 }
 }
 }
+// if prefer-image-digest is set in the solver options, remove the image
+// config digest from the exporter's response
+for _, e := range so.Exports {
+if e.Attrs["prefer-image-digest"] == "true" {
+delete(rr.ExporterResponse, exptypes.ExporterImageConfigDigestKey)
+break
+}
+}
 return nil
 })
 }
@@ -623,7 +619,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[
 // This is fallback for some very old buildkit versions.
 // Note that the mediatype isn't really correct as most of the time it is image manifest and
 // not manifest list but actually both are handled because for Docker mediatypes the
-// mediatype value in the Accpet header does not seem to matter.
+// mediatype value in the Accept header does not seem to matter.
 s, ok = r.ExporterResponse[exptypes.ExporterImageDigestKey]
 if ok {
 descs = append(descs, specs.Descriptor{
@@ -835,7 +831,7 @@ func remoteDigestWithMoby(ctx context.Context, d *driver.DriverHandle, name stri
 if err != nil {
 return "", err
 }
-img, _, err := api.ImageInspectWithRaw(ctx, name)
+img, err := api.ImageInspect(ctx, name)
 if err != nil {
 return "", err
 }
@@ -4,6 +4,7 @@ import (
 "context"
 stderrors "errors"
 "net"
+"slices"

 "github.com/containerd/platforms"
 "github.com/docker/buildx/builder"
@@ -37,15 +38,7 @@ func Dial(ctx context.Context, nodes []builder.Node, pw progress.Writer, platfor
 for _, ls := range resolved {
 for _, rn := range ls {
 if platform != nil {
-p := *platform
-var found bool
-for _, pp := range rn.platforms {
-if platforms.Only(p).Match(pp) {
-found = true
-break
-}
-}
-if !found {
+if !slices.ContainsFunc(rn.platforms, platforms.Only(*platform).Match) {
 continue
 }
 }
@@ -3,6 +3,7 @@ package build
 import (
 "context"
 "fmt"
+"slices"
 "sync"

 "github.com/containerd/platforms"
@@ -221,7 +222,7 @@ func (r *nodeResolver) get(p specs.Platform, matcher matchMaker, additionalPlatf
 for i, node := range r.nodes {
 platforms := node.Platforms
 if additionalPlatforms != nil {
-platforms = append([]specs.Platform{}, platforms...)
+platforms = slices.Clone(platforms)
 platforms = append(platforms, additionalPlatforms(i, node)...)
 }
 for _, p2 := range platforms {
@@ -2,6 +2,7 @@ package build

 import (
 "context"
+"maps"
 "os"
 "path"
 "path/filepath"
@@ -127,9 +128,7 @@ func getGitAttributes(ctx context.Context, contextPath, dockerfilePath string) (
 if so.FrontendAttrs == nil {
 so.FrontendAttrs = make(map[string]string)
 }
-for k, v := range res {
-so.FrontendAttrs[k] = v
-}
+maps.Copy(so.FrontendAttrs, res)

 if !setGitInfo || root == "" {
 return
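Note: the change above (and the later provenance hunk) replaces a hand-written copy loop with maps.Copy from the standard library (Go 1.21+), which copies every key/value pair from the source map into the destination. A minimal standalone example with made-up keys:

package main

import (
	"fmt"
	"maps"
)

func main() {
	dst := map[string]string{"context": "."}
	src := map[string]string{"vcs:source": "git@example.com:org/repo.git"}

	// Equivalent to: for k, v := range src { dst[k] = v }
	maps.Copy(dst, src)
	fmt.Println(dst)
}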
@@ -9,6 +9,7 @@ import (
 "testing"

 "github.com/docker/buildx/util/gitutil"
+"github.com/docker/buildx/util/gitutil/gittestutil"
 "github.com/moby/buildkit/client"
 specs "github.com/opencontainers/image-spec/specs-go/v1"
 "github.com/stretchr/testify/assert"
@@ -16,18 +17,18 @@ import (
 )

 func setupTest(tb testing.TB) {
-gitutil.Mktmp(tb)
+gittestutil.Mktmp(tb)

 c, err := gitutil.New()
 require.NoError(tb, err)
-gitutil.GitInit(c, tb)
+gittestutil.GitInit(c, tb)

 df := []byte("FROM alpine:latest\n")
 require.NoError(tb, os.WriteFile("Dockerfile", df, 0644))

-gitutil.GitAdd(c, tb, "Dockerfile")
+gittestutil.GitAdd(c, tb, "Dockerfile")
-gitutil.GitCommit(c, tb, "initial commit")
+gittestutil.GitCommit(c, tb, "initial commit")
-gitutil.GitSetRemote(c, tb, "origin", "git@github.com:docker/buildx.git")
+gittestutil.GitSetRemote(c, tb, "origin", "git@github.com:docker/buildx.git")
 }

 func TestGetGitAttributesNotGitRepo(t *testing.T) {
@@ -188,19 +189,19 @@ func TestLocalDirs(t *testing.T) {
 }

 func TestLocalDirsSub(t *testing.T) {
-gitutil.Mktmp(t)
+gittestutil.Mktmp(t)

 c, err := gitutil.New()
 require.NoError(t, err)
-gitutil.GitInit(c, t)
+gittestutil.GitInit(c, t)

 df := []byte("FROM alpine:latest\n")
 require.NoError(t, os.MkdirAll("app", 0755))
 require.NoError(t, os.WriteFile("app/Dockerfile", df, 0644))

-gitutil.GitAdd(c, t, "app/Dockerfile")
+gittestutil.GitAdd(c, t, "app/Dockerfile")
-gitutil.GitCommit(c, t, "initial commit")
+gittestutil.GitCommit(c, t, "initial commit")
-gitutil.GitSetRemote(c, t, "origin", "git@github.com:docker/buildx.git")
+gittestutil.GitSetRemote(c, t, "origin", "git@github.com:docker/buildx.git")

 so := &client.SolveOpt{
 FrontendAttrs: map[string]string{},
@@ -237,6 +237,11 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt *O
 opt.Exports[i].Output = func(_ map[string]string) (io.WriteCloser, error) {
 return w, nil
 }
+// if docker is using the containerd snapshotter, prefer to export the image digest
+// (rather than the image config digest). See https://github.com/moby/moby/issues/45458.
+if features[dockerutil.OCIImporter] {
+opt.Exports[i].Attrs["prefer-image-digest"] = "true"
+}
 }
 } else if !nodeDriver.Features(ctx)[driver.DockerExporter] {
 return nil, nil, notSupported(driver.DockerExporter, nodeDriver, "https://docs.docker.com/go/build-exporters/")
@@ -318,7 +323,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt *O
 switch opt.NetworkMode {
 case "host":
 so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
-so.AllowedEntitlements = append(so.AllowedEntitlements, entitlements.EntitlementNetworkHost)
+so.AllowedEntitlements = append(so.AllowedEntitlements, entitlements.EntitlementNetworkHost.String())
 case "none":
 so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
 case "", "default":
@@ -5,6 +5,7 @@ import (
 "encoding/base64"
 "encoding/json"
 "io"
+"maps"
 "strings"
 "sync"

@@ -40,9 +41,7 @@ func setRecordProvenance(ctx context.Context, c *client.Client, sr *client.Solve
 if err != nil {
 return err
 }
-for k, v := range res {
-sr.ExporterResponse[k] = v
-}
+maps.Copy(sr.ExporterResponse, res)
 return nil
 })
 }
@@ -28,11 +28,11 @@ func TestSyncMultiReaderParallel(t *testing.T) {

 	readers := make([]io.ReadCloser, numReaders)

-	for i := 0; i < numReaders; i++ {
+	for i := range numReaders {
 		readers[i] = mr.NewReadCloser()
 	}

-	for i := 0; i < numReaders; i++ {
+	for i := range numReaders {
 		wg.Add(1)
 		go func(readerId int) {
 			defer wg.Done()
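This test now uses Go 1.22's range-over-integer loop form. A small self-contained illustration (assumes Go 1.22 or newer):

```go
package main

import "fmt"

func main() {
	const numReaders = 3

	// Go 1.22+: ranging over an integer yields 0, 1, ..., numReaders-1,
	// equivalent to the classic for i := 0; i < numReaders; i++ loop.
	for i := range numReaders {
		fmt.Println("reader", i)
	}
}
```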
@@ -5,6 +5,7 @@ import (
 	"encoding/json"
 	"net/url"
 	"os"
+	"slices"
 	"sort"
 	"strings"
 	"sync"
@@ -199,7 +200,7 @@ func (b *Builder) Boot(ctx context.Context) (bool, error) {
 		err = err1
 	}

-	if err == nil && len(errCh) == len(toBoot) {
+	if err == nil && len(errCh) > 0 {
 		return false, <-errCh
 	}
 	return true, err
@@ -656,13 +657,7 @@ func parseBuildkitdFlags(inp string, driver string, driverOpts map[string]string
 	flags.StringArrayVar(&allowInsecureEntitlements, "allow-insecure-entitlement", nil, "")
 	_ = flags.Parse(res)

-	var hasNetworkHostEntitlement bool
-	for _, e := range allowInsecureEntitlements {
-		if e == "network.host" {
-			hasNetworkHostEntitlement = true
-			break
-		}
-	}
+	hasNetworkHostEntitlement := slices.Contains(allowInsecureEntitlements, "network.host")

 	var hasNetworkHostEntitlementInConf bool
 	if buildkitdConfigFile != "" {
@@ -671,11 +666,8 @@ func parseBuildkitdFlags(inp string, driver string, driverOpts map[string]string
 			return nil, err
 		} else if btoml != nil {
 			if ies := btoml.GetArray("insecure-entitlements"); ies != nil {
-				for _, e := range ies.([]string) {
-					if e == "network.host" {
-						hasNetworkHostEntitlementInConf = true
-						break
-					}
+				if slices.Contains(ies.([]string), "network.host") {
+					hasNetworkHostEntitlementInConf = true
 				}
 			}
 		}
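Both hunks above collapse an explicit search loop into `slices.Contains` from the Go 1.21 standard library. A standalone sketch of the pattern (sample values invented):

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	allowInsecureEntitlements := []string{"security.insecure", "network.host"}

	// slices.Contains reports whether the slice holds the given element,
	// replacing the removed for/if/break loop.
	hasNetworkHost := slices.Contains(allowInsecureEntitlements, "network.host")
	fmt.Println(hasNetworkHost) // true
}
```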
@@ -169,7 +169,7 @@ func (b *Builder) LoadNodes(ctx context.Context, opts ...LoadNodesOption) (_ []N
 		// dynamic nodes are used in Kubernetes driver.
 		// Kubernetes' pods are dynamically mapped to BuildKit Nodes.
 		if di.DriverInfo != nil && len(di.DriverInfo.DynamicNodes) > 0 {
-			for i := 0; i < len(di.DriverInfo.DynamicNodes); i++ {
+			for i := range di.DriverInfo.DynamicNodes {
 				diClone := di
 				if pl := di.DriverInfo.DynamicNodes[i].Platforms; len(pl) > 0 {
 					diClone.Platforms = pl
@@ -7,15 +7,13 @@ import (
 	"path/filepath"

 	"github.com/docker/buildx/commands"
-	controllererrors "github.com/docker/buildx/controller/errdefs"
 	"github.com/docker/buildx/util/desktop"
 	"github.com/docker/buildx/version"
 	"github.com/docker/cli/cli"
-	"github.com/docker/cli/cli-plugins/manager"
+	"github.com/docker/cli/cli-plugins/metadata"
 	"github.com/docker/cli/cli-plugins/plugin"
 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/cli/debug"
-	cliflags "github.com/docker/cli/cli/flags"
 	"github.com/moby/buildkit/solver/errdefs"
 	"github.com/moby/buildkit/util/stack"
 	"github.com/pkg/errors"
@@ -37,11 +35,7 @@ func init() {
 }

 func runStandalone(cmd *command.DockerCli) error {
-	if err := cmd.Initialize(cliflags.NewClientOptions()); err != nil {
-		return err
-	}
 	defer flushMetrics(cmd)

 	executable := os.Args[0]
 	rootCmd := commands.NewRootCmd(filepath.Base(executable), false, cmd)
 	return rootCmd.Execute()
@@ -64,7 +58,7 @@ func flushMetrics(cmd *command.DockerCli) {

 func runPlugin(cmd *command.DockerCli) error {
 	rootCmd := commands.NewRootCmd("buildx", true, cmd)
-	return plugin.RunPlugin(cmd, rootCmd, manager.Metadata{
+	return plugin.RunPlugin(cmd, rootCmd, metadata.Metadata{
 		SchemaVersion: "0.1.0",
 		Vendor:        "Docker Inc.",
 		Version:       version.Version,
@@ -117,11 +111,6 @@ func main() {
 		var ebr *desktop.ErrorWithBuildRef
 		if errors.As(err, &ebr) {
 			ebr.Print(cmd.Err())
-		} else {
-			var be *controllererrors.BuildError
-			if errors.As(err, &be) {
-				be.PrintBuildDetails(cmd.Err())
-			}
 		}

 		os.Exit(1)
@@ -66,7 +66,11 @@ type bakeOptions struct {
 func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in bakeOptions, cFlags commonFlags) (err error) {
 	mp := dockerCli.MeterProvider()

-	ctx, end, err := tracing.TraceCurrentCommand(ctx, "bake")
+	ctx, end, err := tracing.TraceCurrentCommand(ctx, append([]string{"bake"}, targets...),
+		attribute.String("builder", in.builder),
+		attribute.StringSlice("targets", targets),
+		attribute.StringSlice("files", in.files),
+	)
 	if err != nil {
 		return err
 	}
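The tracing call now receives the command name plus span attributes. The snippet below only illustrates the OpenTelemetry `attribute` constructors used here; the surrounding `tracing.TraceCurrentCommand` helper is buildx-internal, and the builder and target values are invented for the example:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	targets := []string{"app", "docs"}

	// attribute.String and attribute.StringSlice build typed span attributes;
	// the bake command attaches builder, targets and files this way.
	attrs := []attribute.KeyValue{
		attribute.String("builder", "mybuilder"),
		attribute.StringSlice("targets", targets),
	}
	fmt.Println(attrs)
}
```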
@@ -283,7 +287,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
 		}
 	}

-	if err := saveLocalStateGroup(dockerCli, in, targets, bo, overrides, def); err != nil {
+	if err := saveLocalStateGroup(dockerCli, in, targets, bo); err != nil {
 		return err
 	}

@@ -305,7 +309,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
 		desktop.PrintBuildDetails(os.Stderr, printer.BuildRefs(), term)
 	}
 	if len(in.metadataFile) > 0 {
-		dt := make(map[string]interface{})
+		dt := make(map[string]any)
 		for t, r := range resp {
 			dt[t] = decodeExporterResponse(r.ExporterResponse)
 		}
@@ -420,6 +424,14 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
 		fmt.Fprintln(dockerCli.Out(), string(dt))
 	}

+	for _, name := range names {
+		if sp, ok := resp[name]; ok {
+			if v, ok := sp.ExporterResponse["frontend.result.inlinemessage"]; ok {
+				fmt.Fprintf(dockerCli.Out(), "\n# %s\n%s\n", name, v)
+			}
+		}
+	}
+
 	if exitCode != 0 {
 		os.Exit(exitCode)
 	}
@@ -488,7 +500,14 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
 	return cmd
 }

-func saveLocalStateGroup(dockerCli command.Cli, in bakeOptions, targets []string, bo map[string]build.Options, overrides []string, def any) error {
+func saveLocalStateGroup(dockerCli command.Cli, in bakeOptions, targets []string, bo map[string]build.Options) error {
+	l, err := localstate.New(confutil.NewConfig(dockerCli))
+	if err != nil {
+		return err
+	}
+
+	defer l.MigrateIfNeeded()
+
 	prm := confutil.MetadataProvenance()
 	if len(in.metadataFile) == 0 {
 		prm = confutil.MetadataProvenanceModeDisabled
@@ -508,19 +527,10 @@ func saveLocalStateGroup(dockerCli command.Cli, in bakeOptions, targets []string
 	if len(refs) == 0 {
 		return nil
 	}
-	l, err := localstate.New(confutil.NewConfig(dockerCli))
-	if err != nil {
-		return err
-	}
-	dtdef, err := json.MarshalIndent(def, "", "  ")
-	if err != nil {
-		return err
-	}
 	return l.SaveGroup(groupRef, localstate.StateGroup{
-		Definition: dtdef,
+		Refs:    refs,
 		Targets: targets,
-		Inputs:  overrides,
-		Refs:    refs,
 	})
 }

@@ -11,6 +11,7 @@ import (
 	"io"
 	"os"
 	"path/filepath"
+	"slices"
 	"strconv"
 	"strings"
 	"sync"
@@ -41,7 +42,6 @@ import (
 	"github.com/docker/cli/cli/command"
 	dockeropts "github.com/docker/cli/opts"
 	"github.com/docker/docker/api/types/versions"
-	"github.com/docker/docker/pkg/ioutils"
 	"github.com/moby/buildkit/client"
 	"github.com/moby/buildkit/exporter/containerimage/exptypes"
 	"github.com/moby/buildkit/frontend/subrequests"
@@ -52,6 +52,7 @@ import (
 	solverpb "github.com/moby/buildkit/solver/pb"
 	"github.com/moby/buildkit/util/grpcerrors"
 	"github.com/moby/buildkit/util/progress/progressui"
+	"github.com/moby/sys/atomicwriter"
 	"github.com/morikuni/aec"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
|
|||||||
exportPush bool
|
exportPush bool
|
||||||
exportLoad bool
|
exportLoad bool
|
||||||
|
|
||||||
control.ControlOptions
|
|
||||||
|
|
||||||
invokeConfig *invokeConfig
|
invokeConfig *invokeConfig
|
||||||
}
|
}
|
||||||
|
|
||||||
func (o *buildOptions) toControllerOptions() (*controllerapi.BuildOptions, error) {
|
func (o *buildOptions) toControllerOptions() (*cbuild.Options, error) {
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
buildArgs, err := listToMap(o.buildArgs, true)
|
buildArgs, err := listToMap(o.buildArgs, true)
|
||||||
@@ -121,7 +120,7 @@ func (o *buildOptions) toControllerOptions() (*controllerapi.BuildOptions, error
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
opts := controllerapi.BuildOptions{
|
opts := cbuild.Options{
|
||||||
Allow: o.allow,
|
Allow: o.allow,
|
||||||
Annotations: o.annotations,
|
Annotations: o.annotations,
|
||||||
BuildArgs: buildArgs,
|
BuildArgs: buildArgs,
|
||||||
@@ -156,7 +155,7 @@ func (o *buildOptions) toControllerOptions() (*controllerapi.BuildOptions, error
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
inAttests := append([]string{}, o.attests...)
|
inAttests := slices.Clone(o.attests)
|
||||||
if o.provenance != "" {
|
if o.provenance != "" {
|
||||||
inAttests = append(inAttests, buildflags.CanonicalizeAttest("provenance", o.provenance))
|
inAttests = append(inAttests, buildflags.CanonicalizeAttest("provenance", o.provenance))
|
||||||
}
|
}
|
||||||
@@ -285,7 +284,11 @@ func (o *buildOptionsHash) String() string {
|
|||||||
func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions) (err error) {
|
func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions) (err error) {
|
||||||
mp := dockerCli.MeterProvider()
|
mp := dockerCli.MeterProvider()
|
||||||
|
|
||||||
ctx, end, err := tracing.TraceCurrentCommand(ctx, "build")
|
ctx, end, err := tracing.TraceCurrentCommand(ctx, []string{"build", options.contextPath},
|
||||||
|
attribute.String("builder", options.builder),
|
||||||
|
attribute.String("context", options.contextPath),
|
||||||
|
attribute.String("dockerfile", options.dockerfileName),
|
||||||
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -399,6 +402,10 @@ func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions)
|
|||||||
os.Exit(exitcode)
|
os.Exit(exitcode)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if v, ok := resp.ExporterResponse["frontend.result.inlinemessage"]; ok {
|
||||||
|
fmt.Fprintf(dockerCli.Out(), "\n%s\n", v)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -411,7 +418,7 @@ func getImageID(resp map[string]string) string {
|
|||||||
return dgst
|
return dgst
|
||||||
}
|
}
|
||||||
|
|
||||||
func runBasicBuild(ctx context.Context, dockerCli command.Cli, opts *controllerapi.BuildOptions, printer *progress.Printer) (*client.SolveResponse, *build.Inputs, error) {
|
func runBasicBuild(ctx context.Context, dockerCli command.Cli, opts *cbuild.Options, printer *progress.Printer) (*client.SolveResponse, *build.Inputs, error) {
|
||||||
resp, res, dfmap, err := cbuild.RunBuild(ctx, dockerCli, opts, dockerCli.In(), printer, false)
|
resp, res, dfmap, err := cbuild.RunBuild(ctx, dockerCli, opts, dockerCli.In(), printer, false)
|
||||||
if res != nil {
|
if res != nil {
|
||||||
res.Done()
|
res.Done()
|
||||||
@@ -419,15 +426,12 @@ func runBasicBuild(ctx context.Context, dockerCli command.Cli, opts *controllera
|
|||||||
return resp, dfmap, err
|
return resp, dfmap, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *controllerapi.BuildOptions, options buildOptions, printer *progress.Printer) (*client.SolveResponse, *build.Inputs, error) {
|
func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *cbuild.Options, options buildOptions, printer *progress.Printer) (*client.SolveResponse, *build.Inputs, error) {
|
||||||
if options.invokeConfig != nil && (options.dockerfileName == "-" || options.contextPath == "-") {
|
if options.invokeConfig != nil && (options.dockerfileName == "-" || options.contextPath == "-") {
|
||||||
// stdin must be usable for monitor
|
// stdin must be usable for monitor
|
||||||
return nil, nil, errors.Errorf("Dockerfile or context from stdin is not supported with invoke")
|
return nil, nil, errors.Errorf("Dockerfile or context from stdin is not supported with invoke")
|
||||||
}
|
}
|
||||||
c, err := controller.NewController(ctx, options.ControlOptions, dockerCli, printer)
|
c := controller.NewController(ctx, dockerCli)
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
defer func() {
|
defer func() {
|
||||||
if err := c.Close(); err != nil {
|
if err := c.Close(); err != nil {
|
||||||
logrus.Warnf("failed to close server connection %v", err)
|
logrus.Warnf("failed to close server connection %v", err)
|
||||||
@@ -436,7 +440,7 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *contro
|
|||||||
|
|
||||||
// NOTE: buildx server has the current working directory different from the client
|
// NOTE: buildx server has the current working directory different from the client
|
||||||
// so we need to resolve paths to abosolute ones in the client.
|
// so we need to resolve paths to abosolute ones in the client.
|
||||||
opts, err = controllerapi.ResolveOptionPaths(opts)
|
opts, err := cbuild.ResolveOptionPaths(opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
@@ -462,11 +466,10 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *contro
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
ref, resp, inputs, err = c.Build(ctx, opts, pr, printer)
|
resp, inputs, err = c.Build(ctx, opts, pr, printer)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
var be *controllererrors.BuildError
|
var be *controllererrors.BuildError
|
||||||
if errors.As(err, &be) {
|
if errors.As(err, &be) {
|
||||||
ref = be.SessionID
|
|
||||||
retErr = err
|
retErr = err
|
||||||
// We can proceed to monitor
|
// We can proceed to monitor
|
||||||
} else {
|
} else {
|
||||||
@@ -506,8 +509,8 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *contro
|
|||||||
resp, retErr = monitorBuildResult.Resp, monitorBuildResult.Err
|
resp, retErr = monitorBuildResult.Resp, monitorBuildResult.Err
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
if err := c.Disconnect(ctx, ref); err != nil {
|
if err := c.Close(); err != nil {
|
||||||
logrus.Warnf("disconnect error: %v", err)
|
logrus.Warnf("close error: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -593,7 +596,7 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions, debugConfig *debug.D

 	flags.StringSliceVar(&options.extraHosts, "add-host", []string{}, `Add a custom host-to-IP mapping (format: "host:ip")`)

-	flags.StringSliceVar(&options.allow, "allow", []string{}, `Allow extra privileged entitlement (e.g., "network.host", "security.insecure")`)
+	flags.StringArrayVar(&options.allow, "allow", []string{}, `Allow extra privileged entitlement (e.g., "network.host", "security.insecure")`)

 	flags.StringArrayVarP(&options.annotations, "annotation", "", []string{}, "Add annotation to the image")

@@ -644,14 +647,6 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions, debugConfig *debug.D
 	flags.StringVar(&options.sbom, "sbom", "", `Shorthand for "--attest=type=sbom"`)
 	flags.StringVar(&options.provenance, "provenance", "", `Shorthand for "--attest=type=provenance"`)

-	if confutil.IsExperimental() {
-		// TODO: move this to debug command if needed
-		flags.StringVar(&options.Root, "root", "", "Specify root directory of server to connect")
-		flags.BoolVar(&options.Detach, "detach", false, "Detach buildx server (supported only on linux)")
-		flags.StringVar(&options.ServerConfig, "server-config", "", "Specify buildx server config file (used only when launching new server)")
-		cobrautil.MarkFlagsExperimental(flags, "root", "detach", "server-config")
-	}
-
 	flags.StringVar(&options.callFunc, "call", "build", `Set method for evaluating build ("check", "outline", "targets")`)
 	flags.VarPF(callAlias(&options.callFunc, "check"), "check", "", `Shorthand for "--call=check"`)
 	flags.Lookup("check").NoOptDefVal = "true"
@@ -740,15 +735,15 @@ func checkWarnedFlags(f *pflag.Flag) {
 	}
 }

-func writeMetadataFile(filename string, dt interface{}) error {
+func writeMetadataFile(filename string, dt any) error {
 	b, err := json.MarshalIndent(dt, "", "  ")
 	if err != nil {
 		return err
 	}
-	return ioutils.AtomicWriteFile(filename, b, 0644)
+	return atomicwriter.WriteFile(filename, b, 0644)
 }

-func decodeExporterResponse(exporterResponse map[string]string) map[string]interface{} {
+func decodeExporterResponse(exporterResponse map[string]string) map[string]any {
 	decFunc := func(k, v string) ([]byte, error) {
 		if k == "result.json" {
 			// result.json is part of metadata response for subrequests which
@@ -757,16 +752,16 @@ func decodeExporterResponse(exporterResponse map[string]string) map[string]inter
 		}
 		return base64.StdEncoding.DecodeString(v)
 	}
-	out := make(map[string]interface{})
+	out := make(map[string]any)
 	for k, v := range exporterResponse {
 		dt, err := decFunc(k, v)
 		if err != nil {
 			out[k] = v
 			continue
 		}
-		var raw map[string]interface{}
+		var raw map[string]any
 		if err = json.Unmarshal(dt, &raw); err != nil || len(raw) == 0 {
-			var rawList []map[string]interface{}
+			var rawList []map[string]any
 			if err = json.Unmarshal(dt, &rawList); err != nil || len(rawList) == 0 {
 				out[k] = v
 				continue
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cfg *invokeConfig) runDebug(ctx context.Context, ref string, options *controllerapi.BuildOptions, c control.BuildxController, stdin io.ReadCloser, stdout io.WriteCloser, stderr console.File, progress *progress.Printer) (*monitor.MonitorBuildResult, error) {
|
func (cfg *invokeConfig) runDebug(ctx context.Context, ref string, options *cbuild.Options, c control.BuildxController, stdin io.ReadCloser, stdout io.WriteCloser, stderr console.File, progress *progress.Printer) (*monitor.MonitorBuildResult, error) {
|
||||||
con := console.Current()
|
con := console.Current()
|
||||||
if err := con.SetRaw(); err != nil {
|
if err := con.SetRaw(); err != nil {
|
||||||
// TODO: run disconnect in build command (on error case)
|
// TODO: run disconnect in build command (on error case)
|
||||||
if err := c.Disconnect(ctx, ref); err != nil {
|
if err := c.Close(); err != nil {
|
||||||
logrus.Warnf("disconnect error: %v", err)
|
logrus.Warnf("close error: %v", err)
|
||||||
}
|
}
|
||||||
return nil, errors.Errorf("failed to configure terminal: %v", err)
|
return nil, errors.Errorf("failed to configure terminal: %v", err)
|
||||||
}
|
}
|
||||||
|
@@ -3,11 +3,9 @@ package debug
 import (
 	"context"
 	"os"
-	"runtime"

 	"github.com/containerd/console"
 	"github.com/docker/buildx/controller"
-	"github.com/docker/buildx/controller/control"
 	controllerapi "github.com/docker/buildx/controller/pb"
 	"github.com/docker/buildx/monitor"
 	"github.com/docker/buildx/util/cobrautil"
@@ -35,7 +33,6 @@ type DebuggableCmd interface {
 }

 func RootCmd(dockerCli command.Cli, children ...DebuggableCmd) *cobra.Command {
-	var controlOptions control.ControlOptions
 	var progressMode string
 	var options DebugConfig

@@ -50,10 +47,7 @@ func RootCmd(dockerCli command.Cli, children ...DebuggableCmd) *cobra.Command {
 			}

 			ctx := context.TODO()
-			c, err := controller.NewController(ctx, controlOptions, dockerCli, printer)
-			if err != nil {
-				return err
-			}
+			c := controller.NewController(ctx, dockerCli)
 			defer func() {
 				if err := c.Close(); err != nil {
 					logrus.Warnf("failed to close server connection %v", err)
@@ -76,13 +70,9 @@ func RootCmd(dockerCli command.Cli, children ...DebuggableCmd) *cobra.Command {
 	flags := cmd.Flags()
 	flags.StringVar(&options.InvokeFlag, "invoke", "", "Launch a monitor with executing specified command")
 	flags.StringVar(&options.OnFlag, "on", "error", "When to launch the monitor ([always, error])")

-	flags.StringVar(&controlOptions.Root, "root", "", "Specify root directory of server to connect for the monitor")
-	flags.BoolVar(&controlOptions.Detach, "detach", runtime.GOOS == "linux", "Detach buildx server for the monitor (supported only on linux)")
-	flags.StringVar(&controlOptions.ServerConfig, "server-config", "", "Specify buildx server config file for the monitor (used only when launching new server)")
 	flags.StringVar(&progressMode, "progress", "auto", `Set type of progress output ("auto", "plain", "tty", "rawjson") for the monitor. Use plain to show container output`)

-	cobrautil.MarkFlagsExperimental(flags, "invoke", "on", "root", "detach", "server-config")
+	cobrautil.MarkFlagsExperimental(flags, "invoke", "on")

 	for _, c := range children {
 		cmd.AddCommand(c.NewDebugger(&options))
@@ -124,7 +124,7 @@ func duCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
 	return cmd
 }

-func printKV(w io.Writer, k string, v interface{}) {
+func printKV(w io.Writer, k string, v any) {
 	fmt.Fprintf(w, "%s:\t%v\n", k, v)
 }

commands/history/export.go (new file, 160 lines)
@@ -0,0 +1,160 @@
+package history
+
+import (
+	"context"
+	"io"
+	"os"
+	"slices"
+
+	"github.com/containerd/console"
+	"github.com/containerd/platforms"
+	"github.com/docker/buildx/builder"
+	"github.com/docker/buildx/localstate"
+	"github.com/docker/buildx/util/cobrautil/completion"
+	"github.com/docker/buildx/util/confutil"
+	"github.com/docker/buildx/util/desktop/bundle"
+	"github.com/docker/cli/cli/command"
+	"github.com/moby/buildkit/client"
+	"github.com/pkg/errors"
+	"github.com/spf13/cobra"
+)
+
+type exportOptions struct {
+	builder string
+	refs    []string
+	output  string
+	all     bool
+}
+
+func runExport(ctx context.Context, dockerCli command.Cli, opts exportOptions) error {
+	b, err := builder.New(dockerCli, builder.WithName(opts.builder))
+	if err != nil {
+		return err
+	}
+
+	nodes, err := b.LoadNodes(ctx, builder.WithData())
+	if err != nil {
+		return err
+	}
+	for _, node := range nodes {
+		if node.Err != nil {
+			return node.Err
+		}
+	}
+
+	if len(opts.refs) == 0 {
+		opts.refs = []string{""}
+	}
+
+	var res []historyRecord
+	for _, ref := range opts.refs {
+		recs, err := queryRecords(ctx, ref, nodes, &queryOptions{
+			CompletedOnly: true,
+		})
+		if err != nil {
+			return err
+		}
+
+		if len(recs) == 0 {
+			if ref == "" {
+				return errors.New("no records found")
+			}
+			return errors.Errorf("no record found for ref %q", ref)
+		}
+
+		if ref == "" {
+			slices.SortFunc(recs, func(a, b historyRecord) int {
+				return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
+			})
+		}
+
+		if opts.all {
+			res = append(res, recs...)
+			break
+		} else {
+			res = append(res, recs[0])
+		}
+	}
+
+	ls, err := localstate.New(confutil.NewConfig(dockerCli))
+	if err != nil {
+		return err
+	}
+
+	visited := map[*builder.Node]struct{}{}
+	var clients []*client.Client
+	for _, rec := range res {
+		if _, ok := visited[rec.node]; ok {
+			continue
+		}
+		c, err := rec.node.Driver.Client(ctx)
+		if err != nil {
+			return err
+		}
+		clients = append(clients, c)
+	}
+
+	toExport := make([]*bundle.Record, 0, len(res))
+	for _, rec := range res {
+		var defaultPlatform string
+		if p := rec.node.Platforms; len(p) > 0 {
+			defaultPlatform = platforms.FormatAll(platforms.Normalize(p[0]))
+		}
+
+		var stg *localstate.StateGroup
+		st, _ := ls.ReadRef(rec.node.Builder, rec.node.Name, rec.Ref)
+		if st != nil && st.GroupRef != "" {
+			stg, err = ls.ReadGroup(st.GroupRef)
+			if err != nil {
+				return err
+			}
+		}
+
+		toExport = append(toExport, &bundle.Record{
+			BuildHistoryRecord: rec.BuildHistoryRecord,
+			DefaultPlatform:    defaultPlatform,
+			LocalState:         st,
+			StateGroup:         stg,
+		})
+	}
+
+	var w io.Writer = os.Stdout
+	if opts.output != "" {
+		f, err := os.Create(opts.output)
+		if err != nil {
+			return errors.Wrapf(err, "failed to create output file %q", opts.output)
+		}
+		defer f.Close()
+		w = f
+	} else {
+		if _, err := console.ConsoleFromFile(os.Stdout); err == nil {
+			return errors.Errorf("refusing to write to console, use --output to specify a file")
+		}
+	}
+
+	return bundle.Export(ctx, clients, w, toExport)
+}
+
+func exportCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
+	var options exportOptions
+
+	cmd := &cobra.Command{
+		Use:   "export [OPTIONS] [REF]",
+		Short: "Export a build into Docker Desktop bundle",
+		RunE: func(cmd *cobra.Command, args []string) error {
+			if options.all && len(args) > 0 {
+				return errors.New("cannot specify refs when using --all")
+			}
+			options.refs = args
+			options.builder = *rootOpts.Builder
+			return runExport(cmd.Context(), dockerCli, options)
+		},
+		ValidArgsFunction: completion.Disable,
+	}
+
+	flags := cmd.Flags()
+	flags.StringVarP(&options.output, "output", "o", "", "Output file path")
+	flags.BoolVar(&options.all, "all", false, "Export all records for the builder")
+
+	return cmd
+}
commands/history/import.go (new file, 135 lines)
@@ -0,0 +1,135 @@
+package history
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"os"
+	"strings"
+
+	remoteutil "github.com/docker/buildx/driver/remote/util"
+	"github.com/docker/buildx/util/cobrautil/completion"
+	"github.com/docker/buildx/util/desktop"
+	"github.com/docker/cli/cli/command"
+	"github.com/pkg/browser"
+	"github.com/pkg/errors"
+	"github.com/spf13/cobra"
+)
+
+type importOptions struct {
+	file []string
+}
+
+func runImport(ctx context.Context, dockerCli command.Cli, opts importOptions) error {
+	sock, err := desktop.BuildServerAddr()
+	if err != nil {
+		return err
+	}
+
+	tr := http.DefaultTransport.(*http.Transport).Clone()
+	tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) {
+		network, addr, ok := strings.Cut(sock, "://")
+		if !ok {
+			return nil, errors.Errorf("invalid endpoint address: %s", sock)
+		}
+		return remoteutil.DialContext(ctx, network, addr)
+	}
+
+	client := &http.Client{
+		Transport: tr,
+	}
+
+	var urls []string
+
+	if len(opts.file) == 0 {
+		u, err := importFrom(ctx, client, os.Stdin)
+		if err != nil {
+			return err
+		}
+		urls = append(urls, u...)
+	} else {
+		for _, fn := range opts.file {
+			var f *os.File
+			var rdr io.Reader = os.Stdin
+			if fn != "-" {
+				f, err = os.Open(fn)
+				if err != nil {
+					return errors.Wrapf(err, "failed to open file %s", fn)
+				}
+				rdr = f
+			}
+			u, err := importFrom(ctx, client, rdr)
+			if err != nil {
+				return err
+			}
+			urls = append(urls, u...)
+			if f != nil {
+				f.Close()
+			}
+		}
+	}
+
+	if len(urls) == 0 {
+		return errors.New("no build records found in the bundle")
+	}
+
+	for i, url := range urls {
+		fmt.Fprintln(dockerCli.Err(), url)
+		if i == 0 {
+			err = browser.OpenURL(url)
+		}
+	}
+	return err
+}
+
+func importFrom(ctx context.Context, c *http.Client, rdr io.Reader) ([]string, error) {
+	req, err := http.NewRequestWithContext(ctx, http.MethodPost, "http://docker-desktop/upload", rdr)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to create request")
+	}
+
+	resp, err := c.Do(req)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to send request, check if Docker Desktop is running")
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		body, _ := io.ReadAll(resp.Body)
+		return nil, errors.Errorf("failed to import build: %s", string(body))
+	}
+
+	var refs []string
+	dec := json.NewDecoder(resp.Body)
+	if err := dec.Decode(&refs); err != nil {
+		return nil, errors.Wrap(err, "failed to decode response")
+	}
+
+	var urls []string
+	for _, ref := range refs {
+		urls = append(urls, desktop.BuildURL(fmt.Sprintf(".imported/_/%s", ref)))
+	}
+	return urls, err
+}
+
+func importCmd(dockerCli command.Cli, _ RootOptions) *cobra.Command {
+	var options importOptions
+
+	cmd := &cobra.Command{
+		Use:   "import [OPTIONS] < bundle.dockerbuild",
+		Short: "Import a build into Docker Desktop",
+		Args:  cobra.NoArgs,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			return runImport(cmd.Context(), dockerCli, options)
+		},
+		ValidArgsFunction: completion.Disable,
+	}
+
+	flags := cmd.Flags()
+	flags.StringArrayVarP(&options.file, "file", "f", nil, "Import from a file path")
+
+	return cmd
+}
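The import command above tunnels plain HTTP to Docker Desktop's build server by overriding the transport's `DialContext`. A generic sketch of the same technique against an assumed local socket path (the real code resolves the address via `desktop.BuildServerAddr` and buildx's remote dial helper):

```go
package main

import (
	"context"
	"fmt"
	"net"
	"net/http"
)

func main() {
	// Assumed socket path, for illustration only.
	const sockPath = "/tmp/docker-desktop-build.sock"

	tr := http.DefaultTransport.(*http.Transport).Clone()
	// Ignore the host in the URL and always dial the local socket.
	tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) {
		var d net.Dialer
		return d.DialContext(ctx, "unix", sockPath)
	}

	client := &http.Client{Transport: tr}
	resp, err := client.Get("http://docker-desktop/upload")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```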
@@ -173,7 +173,7 @@ func runInspect(ctx context.Context, dockerCli command.Cli, opts inspectOptions)
 		}
 	}

-	recs, err := queryRecords(ctx, opts.ref, nodes)
+	recs, err := queryRecords(ctx, opts.ref, nodes, nil)
 	if err != nil {
 		return err
 	}
@@ -185,14 +185,7 @@ func runInspect(ctx context.Context, dockerCli command.Cli, opts inspectOptions)
 		return errors.Errorf("no record found for ref %q", opts.ref)
 	}

-	if opts.ref == "" {
-		slices.SortFunc(recs, func(a, b historyRecord) int {
-			return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
-		})
-	}
-
 	rec := &recs[0]

 	c, err := rec.node.Driver.Client(ctx)
 	if err != nil {
 		return err
@@ -353,7 +346,7 @@ workers0:
 		out.Error.Name = name
 		out.Error.Logs = logs
 	}
-	out.Error.Stack = []byte(fmt.Sprintf("%+v", stack.Formatter(retErr)))
+	out.Error.Stack = fmt.Appendf(nil, "%+v", stack.Formatter(retErr))
 	}
 }

@@ -3,7 +3,6 @@ package history
 import (
 	"context"
 	"io"
-	"slices"

 	"github.com/containerd/containerd/v2/core/content/proxy"
 	"github.com/containerd/platforms"
@@ -42,7 +41,7 @@ func runAttachment(ctx context.Context, dockerCli command.Cli, opts attachmentOp
 		}
 	}

-	recs, err := queryRecords(ctx, opts.ref, nodes)
+	recs, err := queryRecords(ctx, opts.ref, nodes, nil)
 	if err != nil {
 		return err
 	}
@@ -54,12 +53,6 @@ func runAttachment(ctx context.Context, dockerCli command.Cli, opts attachmentOp
 		return errors.Errorf("no record found for ref %q", opts.ref)
 	}

-	if opts.ref == "" {
-		slices.SortFunc(recs, func(a, b historyRecord) int {
-			return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
-		})
-	}
-
 	rec := &recs[0]

 	c, err := rec.node.Driver.Client(ctx)
@@ -4,7 +4,6 @@ import (
 	"context"
 	"io"
 	"os"
-	"slices"

 	"github.com/docker/buildx/builder"
 	"github.com/docker/buildx/util/cobrautil/completion"
@@ -39,7 +38,7 @@ func runLogs(ctx context.Context, dockerCli command.Cli, opts logsOptions) error
 		}
 	}

-	recs, err := queryRecords(ctx, opts.ref, nodes)
+	recs, err := queryRecords(ctx, opts.ref, nodes, nil)
 	if err != nil {
 		return err
 	}
@@ -51,12 +50,6 @@ func runLogs(ctx context.Context, dockerCli command.Cli, opts logsOptions) error
 		return errors.Errorf("no record found for ref %q", opts.ref)
 	}

-	if opts.ref == "" {
-		slices.SortFunc(recs, func(a, b historyRecord) int {
-			return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
-		})
-	}
-
 	rec := &recs[0]
 	c, err := rec.node.Driver.Client(ctx)
 	if err != nil {
@@ -5,6 +5,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"os"
+	"path"
 	"slices"
 	"time"

@@ -14,10 +15,12 @@ import (
 	"github.com/docker/buildx/util/cobrautil/completion"
 	"github.com/docker/buildx/util/confutil"
 	"github.com/docker/buildx/util/desktop"
+	"github.com/docker/buildx/util/gitutil"
 	"github.com/docker/cli/cli"
 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/cli/command/formatter"
 	"github.com/docker/go-units"
+	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 )

@@ -38,6 +41,9 @@ type lsOptions struct {
 	builder string
 	format  string
 	noTrunc bool
+
+	filters []string
+	local   bool
 }

 func runLs(ctx context.Context, dockerCli command.Cli, opts lsOptions) error {
@@ -56,7 +62,29 @@ func runLs(ctx context.Context, dockerCli command.Cli, opts lsOptions) error {
 		}
 	}

-	out, err := queryRecords(ctx, "", nodes)
+	queryOptions := &queryOptions{}
+
+	if opts.local {
+		wd, err := os.Getwd()
+		if err != nil {
+			return err
+		}
+		gitc, err := gitutil.New(gitutil.WithContext(ctx), gitutil.WithWorkingDir(wd))
+		if err != nil {
+			if st, err1 := os.Stat(path.Join(wd, ".git")); err1 == nil && st.IsDir() {
+				return errors.Wrap(err, "git was not found in the system")
+			}
+			return errors.Wrapf(err, "could not find git repository for local filter")
+		}
+		remote, err := gitc.RemoteURL()
+		if err != nil {
+			return errors.Wrapf(err, "could not get remote URL for local filter")
+		}
+		queryOptions.Filters = append(queryOptions.Filters, fmt.Sprintf("repository=%s", remote))
+	}
+	queryOptions.Filters = append(queryOptions.Filters, opts.filters...)
+
+	out, err := queryRecords(ctx, "", nodes, queryOptions)
 	if err != nil {
 		return err
 	}
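The new `--local` path above derives a `repository=<remote URL>` filter from the current checkout via buildx's `gitutil` wrapper. A rough standalone equivalent that shells out to plain `git` (illustration only; the helper name and error handling are simplified):

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// remoteURL reads the origin remote of the current working tree via git.
func remoteURL() (string, error) {
	out, err := exec.Command("git", "remote", "get-url", "origin").Output()
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(out)), nil
}

func main() {
	remote, err := remoteURL()
	if err != nil {
		fmt.Println("not inside a git repository:", err)
		return
	}
	// history ls would pass this string as a record filter.
	fmt.Printf("repository=%s\n", remote)
}
```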
@@ -92,6 +120,8 @@ func lsCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
 	flags := cmd.Flags()
 	flags.StringVar(&options.format, "format", formatter.TableFormatKey, "Format the output")
 	flags.BoolVar(&options.noTrunc, "no-trunc", false, "Don't truncate output")
+	flags.StringArrayVar(&options.filters, "filter", nil, `Provide filter values (e.g., "status=error")`)
+	flags.BoolVar(&options.local, "local", false, "List records for current repository only")

 	return cmd
 }
@@ -161,7 +191,7 @@ type lsContext struct {
 }

 func (c *lsContext) MarshalJSON() ([]byte, error) {
-	m := map[string]interface{}{
+	m := map[string]any{
 		"ref":    c.FullRef(),
 		"name":   c.Name(),
 		"status": c.Status(),
@@ -3,7 +3,6 @@ package history
 import (
 	"context"
 	"fmt"
-	"slices"

 	"github.com/docker/buildx/builder"
 	"github.com/docker/buildx/util/cobrautil/completion"
@@ -35,7 +34,7 @@ func runOpen(ctx context.Context, dockerCli command.Cli, opts openOptions) error
 		}
 	}

-	recs, err := queryRecords(ctx, opts.ref, nodes)
+	recs, err := queryRecords(ctx, opts.ref, nodes, nil)
 	if err != nil {
 		return err
 	}
@@ -47,12 +46,6 @@ func runOpen(ctx context.Context, dockerCli command.Cli, opts openOptions) error
 		return errors.Errorf("no record found for ref %q", opts.ref)
 	}

-	if opts.ref == "" {
-		slices.SortFunc(recs, func(a, b historyRecord) int {
-			return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
-		})
-	}
-
 	rec := &recs[0]

 	url := desktop.BuildURL(fmt.Sprintf("%s/%s/%s", rec.node.Builder, rec.node.Name, rec.Ref))
@@ -25,6 +25,8 @@ func RootCmd(rootcmd *cobra.Command, dockerCli command.Cli, opts RootOptions) *c
 		inspectCmd(dockerCli, opts),
 		openCmd(dockerCli, opts),
 		traceCmd(dockerCli, opts),
+		importCmd(dockerCli, opts),
+		exportCmd(dockerCli, opts),
 	)

 	return cmd
@@ -8,9 +8,6 @@ import (
 	"io"
 	"net"
 	"os"
-	"slices"
-	"strconv"
-	"strings"
 	"time"

 	"github.com/containerd/console"
@@ -37,51 +34,20 @@ type traceOptions struct {
 }

 func loadTrace(ctx context.Context, ref string, nodes []builder.Node) (string, []byte, error) {
-	var offset *int
-	if strings.HasPrefix(ref, "^") {
-		off, err := strconv.Atoi(ref[1:])
-		if err != nil {
-			return "", nil, errors.Wrapf(err, "invalid offset %q", ref)
-		}
-		offset = &off
-		ref = ""
-	}
-
-	recs, err := queryRecords(ctx, ref, nodes)
+	recs, err := queryRecords(ctx, ref, nodes, &queryOptions{
+		CompletedOnly: true,
+	})
 	if err != nil {
 		return "", nil, err
 	}

-	var rec *historyRecord
-
-	if ref == "" {
-		slices.SortFunc(recs, func(a, b historyRecord) int {
-			return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
-		})
-		for _, r := range recs {
-			if r.CompletedAt != nil {
-				if offset != nil {
-					if *offset > 0 {
-						*offset--
-						continue
-					}
-				}
-				rec = &r
-				break
-			}
-		}
-		if offset != nil && *offset > 0 {
-			return "", nil, errors.Errorf("no completed build found with offset %d", *offset)
-		}
-	} else {
-		rec = &recs[0]
-	}
-	if rec == nil {
+	if len(recs) == 0 {
 		if ref == "" {
 			return "", nil, errors.New("no records found")
 		}
 		return "", nil, errors.Errorf("no record found for ref %q", ref)
 	}
+	rec := &recs[0]
+
 	if rec.CompletedAt == nil {
 		return "", nil, errors.Errorf("build %q is not completed, only completed builds can be traced", rec.Ref)
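The `^N` offset handling removed here now lives in `queryRecords`: a ref of the form `^N` appears to mean "the completed build N positions back in the history". A tiny sketch of just the parse step, mirroring the moved code:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	ref := "^2"

	var offset *int
	if strings.HasPrefix(ref, "^") {
		off, err := strconv.Atoi(ref[1:])
		if err != nil {
			fmt.Println("invalid offset:", err)
			return
		}
		offset = &off
		ref = "" // list all records, then skip `off` of them
	}
	if offset != nil {
		fmt.Println("skip", *offset, "most recent records")
	}
}
```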
@@ -103,7 +69,9 @@ func loadTrace(ctx context.Context, ref string, nodes []builder.Node) (string, [
 		return "", nil, err
 	}

-	recs, err := queryRecords(ctx, rec.Ref, []builder.Node{*rec.node})
+	recs, err := queryRecords(ctx, rec.Ref, []builder.Node{*rec.node}, &queryOptions{
+		CompletedOnly: true,
+	})
 	if err != nil {
 		return "", nil, err
 	}
@@ -1,10 +1,14 @@
 package history

 import (
+	"bytes"
 	"context"
+	"encoding/csv"
 	"fmt"
 	"io"
 	"path/filepath"
+	"slices"
+	"strconv"
 	"strings"
 	"sync"
 	"time"
@@ -13,10 +17,13 @@ import (
 	"github.com/docker/buildx/builder"
 	"github.com/docker/buildx/localstate"
 	controlapi "github.com/moby/buildkit/api/services/control"
+	"github.com/moby/buildkit/util/gitutil"
 	"github.com/pkg/errors"
 	"golang.org/x/sync/errgroup"
 )

+const recordsLimit = 50
+
 func buildName(fattrs map[string]string, ls *localstate.State) string {
 	var res string

@@ -106,10 +113,30 @@ type historyRecord struct {
 	name string
 }

-func queryRecords(ctx context.Context, ref string, nodes []builder.Node) ([]historyRecord, error) {
+type queryOptions struct {
+	CompletedOnly bool
+	Filters       []string
+}
+
+func queryRecords(ctx context.Context, ref string, nodes []builder.Node, opts *queryOptions) ([]historyRecord, error) {
 	var mu sync.Mutex
 	var out []historyRecord

+	var offset *int
+	if strings.HasPrefix(ref, "^") {
+		off, err := strconv.Atoi(ref[1:])
+		if err != nil {
+			return nil, errors.Wrapf(err, "invalid offset %q", ref)
+		}
+		offset = &off
+		ref = ""
+	}
+
+	var filters []string
+	if opts != nil {
+		filters = opts.Filters
+	}
+
 	eg, ctx := errgroup.WithContext(ctx)
 	for _, node := range nodes {
 		node := node
@@ -122,9 +149,25 @@ func queryRecords(ctx context.Context, ref string, nodes []builder.Node) ([]hist
 			if err != nil {
 				return err
 			}
+
+			var matchers []matchFunc
+			if len(filters) > 0 {
+				filters, matchers, err = dockerFiltersToBuildkit(filters)
+				if err != nil {
+					return err
+				}
+				sb := bytes.NewBuffer(nil)
+				w := csv.NewWriter(sb)
+				w.Write(filters)
+				w.Flush()
+				filters = []string{strings.TrimSuffix(sb.String(), "\n")}
+			}
+
 			serv, err := c.ControlClient().ListenBuildHistory(ctx, &controlapi.BuildHistoryRequest{
 				EarlyExit: true,
 				Ref:       ref,
+				Limit:     recordsLimit,
+				Filter:    filters,
 			})
 			if err != nil {
 				return err
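The `encoding/csv` writer above joins the translated filter terms into a single record, presumably so BuildKit receives them as one combined filter string with correct quoting when a value itself contains a comma. A standalone sketch of that step:

```go
package main

import (
	"bytes"
	"encoding/csv"
	"fmt"
	"strings"
)

func main() {
	filters := []string{"status==error", "repository~=github.com/docker/buildx"}

	sb := bytes.NewBuffer(nil)
	w := csv.NewWriter(sb)
	w.Write(filters) // one CSV record; values are quoted only if needed
	w.Flush()

	combined := strings.TrimSuffix(sb.String(), "\n")
	fmt.Println(combined) // status==error,repository~=github.com/docker/buildx
}
```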
@@ -142,6 +185,7 @@ func queryRecords(ctx context.Context, ref string, nodes []builder.Node) ([]hist
 				ts = &t
 			}
 			defer serv.CloseSend()
+		loop0:
 			for {
 				he, err := serv.Recv()
 				if err != nil {
@@ -153,6 +197,17 @@ func queryRecords(ctx context.Context, ref string, nodes []builder.Node) ([]hist
 				if he.Type == controlapi.BuildHistoryEventType_DELETED || he.Record == nil {
 					continue
 				}
+				if opts != nil && opts.CompletedOnly && he.Type != controlapi.BuildHistoryEventType_COMPLETE {
+					continue
+				}
+
+				// for older buildkit that don't support filters apply local filters
+				for _, matcher := range matchers {
+					if !matcher(he.Record) {
+						continue loop0
+					}
+				}
+
 				records = append(records, historyRecord{
 					BuildHistoryRecord: he.Record,
 					currentTimestamp:   ts,
@@ -169,6 +224,27 @@ func queryRecords(ctx context.Context, ref string, nodes []builder.Node) ([]hist
 	if err := eg.Wait(); err != nil {
 		return nil, err
 	}
+
+	slices.SortFunc(out, func(a, b historyRecord) int {
+		return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
+	})
+
+	if offset != nil {
+		var filtered []historyRecord
+		for _, r := range out {
+			if *offset > 0 {
+				*offset--
+				continue
+			}
+			filtered = append(filtered, r)
+			break
+		}
+		if *offset > 0 {
+			return nil, errors.Errorf("no completed build found with offset %d", *offset)
+		}
+		out = filtered
+	}
+
 	return out, nil
 }

@@ -178,3 +254,150 @@ func formatDuration(d time.Duration) string {
 	}
 	return fmt.Sprintf("%dm %2ds", int(d.Minutes()), int(d.Seconds())%60)
 }
+
+type matchFunc func(*controlapi.BuildHistoryRecord) bool
+
+func dockerFiltersToBuildkit(in []string) ([]string, []matchFunc, error) {
+	out := []string{}
+	matchers := []matchFunc{}
+	for _, f := range in {
+		key, value, sep, found := cutAny(f, "!=", "=", "<=", "<", ">=", ">")
+		if !found {
+			return nil, nil, errors.Errorf("invalid filter %q", f)
+		}
+		switch key {
+		case "ref", "repository", "status":
+			if sep != "=" && sep != "!=" {
+				return nil, nil, errors.Errorf("invalid separator for %q, expected = or !=", f)
+			}
+			matchers = append(matchers, valueFiler(key, value, sep))
+			if sep == "=" {
+				if key == "status" {
+					sep = "=="
+				} else {
+					sep = "~="
+				}
+			}
+		case "startedAt", "completedAt", "duration":
+			if sep == "=" || sep == "!=" {
+				return nil, nil, errors.Errorf("invalid separator for %q, expected <=, <, >= or >", f)
+			}
+			matcher, err := timeBasedFilter(key, value, sep)
+			if err != nil {
+				return nil, nil, err
+			}
+			matchers = append(matchers, matcher)
+		default:
+			return nil, nil, errors.Errorf("unsupported filter %q", f)
+		}
+		out = append(out, key+sep+value)
+	}
+	return out, matchers, nil
+}
+
+func valueFiler(key, value, sep string) matchFunc {
+	return func(rec *controlapi.BuildHistoryRecord) bool {
+		var recValue string
+		switch key {
+		case "ref":
+			recValue = rec.Ref
+		case "repository":
+			v, ok := rec.FrontendAttrs["vcs:source"]
+			if ok {
+				recValue = v
+			} else {
+				if context, ok := rec.FrontendAttrs["context"]; ok {
+					if ref, err := gitutil.ParseGitRef(context); err == nil {
+						recValue = ref.Remote
+					}
+				}
+			}
+		case "status":
+			if rec.CompletedAt != nil {
+				if rec.Error != nil {
+					if strings.Contains(rec.Error.Message, "context canceled") {
+						recValue = "canceled"
+					} else {
+						recValue = "error"
+					}
+				} else {
+					recValue = "completed"
+				}
+			} else {
+				recValue = "running"
+			}
+		}
+		switch sep {
+		case "=":
+			if key == "status" {
+				return recValue == value
+			}
+			return strings.Contains(recValue, value)
+		case "!=":
+			return recValue != value
+		default:
+			return false
+		}
+	}
+}
+
+func timeBasedFilter(key, value, sep string) (matchFunc, error) {
+	var cmp int64
+	switch key {
+	case "startedAt", "completedAt":
+		v, err := time.ParseDuration(value)
+		if err == nil {
+			tm := time.Now().Add(-v)
+			cmp = tm.Unix()
+		} else {
+			tm, err := time.Parse(time.RFC3339, value)
+			if err != nil {
+				return nil, errors.Errorf("invalid time %s", value)
+			}
+			cmp = tm.Unix()
+		}
+	case "duration":
+		v, err := time.ParseDuration(value)
+		if err != nil {
+			return nil, errors.Errorf("invalid duration %s", value)
+		}
+		cmp = int64(v)
+	default:
+		return nil, nil
+	}
+
return func(rec *controlapi.BuildHistoryRecord) bool {
|
||||||
|
var val int64
|
||||||
|
switch key {
|
||||||
|
case "startedAt":
|
||||||
|
val = rec.CreatedAt.AsTime().Unix()
|
||||||
|
case "completedAt":
|
||||||
|
if rec.CompletedAt != nil {
|
||||||
|
val = rec.CompletedAt.AsTime().Unix()
|
||||||
|
}
|
||||||
|
case "duration":
|
||||||
|
if rec.CompletedAt != nil {
|
||||||
|
val = int64(rec.CompletedAt.AsTime().Sub(rec.CreatedAt.AsTime()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
switch sep {
|
||||||
|
case ">=":
|
||||||
|
return val >= cmp
|
||||||
|
case "<=":
|
||||||
|
return val <= cmp
|
||||||
|
case ">":
|
||||||
|
return val > cmp
|
||||||
|
default:
|
||||||
|
return val < cmp
|
||||||
|
}
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func cutAny(s string, seps ...string) (before, after, sep string, found bool) {
|
||||||
|
for _, sep := range seps {
|
||||||
|
if idx := strings.Index(s, sep); idx != -1 {
|
||||||
|
return s[:idx], s[idx+len(sep):], sep, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return s, "", "", false
|
||||||
|
}
|
||||||
|
@@ -194,7 +194,7 @@ func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, arg
 			}
 			s := s
 			eg2.Go(func() error {
-				sub.Log(1, []byte(fmt.Sprintf("copying %s from %s to %s\n", s.Desc.Digest.String(), s.Ref.String(), t.String())))
+				sub.Log(1, fmt.Appendf(nil, "copying %s from %s to %s\n", s.Desc.Digest.String(), s.Ref.String(), t.String()))
 				return r.Copy(ctx, s, t)
 			})
 		}
@@ -202,7 +202,7 @@ func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, arg
 		if err := eg2.Wait(); err != nil {
 			return err
 		}
-		sub.Log(1, []byte(fmt.Sprintf("pushing %s to %s\n", desc.Digest.String(), t.String())))
+		sub.Log(1, fmt.Appendf(nil, "pushing %s to %s\n", desc.Digest.String(), t.String()))
 		return r.Push(ctx, t, desc, dt)
 	})
 })
@@ -4,6 +4,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"maps"
 	"sort"
 	"strings"
 	"time"
@@ -409,9 +410,7 @@ func truncPlatforms(pfs []string, max int) truncatedPlatforms {
 			left[ppf] = append(left[ppf], pf)
 		}
 	}
-	for k, v := range left {
-		res[k] = v
-	}
+	maps.Copy(res, left)
 	return truncatedPlatforms{
 		res:   res,
 		input: pfs,
@@ -7,7 +7,6 @@ import (
 	debugcmd "github.com/docker/buildx/commands/debug"
 	historycmd "github.com/docker/buildx/commands/history"
 	imagetoolscmd "github.com/docker/buildx/commands/imagetools"
-	"github.com/docker/buildx/controller/remote"
 	"github.com/docker/buildx/util/cobrautil/completion"
 	"github.com/docker/buildx/util/confutil"
 	"github.com/docker/buildx/util/logutil"
@@ -16,13 +15,14 @@ import (
 	"github.com/docker/cli/cli-plugins/plugin"
 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/cli/debug"
+	cliflags "github.com/docker/cli/cli/flags"
 	"github.com/moby/buildkit/util/appcontext"
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
 )
 
-func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Command {
+func NewRootCmd(name string, isPlugin bool, dockerCli *command.DockerCli) *cobra.Command {
 	var opt rootOptions
 	cmd := &cobra.Command{
 		Short: "Docker Buildx",
@@ -40,7 +40,17 @@ func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Comman
 			}
 			cmd.SetContext(appcontext.Context())
 			if !isPlugin {
-				return nil
+				// InstallFlags and SetDefaultOptions are necessary to match
+				// the plugin mode behavior to handle env vars such as
+				// DOCKER_TLS, DOCKER_TLS_VERIFY, ... and we also need to use a
+				// new flagset to avoid conflict with the global debug flag
+				// that we already handle in the root command otherwise it
+				// would panic.
+				nflags := pflag.NewFlagSet(cmd.DisplayName(), pflag.ContinueOnError)
+				options := cliflags.NewClientOptions()
+				options.InstallFlags(nflags)
+				options.SetDefaultOptions(nflags)
+				return dockerCli.Initialize(options)
 			}
 			return plugin.PersistentPreRunE(cmd, args)
 		},
@@ -113,7 +123,6 @@ func addCommands(cmd *cobra.Command, opts *rootOptions, dockerCli command.Cli) {
 	cmd.AddCommand(debugcmd.RootCmd(dockerCli,
 		newDebuggableBuild(dockerCli, opts),
 	))
-	remote.AddControllerCommands(cmd, dockerCli)
 	}
 
 	cmd.RegisterFlagCompletionFunc( //nolint:errcheck
@@ -34,7 +34,7 @@ const defaultTargetName = "default"
 // NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultHandle,
 // this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can
 // inspect the result and debug the cause of that error.
-func RunBuild(ctx context.Context, dockerCli command.Cli, in *controllerapi.BuildOptions, inStream io.Reader, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultHandle, *build.Inputs, error) {
+func RunBuild(ctx context.Context, dockerCli command.Cli, in *Options, inStream io.Reader, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultHandle, *build.Inputs, error) {
 	if in.NoCache && len(in.NoCacheFilter) > 0 {
 		return nil, nil, nil, errors.Errorf("--no-cache and --no-cache-filter cannot currently be used together")
 	}
@@ -1,15 +1,52 @@
-package pb
+package build
 
 import (
 	"path/filepath"
 	"strings"
 
+	"github.com/docker/buildx/controller/pb"
+	sourcepolicy "github.com/moby/buildkit/sourcepolicy/pb"
 	"github.com/moby/buildkit/util/gitutil"
 )
 
+type Options struct {
+	ContextPath            string
+	DockerfileName         string
+	CallFunc               *pb.CallFunc
+	NamedContexts          map[string]string
+	Allow                  []string
+	Attests                []*pb.Attest
+	BuildArgs              map[string]string
+	CacheFrom              []*pb.CacheOptionsEntry
+	CacheTo                []*pb.CacheOptionsEntry
+	CgroupParent           string
+	Exports                []*pb.ExportEntry
+	ExtraHosts             []string
+	Labels                 map[string]string
+	NetworkMode            string
+	NoCacheFilter          []string
+	Platforms              []string
+	Secrets                []*pb.Secret
+	ShmSize                int64
+	SSH                    []*pb.SSH
+	Tags                   []string
+	Target                 string
+	Ulimits                *pb.UlimitOpt
+	Builder                string
+	NoCache                bool
+	Pull                   bool
+	ExportPush             bool
+	ExportLoad             bool
+	SourcePolicy           *sourcepolicy.Policy
+	Ref                    string
+	GroupRef               string
+	Annotations            []string
+	ProvenanceResponseMode string
+}
+
 // ResolveOptionPaths resolves all paths contained in BuildOptions
 // and replaces them to absolute paths.
-func ResolveOptionPaths(options *BuildOptions) (_ *BuildOptions, err error) {
+func ResolveOptionPaths(options *Options) (_ *Options, err error) {
 	localContext := false
 	if options.ContextPath != "" && options.ContextPath != "-" {
 		if !isRemoteURL(options.ContextPath) {
@@ -56,7 +93,7 @@ func ResolveOptionPaths(options *BuildOptions) (_ *BuildOptions, err error) {
 	}
 	options.NamedContexts = contexts
 
-	var cacheFrom []*CacheOptionsEntry
+	var cacheFrom []*pb.CacheOptionsEntry
 	for _, co := range options.CacheFrom {
 		switch co.Type {
 		case "local":
@@ -87,7 +124,7 @@ func ResolveOptionPaths(options *BuildOptions) (_ *BuildOptions, err error) {
 	}
 	options.CacheFrom = cacheFrom
 
-	var cacheTo []*CacheOptionsEntry
+	var cacheTo []*pb.CacheOptionsEntry
 	for _, co := range options.CacheTo {
 		switch co.Type {
 		case "local":
@@ -117,7 +154,7 @@ func ResolveOptionPaths(options *BuildOptions) (_ *BuildOptions, err error) {
 		}
 	}
 	options.CacheTo = cacheTo
-	var exports []*ExportEntry
+	var exports []*pb.ExportEntry
 	for _, e := range options.Exports {
 		if e.Destination != "" && e.Destination != "-" {
 			e.Destination, err = filepath.Abs(e.Destination)
@@ -129,7 +166,7 @@ func ResolveOptionPaths(options *BuildOptions) (_ *BuildOptions, err error) {
 	}
 	options.Exports = exports
 
-	var secrets []*Secret
+	var secrets []*pb.Secret
 	for _, s := range options.Secrets {
 		if s.FilePath != "" {
 			s.FilePath, err = filepath.Abs(s.FilePath)
@@ -141,7 +178,7 @@ func ResolveOptionPaths(options *BuildOptions) (_ *BuildOptions, err error) {
 	}
 	options.Secrets = secrets
 
-	var ssh []*SSH
+	var ssh []*pb.SSH
 	for _, s := range options.SSH {
 		var ps []string
 		for _, pt := range s.Paths {
@@ -1,12 +1,12 @@
-package pb
+package build
 
 import (
 	"os"
 	"path/filepath"
 	"testing"
 
+	"github.com/docker/buildx/controller/pb"
 	"github.com/stretchr/testify/require"
-	"google.golang.org/protobuf/proto"
 )
 
 func TestResolvePaths(t *testing.T) {
@@ -16,59 +16,59 @@ func TestResolvePaths(t *testing.T) {
 	require.NoError(t, os.Chdir(tmpwd))
 	tests := []struct {
 		name    string
-		options *BuildOptions
-		want    *BuildOptions
+		options *Options
+		want    *Options
 	}{
 		{
 			name:    "contextpath",
-			options: &BuildOptions{ContextPath: "test"},
-			want:    &BuildOptions{ContextPath: filepath.Join(tmpwd, "test")},
+			options: &Options{ContextPath: "test"},
+			want:    &Options{ContextPath: filepath.Join(tmpwd, "test")},
 		},
 		{
 			name:    "contextpath-cwd",
-			options: &BuildOptions{ContextPath: "."},
-			want:    &BuildOptions{ContextPath: tmpwd},
+			options: &Options{ContextPath: "."},
+			want:    &Options{ContextPath: tmpwd},
 		},
 		{
 			name:    "contextpath-dash",
-			options: &BuildOptions{ContextPath: "-"},
-			want:    &BuildOptions{ContextPath: "-"},
+			options: &Options{ContextPath: "-"},
+			want:    &Options{ContextPath: "-"},
 		},
 		{
 			name:    "contextpath-ssh",
-			options: &BuildOptions{ContextPath: "git@github.com:docker/buildx.git"},
-			want:    &BuildOptions{ContextPath: "git@github.com:docker/buildx.git"},
+			options: &Options{ContextPath: "git@github.com:docker/buildx.git"},
+			want:    &Options{ContextPath: "git@github.com:docker/buildx.git"},
 		},
 		{
 			name:    "dockerfilename",
-			options: &BuildOptions{DockerfileName: "test", ContextPath: "."},
-			want:    &BuildOptions{DockerfileName: filepath.Join(tmpwd, "test"), ContextPath: tmpwd},
+			options: &Options{DockerfileName: "test", ContextPath: "."},
+			want:    &Options{DockerfileName: filepath.Join(tmpwd, "test"), ContextPath: tmpwd},
 		},
 		{
 			name:    "dockerfilename-dash",
-			options: &BuildOptions{DockerfileName: "-", ContextPath: "."},
-			want:    &BuildOptions{DockerfileName: "-", ContextPath: tmpwd},
+			options: &Options{DockerfileName: "-", ContextPath: "."},
+			want:    &Options{DockerfileName: "-", ContextPath: tmpwd},
 		},
 		{
 			name:    "dockerfilename-remote",
-			options: &BuildOptions{DockerfileName: "test", ContextPath: "git@github.com:docker/buildx.git"},
-			want:    &BuildOptions{DockerfileName: "test", ContextPath: "git@github.com:docker/buildx.git"},
+			options: &Options{DockerfileName: "test", ContextPath: "git@github.com:docker/buildx.git"},
+			want:    &Options{DockerfileName: "test", ContextPath: "git@github.com:docker/buildx.git"},
 		},
 		{
 			name: "contexts",
-			options: &BuildOptions{NamedContexts: map[string]string{
+			options: &Options{NamedContexts: map[string]string{
 				"a": "test1", "b": "test2",
 				"alpine": "docker-image://alpine@sha256:0123456789", "project": "https://github.com/myuser/project.git",
 			}},
-			want: &BuildOptions{NamedContexts: map[string]string{
+			want: &Options{NamedContexts: map[string]string{
 				"a": filepath.Join(tmpwd, "test1"), "b": filepath.Join(tmpwd, "test2"),
 				"alpine": "docker-image://alpine@sha256:0123456789", "project": "https://github.com/myuser/project.git",
 			}},
 		},
 		{
 			name: "cache-from",
-			options: &BuildOptions{
-				CacheFrom: []*CacheOptionsEntry{
+			options: &Options{
+				CacheFrom: []*pb.CacheOptionsEntry{
 					{
 						Type:  "local",
 						Attrs: map[string]string{"src": "test"},
@@ -79,8 +79,8 @@ func TestResolvePaths(t *testing.T) {
 					},
 				},
 			},
-			want: &BuildOptions{
-				CacheFrom: []*CacheOptionsEntry{
+			want: &Options{
+				CacheFrom: []*pb.CacheOptionsEntry{
 					{
 						Type:  "local",
 						Attrs: map[string]string{"src": filepath.Join(tmpwd, "test")},
@@ -94,8 +94,8 @@ func TestResolvePaths(t *testing.T) {
 		},
 		{
 			name: "cache-to",
-			options: &BuildOptions{
-				CacheTo: []*CacheOptionsEntry{
+			options: &Options{
+				CacheTo: []*pb.CacheOptionsEntry{
 					{
 						Type:  "local",
 						Attrs: map[string]string{"dest": "test"},
@@ -106,8 +106,8 @@ func TestResolvePaths(t *testing.T) {
 					},
 				},
 			},
-			want: &BuildOptions{
-				CacheTo: []*CacheOptionsEntry{
+			want: &Options{
+				CacheTo: []*pb.CacheOptionsEntry{
 					{
 						Type:  "local",
 						Attrs: map[string]string{"dest": filepath.Join(tmpwd, "test")},
@@ -121,8 +121,8 @@ func TestResolvePaths(t *testing.T) {
 		},
 		{
 			name: "exports",
-			options: &BuildOptions{
-				Exports: []*ExportEntry{
+			options: &Options{
+				Exports: []*pb.ExportEntry{
 					{
 						Type:        "local",
 						Destination: "-",
@@ -149,8 +149,8 @@ func TestResolvePaths(t *testing.T) {
 					},
 				},
 			},
-			want: &BuildOptions{
-				Exports: []*ExportEntry{
+			want: &Options{
+				Exports: []*pb.ExportEntry{
 					{
 						Type:        "local",
 						Destination: "-",
@@ -180,8 +180,8 @@ func TestResolvePaths(t *testing.T) {
 		},
 		{
 			name: "secrets",
-			options: &BuildOptions{
-				Secrets: []*Secret{
+			options: &Options{
+				Secrets: []*pb.Secret{
 					{
 						FilePath: "test1",
 					},
@@ -195,8 +195,8 @@ func TestResolvePaths(t *testing.T) {
 					},
 				},
 			},
-			want: &BuildOptions{
-				Secrets: []*Secret{
+			want: &Options{
+				Secrets: []*pb.Secret{
 					{
 						FilePath: filepath.Join(tmpwd, "test1"),
 					},
@@ -213,8 +213,8 @@ func TestResolvePaths(t *testing.T) {
 		},
 		{
 			name: "ssh",
-			options: &BuildOptions{
-				SSH: []*SSH{
+			options: &Options{
+				SSH: []*pb.SSH{
 					{
 						ID:    "default",
 						Paths: []string{"test1", "test2"},
@@ -225,8 +225,8 @@ func TestResolvePaths(t *testing.T) {
 					},
 				},
 			},
-			want: &BuildOptions{
-				SSH: []*SSH{
+			want: &Options{
+				SSH: []*pb.SSH{
 					{
 						ID:    "default",
 						Paths: []string{filepath.Join(tmpwd, "test1"), filepath.Join(tmpwd, "test2")},
@@ -244,9 +244,7 @@ func TestResolvePaths(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			got, err := ResolveOptionPaths(tt.options)
 			require.NoError(t, err)
-			if !proto.Equal(tt.want, got) {
-				t.Fatalf("expected %#v, got %#v", tt.want, got)
-			}
+			require.Equal(t, tt.want, got)
 		})
 	}
 }
@@ -5,29 +5,22 @@ import (
 	"io"
 
 	"github.com/docker/buildx/build"
+	cbuild "github.com/docker/buildx/controller/build"
 	controllerapi "github.com/docker/buildx/controller/pb"
+	"github.com/docker/buildx/controller/processes"
 	"github.com/docker/buildx/util/progress"
 	"github.com/moby/buildkit/client"
 )
 
 type BuildxController interface {
-	Build(ctx context.Context, options *controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (ref string, resp *client.SolveResponse, inputs *build.Inputs, err error)
+	Build(ctx context.Context, options *cbuild.Options, in io.ReadCloser, progress progress.Writer) (resp *client.SolveResponse, inputs *build.Inputs, err error)
 	// Invoke starts an IO session into the specified process.
-	// If pid doesn't matche to any running processes, it starts a new process with the specified config.
-	// If there is no container running or InvokeConfig.Rollback is speicfied, the process will start in a newly created container.
+	// If pid doesn't match to any running processes, it starts a new process with the specified config.
+	// If there is no container running or InvokeConfig.Rollback is specified, the process will start in a newly created container.
 	// NOTE: If needed, in the future, we can split this API into three APIs (NewContainer, NewProcess and Attach).
-	Invoke(ctx context.Context, ref, pid string, options *controllerapi.InvokeConfig, ioIn io.ReadCloser, ioOut io.WriteCloser, ioErr io.WriteCloser) error
-	Kill(ctx context.Context) error
+	Invoke(ctx context.Context, pid string, options *controllerapi.InvokeConfig, ioIn io.ReadCloser, ioOut io.WriteCloser, ioErr io.WriteCloser) error
 	Close() error
-	List(ctx context.Context) (refs []string, _ error)
-	Disconnect(ctx context.Context, ref string) error
-	ListProcesses(ctx context.Context, ref string) (infos []*controllerapi.ProcessInfo, retErr error)
-	DisconnectProcess(ctx context.Context, ref, pid string) error
-	Inspect(ctx context.Context, ref string) (*controllerapi.InspectResponse, error)
-}
-
-type ControlOptions struct {
-	ServerConfig string
-	Root         string
-	Detach       bool
+	ListProcesses(ctx context.Context) (infos []*processes.ProcessInfo, retErr error)
+	DisconnectProcess(ctx context.Context, pid string) error
+	Inspect(ctx context.Context) *cbuild.Options
 }
@@ -2,35 +2,12 @@ package controller
 
 import (
 	"context"
-	"fmt"
 
 	"github.com/docker/buildx/controller/control"
 	"github.com/docker/buildx/controller/local"
-	"github.com/docker/buildx/controller/remote"
-	"github.com/docker/buildx/util/progress"
 	"github.com/docker/cli/cli/command"
-	"github.com/pkg/errors"
 )
 
-func NewController(ctx context.Context, opts control.ControlOptions, dockerCli command.Cli, pw progress.Writer) (control.BuildxController, error) {
-	var name string
-	if opts.Detach {
-		name = "remote"
-	} else {
-		name = "local"
-	}
-
-	var c control.BuildxController
-	err := progress.Wrap(fmt.Sprintf("[internal] connecting to %s controller", name), pw.Write, func(l progress.SubLogger) (err error) {
-		if opts.Detach {
-			c, err = remote.NewRemoteBuildxController(ctx, dockerCli, opts, l)
-		} else {
-			c = local.NewLocalBuildxController(ctx, dockerCli, l)
-		}
-		return err
-	})
-	if err != nil {
-		return nil, errors.Wrap(err, "failed to start buildx controller")
-	}
-	return c, nil
+func NewController(ctx context.Context, dockerCli command.Cli) control.BuildxController {
+	return local.NewLocalBuildxController(ctx, dockerCli)
 }
@@ -1,48 +1,20 @@
 package errdefs
 
-import (
-	"io"
-
-	"github.com/containerd/typeurl/v2"
-	"github.com/docker/buildx/util/desktop"
-	"github.com/moby/buildkit/util/grpcerrors"
-)
-
-func init() {
-	typeurl.Register((*Build)(nil), "github.com/docker/buildx", "errdefs.Build+json")
-}
-
 type BuildError struct {
-	*Build
-	error
+	err error
 }
 
 func (e *BuildError) Unwrap() error {
-	return e.error
+	return e.err
 }
 
-func (e *BuildError) ToProto() grpcerrors.TypedErrorProto {
-	return e.Build
+func (e *BuildError) Error() string {
+	return e.err.Error()
 }
 
-func (e *BuildError) PrintBuildDetails(w io.Writer) error {
-	if e.Ref == "" {
-		return nil
-	}
-	ebr := &desktop.ErrorWithBuildRef{
-		Ref: e.Ref,
-		Err: e.error,
-	}
-	return ebr.Print(w)
-}
-
-func WrapBuild(err error, sessionID string, ref string) error {
+func WrapBuild(err error) error {
 	if err == nil {
 		return nil
 	}
-	return &BuildError{Build: &Build{SessionID: sessionID, Ref: ref}, error: err}
-}
-
-func (b *Build) WrapError(err error) error {
-	return &BuildError{error: err, Build: b}
+	return &BuildError{err: err}
 }
@@ -1,157 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// 	protoc-gen-go v1.34.1
-// 	protoc        v3.11.4
-// source: github.com/docker/buildx/controller/errdefs/errdefs.proto
-
-package errdefs
-
-import (
-	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
-	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
-	reflect "reflect"
-	sync "sync"
-)
-
-const (
-	// Verify that this generated code is sufficiently up-to-date.
-	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
-	// Verify that runtime/protoimpl is sufficiently up-to-date.
-	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-type Build struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	SessionID string `protobuf:"bytes,1,opt,name=SessionID,proto3" json:"SessionID,omitempty"`
-	Ref       string `protobuf:"bytes,2,opt,name=Ref,proto3" json:"Ref,omitempty"`
-}
-
-func (x *Build) Reset() {
-	*x = Build{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_github_com_docker_buildx_controller_errdefs_errdefs_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *Build) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Build) ProtoMessage() {}
-
-func (x *Build) ProtoReflect() protoreflect.Message {
-	mi := &file_github_com_docker_buildx_controller_errdefs_errdefs_proto_msgTypes[0]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use Build.ProtoReflect.Descriptor instead.
-func (*Build) Descriptor() ([]byte, []int) {
-	return file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *Build) GetSessionID() string {
-	if x != nil {
-		return x.SessionID
-	}
-	return ""
-}
-
-func (x *Build) GetRef() string {
-	if x != nil {
-		return x.Ref
-	}
-	return ""
-}
-
-var File_github_com_docker_buildx_controller_errdefs_errdefs_proto protoreflect.FileDescriptor
-
-var file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDesc = []byte{
-	0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x6f, 0x63,
-	0x6b, 0x65, 0x72, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x78, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72,
-	0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x66, 0x73, 0x2f, 0x65, 0x72,
-	0x72, 0x64, 0x65, 0x66, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x64, 0x6f, 0x63,
-	0x6b, 0x65, 0x72, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x78, 0x2e, 0x65, 0x72, 0x72, 0x64, 0x65,
-	0x66, 0x73, 0x22, 0x37, 0x0a, 0x05, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x53,
-	0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
-	0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x10, 0x0a, 0x03, 0x52, 0x65, 0x66,
-	0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x42, 0x2d, 0x5a, 0x2b, 0x67,
-	0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72,
-	0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x78, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c,
-	0x65, 0x72, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x66, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x33,
-}
-
-var (
-	file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescOnce sync.Once
-	file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescData = file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDesc
-)
-
-func file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescGZIP() []byte {
-	file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescOnce.Do(func() {
-		file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescData)
-	})
-	return file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescData
-}
-
-var file_github_com_docker_buildx_controller_errdefs_errdefs_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_github_com_docker_buildx_controller_errdefs_errdefs_proto_goTypes = []interface{}{
-	(*Build)(nil), // 0: docker.buildx.errdefs.Build
-}
-var file_github_com_docker_buildx_controller_errdefs_errdefs_proto_depIdxs = []int32{
-	0, // [0:0] is the sub-list for method output_type
-	0, // [0:0] is the sub-list for method input_type
-	0, // [0:0] is the sub-list for extension type_name
-	0, // [0:0] is the sub-list for extension extendee
-	0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_github_com_docker_buildx_controller_errdefs_errdefs_proto_init() }
-func file_github_com_docker_buildx_controller_errdefs_errdefs_proto_init() {
-	if File_github_com_docker_buildx_controller_errdefs_errdefs_proto != nil {
-		return
-	}
-	if !protoimpl.UnsafeEnabled {
-		file_github_com_docker_buildx_controller_errdefs_errdefs_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Build); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
-	type x struct{}
-	out := protoimpl.TypeBuilder{
-		File: protoimpl.DescBuilder{
-			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
-			RawDescriptor: file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDesc,
-			NumEnums:      0,
-			NumMessages:   1,
-			NumExtensions: 0,
-			NumServices:   0,
-		},
-		GoTypes:           file_github_com_docker_buildx_controller_errdefs_errdefs_proto_goTypes,
-		DependencyIndexes: file_github_com_docker_buildx_controller_errdefs_errdefs_proto_depIdxs,
-		MessageInfos:      file_github_com_docker_buildx_controller_errdefs_errdefs_proto_msgTypes,
-	}.Build()
-	File_github_com_docker_buildx_controller_errdefs_errdefs_proto = out.File
-	file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDesc = nil
-	file_github_com_docker_buildx_controller_errdefs_errdefs_proto_goTypes = nil
-	file_github_com_docker_buildx_controller_errdefs_errdefs_proto_depIdxs = nil
-}
@@ -1,10 +0,0 @@
-syntax = "proto3";
-
-package docker.buildx.errdefs;
-
-option go_package = "github.com/docker/buildx/controller/errdefs";
-
-message Build {
-  string SessionID = 1;
-  string Ref = 2;
-}
@@ -1,241 +0,0 @@
-// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
-// protoc-gen-go-vtproto version: v0.6.1-0.20240319094008-0393e58bdf10
-// source: github.com/docker/buildx/controller/errdefs/errdefs.proto
-
-package errdefs
-
-import (
-	fmt "fmt"
-	protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
-	proto "google.golang.org/protobuf/proto"
-	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
-	io "io"
-)
-
-const (
-	// Verify that this generated code is sufficiently up-to-date.
-	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
-	// Verify that runtime/protoimpl is sufficiently up-to-date.
-	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-func (m *Build) CloneVT() *Build {
-	if m == nil {
-		return (*Build)(nil)
-	}
-	r := new(Build)
-	r.SessionID = m.SessionID
-	r.Ref = m.Ref
-	if len(m.unknownFields) > 0 {
-		r.unknownFields = make([]byte, len(m.unknownFields))
-		copy(r.unknownFields, m.unknownFields)
-	}
-	return r
-}
-
-func (m *Build) CloneMessageVT() proto.Message {
-	return m.CloneVT()
-}
-
-func (this *Build) EqualVT(that *Build) bool {
-	if this == that {
-		return true
-	} else if this == nil || that == nil {
-		return false
-	}
-	if this.SessionID != that.SessionID {
-		return false
-	}
-	if this.Ref != that.Ref {
-		return false
-	}
-	return string(this.unknownFields) == string(that.unknownFields)
-}
-
-func (this *Build) EqualMessageVT(thatMsg proto.Message) bool {
-	that, ok := thatMsg.(*Build)
-	if !ok {
-		return false
-	}
-	return this.EqualVT(that)
-}
-func (m *Build) MarshalVT() (dAtA []byte, err error) {
-	if m == nil {
-		return nil, nil
-	}
-	size := m.SizeVT()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBufferVT(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *Build) MarshalToVT(dAtA []byte) (int, error) {
-	size := m.SizeVT()
-	return m.MarshalToSizedBufferVT(dAtA[:size])
-}
-
-func (m *Build) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
-	if m == nil {
-		return 0, nil
-	}
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.unknownFields != nil {
-		i -= len(m.unknownFields)
-		copy(dAtA[i:], m.unknownFields)
-	}
-	if len(m.Ref) > 0 {
-		i -= len(m.Ref)
-		copy(dAtA[i:], m.Ref)
-		i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Ref)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.SessionID) > 0 {
-		i -= len(m.SessionID)
-		copy(dAtA[i:], m.SessionID)
-		i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SessionID)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *Build) SizeVT() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.SessionID)
-	if l > 0 {
-		n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
-	}
-	l = len(m.Ref)
-	if l > 0 {
-		n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
-	}
-	n += len(m.unknownFields)
-	return n
-}
-
-func (m *Build) UnmarshalVT(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return protohelpers.ErrIntOverflow
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Build: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Build: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return protohelpers.ErrIntOverflow
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return protohelpers.ErrInvalidLength
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return protohelpers.ErrInvalidLength
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.SessionID = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return protohelpers.ErrIntOverflow
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return protohelpers.ErrInvalidLength
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return protohelpers.ErrInvalidLength
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Ref = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := protohelpers.Skip(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return protohelpers.ErrInvalidLength
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
@@ -11,7 +11,6 @@ import (
 	controllererrors "github.com/docker/buildx/controller/errdefs"
 	controllerapi "github.com/docker/buildx/controller/pb"
 	"github.com/docker/buildx/controller/processes"
-	"github.com/docker/buildx/util/desktop"
 	"github.com/docker/buildx/util/ioset"
 	"github.com/docker/buildx/util/progress"
 	"github.com/docker/cli/cli/command"
@@ -19,10 +18,9 @@ import (
 	"github.com/pkg/errors"
 )
 
-func NewLocalBuildxController(ctx context.Context, dockerCli command.Cli, logger progress.SubLogger) control.BuildxController {
+func NewLocalBuildxController(ctx context.Context, dockerCli command.Cli) control.BuildxController {
 	return &localController{
 		dockerCli: dockerCli,
-		sessionID: "local",
 		processes: processes.NewManager(),
 	}
 }
@@ -31,21 +29,20 @@ type buildConfig struct {
 	// TODO: these two structs should be merged
 	// Discussion: https://github.com/docker/buildx/pull/1640#discussion_r1113279719
 	resultCtx    *build.ResultHandle
-	buildOptions *controllerapi.BuildOptions
+	buildOptions *cbuild.Options
 }
 
 type localController struct {
 	dockerCli   command.Cli
-	sessionID   string
 	buildConfig buildConfig
 	processes   *processes.Manager
 
 	buildOnGoing atomic.Bool
 }
 
-func (b *localController) Build(ctx context.Context, options *controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, *build.Inputs, error) {
+func (b *localController) Build(ctx context.Context, options *cbuild.Options, in io.ReadCloser, progress progress.Writer) (*client.SolveResponse, *build.Inputs, error) {
 	if !b.buildOnGoing.CompareAndSwap(false, true) {
-		return "", nil, nil, errors.New("build ongoing")
+		return nil, nil, errors.New("build ongoing")
 	}
 	defer b.buildOnGoing.Store(false)
 
@@ -57,31 +54,20 @@ func (b *localController) Build(ctx context.Context, options *controllerapi.Buil
 			buildOptions: options,
 		}
 		if buildErr != nil {
-			var ref string
-			var ebr *desktop.ErrorWithBuildRef
-			if errors.As(buildErr, &ebr) {
-				ref = ebr.Ref
-			}
-			buildErr = controllererrors.WrapBuild(buildErr, b.sessionID, ref)
+			buildErr = controllererrors.WrapBuild(buildErr)
 		}
 	}
 	if buildErr != nil {
-		return "", nil, nil, buildErr
+		return nil, nil, buildErr
 	}
-	return b.sessionID, resp, dockerfileMappings, nil
+	return resp, dockerfileMappings, nil
 }
 
-func (b *localController) ListProcesses(ctx context.Context, sessionID string) (infos []*controllerapi.ProcessInfo, retErr error) {
-	if sessionID != b.sessionID {
-		return nil, errors.Errorf("unknown session ID %q", sessionID)
-	}
+func (b *localController) ListProcesses(ctx context.Context) (infos []*processes.ProcessInfo, retErr error) {
 	return b.processes.ListProcesses(), nil
 }
 
-func (b *localController) DisconnectProcess(ctx context.Context, sessionID, pid string) error {
-	if sessionID != b.sessionID {
-		return errors.Errorf("unknown session ID %q", sessionID)
-	}
+func (b *localController) DisconnectProcess(ctx context.Context, pid string) error {
 	return b.processes.DeleteProcess(pid)
 }
 
@@ -89,11 +75,7 @@ func (b *localController) cancelRunningProcesses() {
 	b.processes.CancelRunningProcesses()
 }
 
-func (b *localController) Invoke(ctx context.Context, sessionID string, pid string, cfg *controllerapi.InvokeConfig, ioIn io.ReadCloser, ioOut io.WriteCloser, ioErr io.WriteCloser) error {
-	if sessionID != b.sessionID {
-		return errors.Errorf("unknown session ID %q", sessionID)
-	}
-
+func (b *localController) Invoke(ctx context.Context, pid string, cfg *controllerapi.InvokeConfig, ioIn io.ReadCloser, ioOut io.WriteCloser, ioErr io.WriteCloser) error {
 	proc, ok := b.processes.Get(pid)
 	if !ok {
 		// Start a new process.
@@ -121,11 +103,6 @@ func (b *localController) Invoke(ctx context.Context, sessionID string, pid stri
 	}
 }
 
-func (b *localController) Kill(context.Context) error {
-	b.Close()
-	return nil
-}
-
 func (b *localController) Close() error {
 	b.cancelRunningProcesses()
 	if b.buildConfig.resultCtx != nil {
@@ -135,18 +112,6 @@ func (b *localController) Close() error {
 	return nil
 }
 
-func (b *localController) List(ctx context.Context) (res []string, _ error) {
-	return []string{b.sessionID}, nil
-}
-
-func (b *localController) Disconnect(ctx context.Context, key string) error {
-	b.Close()
-	return nil
-}
-
-func (b *localController) Inspect(ctx context.Context, sessionID string) (*controllerapi.InspectResponse, error) {
-	if sessionID != b.sessionID {
-		return nil, errors.Errorf("unknown session ID %q", sessionID)
-	}
-	return &controllerapi.InspectResponse{Options: b.buildConfig.buildOptions}, nil
+func (b *localController) Inspect(ctx context.Context) *cbuild.Options {
+	return b.buildConfig.buildOptions
 }
@@ -1,5 +1,11 @@
 package pb
 
+type Attest struct {
+	Type     string
+	Disabled bool
+	Attrs    string
+}
+
 func CreateAttestations(attests []*Attest) map[string]*string {
 	result := map[string]*string{}
 	for _, attest := range attests {
@@ -1,6 +1,15 @@
 package pb
 
-import "github.com/moby/buildkit/client"
+import (
+	"maps"
+
+	"github.com/moby/buildkit/client"
+)
 
+type CacheOptionsEntry struct {
+	Type  string
+	Attrs map[string]string
+}
+
 func CreateCaches(entries []*CacheOptionsEntry) []client.CacheOptionsEntry {
 	var outs []client.CacheOptionsEntry
@@ -12,9 +21,7 @@ func CreateCaches(entries []*CacheOptionsEntry) []client.CacheOptionsEntry {
 			Type:  entry.Type,
 			Attrs: map[string]string{},
 		}
-		for k, v := range entry.Attrs {
-			out.Attrs[k] = v
-		}
+		maps.Copy(out.Attrs, entry.Attrs)
 		outs = append(outs, out)
 	}
 	return outs

(File diff suppressed because it is too large.)
@@ -1,250 +0,0 @@
syntax = "proto3";

package buildx.controller.v1;

import "github.com/moby/buildkit/api/services/control/control.proto";
import "github.com/moby/buildkit/sourcepolicy/pb/policy.proto";

option go_package = "github.com/docker/buildx/controller/pb";

service Controller {
  rpc Build(BuildRequest) returns (BuildResponse);
  rpc Inspect(InspectRequest) returns (InspectResponse);
  rpc Status(StatusRequest) returns (stream StatusResponse);
  rpc Input(stream InputMessage) returns (InputResponse);
  rpc Invoke(stream Message) returns (stream Message);
  rpc List(ListRequest) returns (ListResponse);
  rpc Disconnect(DisconnectRequest) returns (DisconnectResponse);
  rpc Info(InfoRequest) returns (InfoResponse);
  rpc ListProcesses(ListProcessesRequest) returns (ListProcessesResponse);
  rpc DisconnectProcess(DisconnectProcessRequest) returns (DisconnectProcessResponse);
}

message ListProcessesRequest {
  string SessionID = 1;
}

message ListProcessesResponse {
  repeated ProcessInfo Infos = 1;
}

message ProcessInfo {
  string ProcessID = 1;
  InvokeConfig InvokeConfig = 2;
}

message DisconnectProcessRequest {
  string SessionID = 1;
  string ProcessID = 2;
}

message DisconnectProcessResponse {
}

message BuildRequest {
  string SessionID = 1;
  BuildOptions Options = 2;
}

message BuildOptions {
  string ContextPath = 1;
  string DockerfileName = 2;
  CallFunc CallFunc = 3;
  map<string, string> NamedContexts = 4;

  repeated string Allow = 5;
  repeated Attest Attests = 6;
  map<string, string> BuildArgs = 7;
  repeated CacheOptionsEntry CacheFrom = 8;
  repeated CacheOptionsEntry CacheTo = 9;
  string CgroupParent = 10;
  repeated ExportEntry Exports = 11;
  repeated string ExtraHosts = 12;
  map<string, string> Labels = 13;
  string NetworkMode = 14;
  repeated string NoCacheFilter = 15;
  repeated string Platforms = 16;
  repeated Secret Secrets = 17;
  int64 ShmSize = 18;
  repeated SSH SSH = 19;
  repeated string Tags = 20;
  string Target = 21;
  UlimitOpt Ulimits = 22;

  string Builder = 23;
  bool NoCache = 24;
  bool Pull = 25;
  bool ExportPush = 26;
  bool ExportLoad = 27;
  moby.buildkit.v1.sourcepolicy.Policy SourcePolicy = 28;
  string Ref = 29;
  string GroupRef = 30;
  repeated string Annotations = 31;
  string ProvenanceResponseMode = 32;
}

message ExportEntry {
  string Type = 1;
  map<string, string> Attrs = 2;
  string Destination = 3;
}

message CacheOptionsEntry {
  string Type = 1;
  map<string, string> Attrs = 2;
}

message Attest {
  string Type = 1;
  bool Disabled = 2;
  string Attrs = 3;
}

message SSH {
  string ID = 1;
  repeated string Paths = 2;
}

message Secret {
  string ID = 1;
  string FilePath = 2;
  string Env = 3;
}

message CallFunc {
  string Name = 1;
  string Format = 2;
  bool IgnoreStatus = 3;
}

message InspectRequest {
  string SessionID = 1;
}

message InspectResponse {
  BuildOptions Options = 1;
}

message UlimitOpt {
  map<string, Ulimit> values = 1;
}

message Ulimit {
  string Name = 1;
  int64 Hard = 2;
  int64 Soft = 3;
}

message BuildResponse {
  map<string, string> ExporterResponse = 1;
}

message DisconnectRequest {
  string SessionID = 1;
}

message DisconnectResponse {}

message ListRequest {
  string SessionID = 1;
}

message ListResponse {
  repeated string keys = 1;
}

message InputMessage {
  oneof Input {
    InputInitMessage Init = 1;
    DataMessage Data = 2;
  }
}

message InputInitMessage {
  string SessionID = 1;
}

message DataMessage {
  bool EOF = 1;   // true if eof was reached
  bytes Data = 2; // should be chunked smaller than 4MB:
                  // https://pkg.go.dev/google.golang.org/grpc#MaxRecvMsgSize
}

message InputResponse {}

message Message {
  oneof Input {
    InitMessage Init = 1;
    // FdMessage used from client to server for input (stdin) and
    // from server to client for output (stdout, stderr)
    FdMessage File = 2;
    // ResizeMessage used from client to server for terminal resize events
    ResizeMessage Resize = 3;
    // SignalMessage is used from client to server to send signal events
    SignalMessage Signal = 4;
  }
}

message InitMessage {
  string SessionID = 1;

  // If ProcessID already exists in the server, it tries to connect to it
  // instead of invoking the new one. In this case, InvokeConfig will be ignored.
  string ProcessID = 2;
  InvokeConfig InvokeConfig = 3;
}

message InvokeConfig {
  repeated string Entrypoint = 1;
  repeated string Cmd = 2;
  bool NoCmd = 11; // Do not set cmd but use the image's default
  repeated string Env = 3;
  string User = 4;
  bool NoUser = 5; // Do not set user but use the image's default
  string Cwd = 6;
  bool NoCwd = 7; // Do not set cwd but use the image's default
  bool Tty = 8;
  bool Rollback = 9; // Kill all process in the container and recreate it.
  bool Initial = 10; // Run container from the initial state of that stage (supported only on the failed step)
}

message FdMessage {
  uint32 Fd = 1;  // what fd the data was from
  bool EOF = 2;   // true if eof was reached
  bytes Data = 3; // should be chunked smaller than 4MB:
                  // https://pkg.go.dev/google.golang.org/grpc#MaxRecvMsgSize
}

message ResizeMessage {
  uint32 Rows = 1;
  uint32 Cols = 2;
}

message SignalMessage {
  // we only send name (ie HUP, INT) because the int values
  // are platform dependent.
  string Name = 1;
}

message StatusRequest {
  string SessionID = 1;
}

message StatusResponse {
  repeated moby.buildkit.v1.Vertex vertexes = 1;
  repeated moby.buildkit.v1.VertexStatus statuses = 2;
  repeated moby.buildkit.v1.VertexLog logs = 3;
  repeated moby.buildkit.v1.VertexWarning warnings = 4;
}

message InfoRequest {}

message InfoResponse {
  BuildxVersion buildxVersion = 1;
}

message BuildxVersion {
  string package = 1;
  string version = 2;
  string revision = 3;
}
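Editor's note, not part of the diff: the DataMessage comment above says payloads must be chunked below the default 4MB gRPC message limit. A minimal standalone sketch of that chunking idea, written against plain byte slices rather than the generated types (the chunked helper and its send callback are illustrative only, not from the repository):

package main

import "fmt"

// chunked splits a payload into frames below the 4MB gRPC message size
// referenced in the DataMessage comment. The send callback stands in for
// sending one DataMessage on the Input stream; the final call carries EOF.
func chunked(data []byte, chunk int, send func(part []byte, eof bool) error) error {
	for len(data) > 0 {
		n := chunk
		if n > len(data) {
			n = len(data)
		}
		if err := send(data[:n], false); err != nil {
			return err
		}
		data = data[n:]
	}
	return send(nil, true) // final frame carries only EOF
}

func main() {
	payload := make([]byte, 10*1024*1024) // 10MB of input
	frames := 0
	_ = chunked(payload, 3*1024*1024, func(part []byte, eof bool) error {
		frames++
		return nil
	})
	fmt.Println("frames sent:", frames) // 5: four data frames plus one EOF frame
}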
@@ -1,452 +0,0 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc             v3.11.4
// source: github.com/docker/buildx/controller/pb/controller.proto

package pb

import (
	context "context"
	grpc "google.golang.org/grpc"
	codes "google.golang.org/grpc/codes"
	status "google.golang.org/grpc/status"
)

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9

const (
	Controller_Build_FullMethodName             = "/buildx.controller.v1.Controller/Build"
	Controller_Inspect_FullMethodName           = "/buildx.controller.v1.Controller/Inspect"
	Controller_Status_FullMethodName            = "/buildx.controller.v1.Controller/Status"
	Controller_Input_FullMethodName             = "/buildx.controller.v1.Controller/Input"
	Controller_Invoke_FullMethodName            = "/buildx.controller.v1.Controller/Invoke"
	Controller_List_FullMethodName              = "/buildx.controller.v1.Controller/List"
	Controller_Disconnect_FullMethodName        = "/buildx.controller.v1.Controller/Disconnect"
	Controller_Info_FullMethodName              = "/buildx.controller.v1.Controller/Info"
	Controller_ListProcesses_FullMethodName     = "/buildx.controller.v1.Controller/ListProcesses"
	Controller_DisconnectProcess_FullMethodName = "/buildx.controller.v1.Controller/DisconnectProcess"
)

// ControllerClient is the client API for Controller service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type ControllerClient interface {
	Build(ctx context.Context, in *BuildRequest, opts ...grpc.CallOption) (*BuildResponse, error)
	Inspect(ctx context.Context, in *InspectRequest, opts ...grpc.CallOption) (*InspectResponse, error)
	Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[StatusResponse], error)
	Input(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[InputMessage, InputResponse], error)
	Invoke(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[Message, Message], error)
	List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error)
	Disconnect(ctx context.Context, in *DisconnectRequest, opts ...grpc.CallOption) (*DisconnectResponse, error)
	Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error)
	ListProcesses(ctx context.Context, in *ListProcessesRequest, opts ...grpc.CallOption) (*ListProcessesResponse, error)
	DisconnectProcess(ctx context.Context, in *DisconnectProcessRequest, opts ...grpc.CallOption) (*DisconnectProcessResponse, error)
}

type controllerClient struct {
	cc grpc.ClientConnInterface
}

func NewControllerClient(cc grpc.ClientConnInterface) ControllerClient {
	return &controllerClient{cc}
}

func (c *controllerClient) Build(ctx context.Context, in *BuildRequest, opts ...grpc.CallOption) (*BuildResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(BuildResponse)
	err := c.cc.Invoke(ctx, Controller_Build_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *controllerClient) Inspect(ctx context.Context, in *InspectRequest, opts ...grpc.CallOption) (*InspectResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(InspectResponse)
	err := c.cc.Invoke(ctx, Controller_Inspect_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *controllerClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[StatusResponse], error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	stream, err := c.cc.NewStream(ctx, &Controller_ServiceDesc.Streams[0], Controller_Status_FullMethodName, cOpts...)
	if err != nil {
		return nil, err
	}
	x := &grpc.GenericClientStream[StatusRequest, StatusResponse]{ClientStream: stream}
	if err := x.ClientStream.SendMsg(in); err != nil {
		return nil, err
	}
	if err := x.ClientStream.CloseSend(); err != nil {
		return nil, err
	}
	return x, nil
}

// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Controller_StatusClient = grpc.ServerStreamingClient[StatusResponse]

func (c *controllerClient) Input(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[InputMessage, InputResponse], error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	stream, err := c.cc.NewStream(ctx, &Controller_ServiceDesc.Streams[1], Controller_Input_FullMethodName, cOpts...)
	if err != nil {
		return nil, err
	}
	x := &grpc.GenericClientStream[InputMessage, InputResponse]{ClientStream: stream}
	return x, nil
}

// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Controller_InputClient = grpc.ClientStreamingClient[InputMessage, InputResponse]

func (c *controllerClient) Invoke(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[Message, Message], error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	stream, err := c.cc.NewStream(ctx, &Controller_ServiceDesc.Streams[2], Controller_Invoke_FullMethodName, cOpts...)
	if err != nil {
		return nil, err
	}
	x := &grpc.GenericClientStream[Message, Message]{ClientStream: stream}
	return x, nil
}

// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Controller_InvokeClient = grpc.BidiStreamingClient[Message, Message]

func (c *controllerClient) List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(ListResponse)
	err := c.cc.Invoke(ctx, Controller_List_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *controllerClient) Disconnect(ctx context.Context, in *DisconnectRequest, opts ...grpc.CallOption) (*DisconnectResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(DisconnectResponse)
	err := c.cc.Invoke(ctx, Controller_Disconnect_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *controllerClient) Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(InfoResponse)
	err := c.cc.Invoke(ctx, Controller_Info_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *controllerClient) ListProcesses(ctx context.Context, in *ListProcessesRequest, opts ...grpc.CallOption) (*ListProcessesResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(ListProcessesResponse)
	err := c.cc.Invoke(ctx, Controller_ListProcesses_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *controllerClient) DisconnectProcess(ctx context.Context, in *DisconnectProcessRequest, opts ...grpc.CallOption) (*DisconnectProcessResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(DisconnectProcessResponse)
	err := c.cc.Invoke(ctx, Controller_DisconnectProcess_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// ControllerServer is the server API for Controller service.
// All implementations should embed UnimplementedControllerServer
// for forward compatibility.
type ControllerServer interface {
	Build(context.Context, *BuildRequest) (*BuildResponse, error)
	Inspect(context.Context, *InspectRequest) (*InspectResponse, error)
	Status(*StatusRequest, grpc.ServerStreamingServer[StatusResponse]) error
	Input(grpc.ClientStreamingServer[InputMessage, InputResponse]) error
	Invoke(grpc.BidiStreamingServer[Message, Message]) error
	List(context.Context, *ListRequest) (*ListResponse, error)
	Disconnect(context.Context, *DisconnectRequest) (*DisconnectResponse, error)
	Info(context.Context, *InfoRequest) (*InfoResponse, error)
	ListProcesses(context.Context, *ListProcessesRequest) (*ListProcessesResponse, error)
	DisconnectProcess(context.Context, *DisconnectProcessRequest) (*DisconnectProcessResponse, error)
}

// UnimplementedControllerServer should be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedControllerServer struct{}

func (UnimplementedControllerServer) Build(context.Context, *BuildRequest) (*BuildResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Build not implemented")
}
func (UnimplementedControllerServer) Inspect(context.Context, *InspectRequest) (*InspectResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Inspect not implemented")
}
func (UnimplementedControllerServer) Status(*StatusRequest, grpc.ServerStreamingServer[StatusResponse]) error {
	return status.Errorf(codes.Unimplemented, "method Status not implemented")
}
func (UnimplementedControllerServer) Input(grpc.ClientStreamingServer[InputMessage, InputResponse]) error {
	return status.Errorf(codes.Unimplemented, "method Input not implemented")
}
func (UnimplementedControllerServer) Invoke(grpc.BidiStreamingServer[Message, Message]) error {
	return status.Errorf(codes.Unimplemented, "method Invoke not implemented")
}
func (UnimplementedControllerServer) List(context.Context, *ListRequest) (*ListResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method List not implemented")
}
func (UnimplementedControllerServer) Disconnect(context.Context, *DisconnectRequest) (*DisconnectResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Disconnect not implemented")
}
func (UnimplementedControllerServer) Info(context.Context, *InfoRequest) (*InfoResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Info not implemented")
}
func (UnimplementedControllerServer) ListProcesses(context.Context, *ListProcessesRequest) (*ListProcessesResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method ListProcesses not implemented")
}
func (UnimplementedControllerServer) DisconnectProcess(context.Context, *DisconnectProcessRequest) (*DisconnectProcessResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method DisconnectProcess not implemented")
}
func (UnimplementedControllerServer) testEmbeddedByValue() {}

// UnsafeControllerServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ControllerServer will
// result in compilation errors.
type UnsafeControllerServer interface {
	mustEmbedUnimplementedControllerServer()
}

func RegisterControllerServer(s grpc.ServiceRegistrar, srv ControllerServer) {
	// If the following call pancis, it indicates UnimplementedControllerServer was
	// embedded by pointer and is nil.  This will cause panics if an
	// unimplemented method is ever invoked, so we test this at initialization
	// time to prevent it from happening at runtime later due to I/O.
	if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
		t.testEmbeddedByValue()
	}
	s.RegisterService(&Controller_ServiceDesc, srv)
}

func _Controller_Build_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(BuildRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ControllerServer).Build(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: Controller_Build_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ControllerServer).Build(ctx, req.(*BuildRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _Controller_Inspect_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(InspectRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ControllerServer).Inspect(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: Controller_Inspect_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ControllerServer).Inspect(ctx, req.(*InspectRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _Controller_Status_Handler(srv interface{}, stream grpc.ServerStream) error {
	m := new(StatusRequest)
	if err := stream.RecvMsg(m); err != nil {
		return err
	}
	return srv.(ControllerServer).Status(m, &grpc.GenericServerStream[StatusRequest, StatusResponse]{ServerStream: stream})
}

// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Controller_StatusServer = grpc.ServerStreamingServer[StatusResponse]

func _Controller_Input_Handler(srv interface{}, stream grpc.ServerStream) error {
	return srv.(ControllerServer).Input(&grpc.GenericServerStream[InputMessage, InputResponse]{ServerStream: stream})
}

// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Controller_InputServer = grpc.ClientStreamingServer[InputMessage, InputResponse]

func _Controller_Invoke_Handler(srv interface{}, stream grpc.ServerStream) error {
	return srv.(ControllerServer).Invoke(&grpc.GenericServerStream[Message, Message]{ServerStream: stream})
}

// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Controller_InvokeServer = grpc.BidiStreamingServer[Message, Message]

func _Controller_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ListRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ControllerServer).List(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: Controller_List_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ControllerServer).List(ctx, req.(*ListRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _Controller_Disconnect_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(DisconnectRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ControllerServer).Disconnect(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: Controller_Disconnect_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ControllerServer).Disconnect(ctx, req.(*DisconnectRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _Controller_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(InfoRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ControllerServer).Info(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: Controller_Info_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ControllerServer).Info(ctx, req.(*InfoRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _Controller_ListProcesses_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ListProcessesRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ControllerServer).ListProcesses(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: Controller_ListProcesses_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ControllerServer).ListProcesses(ctx, req.(*ListProcessesRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _Controller_DisconnectProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(DisconnectProcessRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ControllerServer).DisconnectProcess(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: Controller_DisconnectProcess_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ControllerServer).DisconnectProcess(ctx, req.(*DisconnectProcessRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// Controller_ServiceDesc is the grpc.ServiceDesc for Controller service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Controller_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "buildx.controller.v1.Controller",
	HandlerType: (*ControllerServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "Build",
			Handler:    _Controller_Build_Handler,
		},
		{
			MethodName: "Inspect",
			Handler:    _Controller_Inspect_Handler,
		},
		{
			MethodName: "List",
			Handler:    _Controller_List_Handler,
		},
		{
			MethodName: "Disconnect",
			Handler:    _Controller_Disconnect_Handler,
		},
		{
			MethodName: "Info",
			Handler:    _Controller_Info_Handler,
		},
		{
			MethodName: "ListProcesses",
			Handler:    _Controller_ListProcesses_Handler,
		},
		{
			MethodName: "DisconnectProcess",
			Handler:    _Controller_DisconnectProcess_Handler,
		},
	},
	Streams: []grpc.StreamDesc{
		{
			StreamName:    "Status",
			Handler:       _Controller_Status_Handler,
			ServerStreams: true,
		},
		{
			StreamName:    "Input",
			Handler:       _Controller_Input_Handler,
			ClientStreams: true,
		},
		{
			StreamName:    "Invoke",
			Handler:       _Controller_Invoke_Handler,
			ServerStreams: true,
			ClientStreams: true,
		},
	},
	Metadata: "github.com/docker/buildx/controller/pb/controller.proto",
}
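Editor's note, not part of the diff: for orientation only, a minimal sketch of how a server embeds UnimplementedControllerServer by value (as the generated comments above require) and registers against the service descriptor. It targets the generated API shown above, which this change removes; the myController type and the socket path are purely illustrative:

package main

import (
	"context"
	"net"

	pb "github.com/docker/buildx/controller/pb"
	"google.golang.org/grpc"
)

// myController embeds UnimplementedControllerServer by value so any RPC it
// does not override fails with codes.Unimplemented instead of panicking.
type myController struct {
	pb.UnimplementedControllerServer
}

func (s *myController) Info(ctx context.Context, req *pb.InfoRequest) (*pb.InfoResponse, error) {
	return &pb.InfoResponse{
		BuildxVersion: &pb.BuildxVersion{Package: "example", Version: "dev", Revision: "none"},
	}, nil
}

func main() {
	l, err := net.Listen("unix", "/tmp/buildx-example.sock") // illustrative address
	if err != nil {
		panic(err)
	}
	srv := grpc.NewServer()
	pb.RegisterControllerServer(srv, &myController{})
	_ = srv.Serve(l)
}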
File diff suppressed because it is too large
@@ -2,6 +2,7 @@ package pb
 
 import (
 	"io"
+	"maps"
 	"os"
 	"strconv"
 
@@ -10,6 +11,12 @@ import (
 	"github.com/pkg/errors"
 )
 
+type ExportEntry struct {
+	Type        string
+	Attrs       map[string]string
+	Destination string
+}
+
 func CreateExports(entries []*ExportEntry) ([]client.ExportEntry, []string, error) {
 	var outs []client.ExportEntry
 	var localPaths []string
@@ -26,9 +33,7 @@ func CreateExports(entries []*ExportEntry) ([]client.ExportEntry, []string, erro
 			Type:  entry.Type,
 			Attrs: map[string]string{},
 		}
-		for k, v := range entry.Attrs {
-			out.Attrs[k] = v
-		}
+		maps.Copy(out.Attrs, entry.Attrs)
 
 		supportFile := false
 		supportDir := false
controller/pb/invoke.go (new file, 40 lines)
@@ -0,0 +1,40 @@
package pb

import (
	"fmt"
	"strings"
)

type CallFunc struct {
	Name         string
	Format       string
	IgnoreStatus bool
}

func (x *CallFunc) String() string {
	var elems []string
	if x.Name != "" {
		elems = append(elems, fmt.Sprintf("Name:%q", x.Name))
	}
	if x.Format != "" {
		elems = append(elems, fmt.Sprintf("Format:%q", x.Format))
	}
	if x.IgnoreStatus {
		elems = append(elems, fmt.Sprintf("IgnoreStatus:%v", x.IgnoreStatus))
	}
	return strings.Join(elems, " ")
}

type InvokeConfig struct {
	Entrypoint []string
	Cmd        []string
	NoCmd      bool
	Env        []string
	User       string
	NoUser     bool
	Cwd        string
	NoCwd      bool
	Tty        bool
	Rollback   bool
	Initial    bool
}
@@ -1,162 +0,0 @@
package pb

import (
	"time"

	"github.com/docker/buildx/util/progress"
	control "github.com/moby/buildkit/api/services/control"
	"github.com/moby/buildkit/client"
	"github.com/opencontainers/go-digest"
	"google.golang.org/protobuf/types/known/timestamppb"
)

type writer struct {
	ch chan<- *StatusResponse
}

func NewProgressWriter(ch chan<- *StatusResponse) progress.Writer {
	return &writer{ch: ch}
}

func (w *writer) Write(status *client.SolveStatus) {
	w.ch <- ToControlStatus(status)
}

func (w *writer) WriteBuildRef(target string, ref string) {}

func (w *writer) ValidateLogSource(digest.Digest, interface{}) bool {
	return true
}

func (w *writer) ClearLogSource(interface{}) {}

func ToControlStatus(s *client.SolveStatus) *StatusResponse {
	resp := StatusResponse{}
	for _, v := range s.Vertexes {
		resp.Vertexes = append(resp.Vertexes, &control.Vertex{
			Digest:        string(v.Digest),
			Inputs:        digestSliceToPB(v.Inputs),
			Name:          v.Name,
			Started:       timestampToPB(v.Started),
			Completed:     timestampToPB(v.Completed),
			Error:         v.Error,
			Cached:        v.Cached,
			ProgressGroup: v.ProgressGroup,
		})
	}
	for _, v := range s.Statuses {
		resp.Statuses = append(resp.Statuses, &control.VertexStatus{
			ID:        v.ID,
			Vertex:    string(v.Vertex),
			Name:      v.Name,
			Total:     v.Total,
			Current:   v.Current,
			Timestamp: timestamppb.New(v.Timestamp),
			Started:   timestampToPB(v.Started),
			Completed: timestampToPB(v.Completed),
		})
	}
	for _, v := range s.Logs {
		resp.Logs = append(resp.Logs, &control.VertexLog{
			Vertex:    string(v.Vertex),
			Stream:    int64(v.Stream),
			Msg:       v.Data,
			Timestamp: timestamppb.New(v.Timestamp),
		})
	}
	for _, v := range s.Warnings {
		resp.Warnings = append(resp.Warnings, &control.VertexWarning{
			Vertex: string(v.Vertex),
			Level:  int64(v.Level),
			Short:  v.Short,
			Detail: v.Detail,
			Url:    v.URL,
			Info:   v.SourceInfo,
			Ranges: v.Range,
		})
	}
	return &resp
}

func FromControlStatus(resp *StatusResponse) *client.SolveStatus {
	s := client.SolveStatus{}
	for _, v := range resp.Vertexes {
		s.Vertexes = append(s.Vertexes, &client.Vertex{
			Digest:        digest.Digest(v.Digest),
			Inputs:        digestSliceFromPB(v.Inputs),
			Name:          v.Name,
			Started:       timestampFromPB(v.Started),
			Completed:     timestampFromPB(v.Completed),
			Error:         v.Error,
			Cached:        v.Cached,
			ProgressGroup: v.ProgressGroup,
		})
	}
	for _, v := range resp.Statuses {
		s.Statuses = append(s.Statuses, &client.VertexStatus{
			ID:        v.ID,
			Vertex:    digest.Digest(v.Vertex),
			Name:      v.Name,
			Total:     v.Total,
			Current:   v.Current,
			Timestamp: v.Timestamp.AsTime(),
			Started:   timestampFromPB(v.Started),
			Completed: timestampFromPB(v.Completed),
		})
	}
	for _, v := range resp.Logs {
		s.Logs = append(s.Logs, &client.VertexLog{
			Vertex:    digest.Digest(v.Vertex),
			Stream:    int(v.Stream),
			Data:      v.Msg,
			Timestamp: v.Timestamp.AsTime(),
		})
	}
	for _, v := range resp.Warnings {
		s.Warnings = append(s.Warnings, &client.VertexWarning{
			Vertex:     digest.Digest(v.Vertex),
			Level:      int(v.Level),
			Short:      v.Short,
			Detail:     v.Detail,
			URL:        v.Url,
			SourceInfo: v.Info,
			Range:      v.Ranges,
		})
	}
	return &s
}

func timestampFromPB(ts *timestamppb.Timestamp) *time.Time {
	if ts == nil {
		return nil
	}

	t := ts.AsTime()
	if t.IsZero() {
		return nil
	}
	return &t
}

func timestampToPB(ts *time.Time) *timestamppb.Timestamp {
	if ts == nil {
		return nil
	}
	return timestamppb.New(*ts)
}

func digestSliceFromPB(elems []string) []digest.Digest {
	clone := make([]digest.Digest, len(elems))
	for i, e := range elems {
		clone[i] = digest.Digest(e)
	}
	return clone
}

func digestSliceToPB(elems []digest.Digest) []string {
	clone := make([]string, len(elems))
	for i, e := range elems {
		clone[i] = string(e)
	}
	return clone
}
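Editor's note, not part of the diff: the IsZero check in timestampFromPB above is what keeps an unset time from round-tripping as the zero instant. A standalone illustration; fromPB here is a local copy of the removed helper so the snippet runs on its own:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func fromPB(ts *timestamppb.Timestamp) *time.Time {
	if ts == nil {
		return nil
	}
	t := ts.AsTime()
	if t.IsZero() {
		return nil // treat the zero time as "not set", matching the removed helper
	}
	return &t
}

func main() {
	started := time.Now()
	fmt.Println(fromPB(timestamppb.New(started)) != nil) // true: a real timestamp survives
	fmt.Println(fromPB(timestamppb.New(time.Time{})))    // <nil>: the zero time maps back to nil
	fmt.Println(fromPB(nil))                             // <nil>
}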
@@ -5,6 +5,12 @@ import (
 	"github.com/moby/buildkit/session/secrets/secretsprovider"
 )
 
+type Secret struct {
+	ID       string
+	FilePath string
+	Env      string
+}
+
 func CreateSecrets(secrets []*Secret) (session.Attachable, error) {
 	fs := make([]secretsprovider.Source, 0, len(secrets))
 	for _, secret := range secrets {
@@ -1,16 +1,23 @@
 package pb
 
 import (
+	"slices"
+
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/session/sshforward/sshprovider"
 )
 
+type SSH struct {
+	ID    string
+	Paths []string
+}
+
 func CreateSSH(ssh []*SSH) (session.Attachable, error) {
 	configs := make([]sshprovider.AgentConfig, 0, len(ssh))
 	for _, ssh := range ssh {
 		cfg := sshprovider.AgentConfig{
 			ID:    ssh.ID,
-			Paths: append([]string{}, ssh.Paths...),
+			Paths: slices.Clone(ssh.Paths),
 		}
 		configs = append(configs, cfg)
 	}
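Editor's note, not part of the diff: slices.Clone (Go 1.21+) is equivalent to the removed append([]string{}, ...) idiom for non-empty input; the one nuance is the nil case, sketched below with an invented path value:

package main

import (
	"fmt"
	"slices"
)

func main() {
	paths := []string{"/run/ssh-agent.sock"}
	fmt.Println(slices.Clone(paths)) // [/run/ssh-agent.sock], backed by a fresh array

	// Nuance: cloning a nil slice stays nil, while the old idiom returned an
	// empty non-nil slice. That should not matter for AgentConfig.Paths, but
	// it can matter to code that compares against nil.
	var none []string
	fmt.Println(slices.Clone(none) == nil)          // true
	fmt.Println(append([]string{}, none...) == nil) // false
}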
controller/pb/ulimit.go (new file, 11 lines)
@@ -0,0 +1,11 @@
package pb

type UlimitOpt struct {
	Values map[string]*Ulimit
}

type Ulimit struct {
	Name string
	Hard int64
	Soft int64
}
@@ -39,7 +39,7 @@ func (p *Process) Done() <-chan error {
 	return p.errCh
 }
 
-// Manager manages a set of proceses.
+// Manager manages a set of processes.
 type Manager struct {
 	container atomic.Value
 	processes sync.Map
@@ -73,9 +73,9 @@ func (m *Manager) CancelRunningProcesses() {
 }
 
 // ListProcesses lists all running processes.
-func (m *Manager) ListProcesses() (res []*pb.ProcessInfo) {
+func (m *Manager) ListProcesses() (res []*ProcessInfo) {
 	m.processes.Range(func(key, value any) bool {
-		res = append(res, &pb.ProcessInfo{
+		res = append(res, &ProcessInfo{
 			ProcessID:    key.(string),
 			InvokeConfig: value.(*Process).invokeConfig,
 		})
@@ -154,3 +154,8 @@ func (m *Manager) StartProcess(pid string, resultCtx *build.ResultHandle, cfg *p
 
 	return p, nil
 }
+
+type ProcessInfo struct {
+	ProcessID    string
+	InvokeConfig *pb.InvokeConfig
+}
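Editor's note, not part of the diff: ListProcesses above collects a snapshot by ranging over a sync.Map. A standalone sketch of that pattern; the Info type and the stored keys are stand-ins for illustration only:

package main

import (
	"fmt"
	"sync"
)

// Info mirrors the shape of the ProcessInfo collection above (illustrative only).
type Info struct{ ProcessID string }

func main() {
	var processes sync.Map
	processes.Store("p1", &Info{ProcessID: "p1"})
	processes.Store("p2", &Info{ProcessID: "p2"})

	// Same pattern as Manager.ListProcesses: Range visits whatever is stored
	// at call time; returning true keeps the iteration going.
	var res []*Info
	processes.Range(func(key, value any) bool {
		res = append(res, value.(*Info))
		return true
	})
	fmt.Println(len(res)) // 2
}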
@@ -1,243 +0,0 @@
package remote

import (
	"context"
	"io"
	"sync"
	"time"

	"github.com/containerd/containerd/v2/defaults"
	"github.com/containerd/containerd/v2/pkg/dialer"
	"github.com/docker/buildx/build"
	"github.com/docker/buildx/controller/pb"
	"github.com/docker/buildx/util/progress"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/identity"
	"github.com/moby/buildkit/util/grpcerrors"
	"github.com/pkg/errors"
	"golang.org/x/sync/errgroup"
	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/credentials/insecure"
)

func NewClient(ctx context.Context, addr string) (*Client, error) {
	backoffConfig := backoff.DefaultConfig
	backoffConfig.MaxDelay = 3 * time.Second
	connParams := grpc.ConnectParams{
		Backoff: backoffConfig,
	}
	gopts := []grpc.DialOption{
		//nolint:staticcheck // ignore SA1019: WithBlock is deprecated and does not work with NewClient.
		grpc.WithBlock(),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithConnectParams(connParams),
		grpc.WithContextDialer(dialer.ContextDialer),
		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)),
		grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)),
		grpc.WithUnaryInterceptor(grpcerrors.UnaryClientInterceptor),
		grpc.WithStreamInterceptor(grpcerrors.StreamClientInterceptor),
	}
	//nolint:staticcheck // ignore SA1019: Recommended NewClient has different behavior from DialContext.
	conn, err := grpc.DialContext(ctx, dialer.DialAddress(addr), gopts...)
	if err != nil {
		return nil, err
	}
	return &Client{conn: conn}, nil
}

type Client struct {
	conn      *grpc.ClientConn
	closeOnce sync.Once
}

func (c *Client) Close() (err error) {
	c.closeOnce.Do(func() {
		err = c.conn.Close()
	})
	return
}

func (c *Client) Version(ctx context.Context) (string, string, string, error) {
	res, err := c.client().Info(ctx, &pb.InfoRequest{})
	if err != nil {
		return "", "", "", err
	}
	v := res.BuildxVersion
	return v.Package, v.Version, v.Revision, nil
}

func (c *Client) List(ctx context.Context) (keys []string, retErr error) {
	res, err := c.client().List(ctx, &pb.ListRequest{})
	if err != nil {
		return nil, err
	}
	return res.Keys, nil
}

func (c *Client) Disconnect(ctx context.Context, sessionID string) error {
	if sessionID == "" {
		return nil
	}
	_, err := c.client().Disconnect(ctx, &pb.DisconnectRequest{SessionID: sessionID})
	return err
}

func (c *Client) ListProcesses(ctx context.Context, sessionID string) (infos []*pb.ProcessInfo, retErr error) {
	res, err := c.client().ListProcesses(ctx, &pb.ListProcessesRequest{SessionID: sessionID})
	if err != nil {
		return nil, err
	}
	return res.Infos, nil
}

func (c *Client) DisconnectProcess(ctx context.Context, sessionID, pid string) error {
	_, err := c.client().DisconnectProcess(ctx, &pb.DisconnectProcessRequest{SessionID: sessionID, ProcessID: pid})
	return err
}

func (c *Client) Invoke(ctx context.Context, sessionID string, pid string, invokeConfig *pb.InvokeConfig, in io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) error {
	if sessionID == "" || pid == "" {
		return errors.New("build session ID must be specified")
	}
	stream, err := c.client().Invoke(ctx)
	if err != nil {
		return err
	}
	return attachIO(ctx, stream, &pb.InitMessage{SessionID: sessionID, ProcessID: pid, InvokeConfig: invokeConfig}, ioAttachConfig{
		stdin:  in,
		stdout: stdout,
		stderr: stderr,
		// TODO: Signal, Resize
	})
}

func (c *Client) Inspect(ctx context.Context, sessionID string) (*pb.InspectResponse, error) {
	return c.client().Inspect(ctx, &pb.InspectRequest{SessionID: sessionID})
}

func (c *Client) Build(ctx context.Context, options *pb.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, *build.Inputs, error) {
	ref := identity.NewID()
	statusChan := make(chan *client.SolveStatus)
	eg, egCtx := errgroup.WithContext(ctx)
	var resp *client.SolveResponse
	eg.Go(func() error {
		defer close(statusChan)
		var err error
		resp, err = c.build(egCtx, ref, options, in, statusChan)
		return err
	})
	eg.Go(func() error {
		for s := range statusChan {
			st := s
			progress.Write(st)
		}
		return nil
	})
	return ref, resp, nil, eg.Wait()
}

func (c *Client) build(ctx context.Context, sessionID string, options *pb.BuildOptions, in io.ReadCloser, statusChan chan *client.SolveStatus) (*client.SolveResponse, error) {
	eg, egCtx := errgroup.WithContext(ctx)
	done := make(chan struct{})

	var resp *client.SolveResponse

	eg.Go(func() error {
		defer close(done)
		pbResp, err := c.client().Build(egCtx, &pb.BuildRequest{
			SessionID: sessionID,
			Options:   options,
		})
		if err != nil {
			return err
		}
		resp = &client.SolveResponse{
			ExporterResponse: pbResp.ExporterResponse,
		}
		return nil
	})
	eg.Go(func() error {
		stream, err := c.client().Status(egCtx, &pb.StatusRequest{
			SessionID: sessionID,
		})
		if err != nil {
			return err
		}
		for {
			resp, err := stream.Recv()
			if err != nil {
				if err == io.EOF {
					return nil
				}
				return errors.Wrap(err, "failed to receive status")
			}
			statusChan <- pb.FromControlStatus(resp)
		}
	})
	if in != nil {
		eg.Go(func() error {
			stream, err := c.client().Input(egCtx)
			if err != nil {
				return err
			}
			if err := stream.Send(&pb.InputMessage{
				Input: &pb.InputMessage_Init{
					Init: &pb.InputInitMessage{
						SessionID: sessionID,
					},
				},
			}); err != nil {
				return errors.Wrap(err, "failed to init input")
			}

			inReader, inWriter := io.Pipe()
			eg2, _ := errgroup.WithContext(ctx)
			eg2.Go(func() error {
				<-done
				return inWriter.Close()
			})
			go func() {
				// do not wait for read completion but return here and let the caller send EOF
				// this allows us to return on ctx.Done() without being blocked by this reader.
				io.Copy(inWriter, in)
				inWriter.Close()
			}()
			eg2.Go(func() error {
				for {
					buf := make([]byte, 32*1024)
					n, err := inReader.Read(buf)
					if err != nil {
						if err == io.EOF {
							break // break loop and send EOF
						}
						return err
					} else if n > 0 {
						if err := stream.Send(&pb.InputMessage{
							Input: &pb.InputMessage_Data{
								Data: &pb.DataMessage{
									Data: buf[:n],
								},
							},
						}); err != nil {
							return err
						}
					}
				}
				return stream.Send(&pb.InputMessage{
					Input: &pb.InputMessage_Data{
						Data: &pb.DataMessage{
							EOF: true,
						},
					},
				})
			})
			return eg2.Wait()
		})
	}
	return resp, eg.Wait()
}

func (c *Client) client() pb.ControllerClient {
	return pb.NewControllerClient(c.conn)
}
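Editor's note, not part of the diff: a brief usage sketch of the remote client removed above, using only methods shown in that file (NewClient, Close, Version). The package path follows the go_package option earlier in this compare; the socket path is illustrative:

package main

import (
	"context"
	"fmt"

	"github.com/docker/buildx/controller/remote"
)

func main() {
	ctx := context.Background()

	// Dial the buildx server over its unix socket (path is illustrative).
	c, err := remote.NewClient(ctx, "/tmp/buildx.sock")
	if err != nil {
		panic(err)
	}
	defer c.Close()

	// Version is the cheapest round trip; the server code later in this
	// compare uses it to verify client/server compatibility.
	p, v, r, err := c.Version(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(p, v, r)
}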
@@ -1,335 +0,0 @@
//go:build linux

package remote

import (
	"context"
	"fmt"
	"io"
	"net"
	"os"
	"os/exec"
	"os/signal"
	"path/filepath"
	"strconv"
	"syscall"
	"time"

	"github.com/containerd/log"
	"github.com/docker/buildx/build"
	cbuild "github.com/docker/buildx/controller/build"
	"github.com/docker/buildx/controller/control"
	controllerapi "github.com/docker/buildx/controller/pb"
	"github.com/docker/buildx/util/confutil"
	"github.com/docker/buildx/util/progress"
	"github.com/docker/buildx/version"
	"github.com/docker/cli/cli/command"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/util/grpcerrors"
	"github.com/pelletier/go-toml"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	"google.golang.org/grpc"
)

const (
	serveCommandName = "_INTERNAL_SERVE"
)

var (
	defaultLogFilename    = fmt.Sprintf("buildx.%s.log", version.Revision)
	defaultSocketFilename = fmt.Sprintf("buildx.%s.sock", version.Revision)
	defaultPIDFilename    = fmt.Sprintf("buildx.%s.pid", version.Revision)
)

type serverConfig struct {
	// Specify buildx server root
	Root string `toml:"root"`

	// LogLevel sets the logging level [trace, debug, info, warn, error, fatal, panic]
	LogLevel string `toml:"log_level"`

	// Specify file to output buildx server log
	LogFile string `toml:"log_file"`
}

func NewRemoteBuildxController(ctx context.Context, dockerCli command.Cli, opts control.ControlOptions, logger progress.SubLogger) (control.BuildxController, error) {
	rootDir := opts.Root
	if rootDir == "" {
		rootDir = rootDataDir(dockerCli)
	}
	serverRoot := filepath.Join(rootDir, "shared")

	// connect to buildx server if it is already running
	ctx2, cancel := context.WithCancelCause(ctx)
	ctx2, _ = context.WithTimeoutCause(ctx2, 1*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
	c, err := newBuildxClientAndCheck(ctx2, filepath.Join(serverRoot, defaultSocketFilename))
	cancel(errors.WithStack(context.Canceled))
	if err != nil {
		if !errors.Is(err, context.DeadlineExceeded) {
			return nil, errors.Wrap(err, "cannot connect to the buildx server")
		}
	} else {
		return &buildxController{c, serverRoot}, nil
	}

	// start buildx server via subcommand
	err = logger.Wrap("no buildx server found; launching...", func() error {
		launchFlags := []string{}
		if opts.ServerConfig != "" {
			launchFlags = append(launchFlags, "--config", opts.ServerConfig)
		}
		logFile, err := getLogFilePath(dockerCli, opts.ServerConfig)
		if err != nil {
			return err
		}
		wait, err := launch(ctx, logFile, append([]string{serveCommandName}, launchFlags...)...)
		if err != nil {
			return err
		}
		go wait()

		// wait for buildx server to be ready
		ctx2, cancel = context.WithCancelCause(ctx)
		ctx2, _ = context.WithTimeoutCause(ctx2, 10*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
		c, err = newBuildxClientAndCheck(ctx2, filepath.Join(serverRoot, defaultSocketFilename))
		cancel(errors.WithStack(context.Canceled))
		if err != nil {
			return errors.Wrap(err, "cannot connect to the buildx server")
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	return &buildxController{c, serverRoot}, nil
}

func AddControllerCommands(cmd *cobra.Command, dockerCli command.Cli) {
	cmd.AddCommand(
		serveCmd(dockerCli),
	)
}

func serveCmd(dockerCli command.Cli) *cobra.Command {
	var serverConfigPath string
	cmd := &cobra.Command{
		Use:    fmt.Sprintf("%s [OPTIONS]", serveCommandName),
		Hidden: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Parse config
			config, err := getConfig(dockerCli, serverConfigPath)
			if err != nil {
				return err
			}
			if config.LogLevel == "" {
				logrus.SetLevel(logrus.InfoLevel)
			} else {
				lvl, err := logrus.ParseLevel(config.LogLevel)
				if err != nil {
					return errors.Wrap(err, "failed to prepare logger")
				}
				logrus.SetLevel(lvl)
			}
			logrus.SetFormatter(&logrus.JSONFormatter{
				TimestampFormat: log.RFC3339NanoFixed,
			})
			root, err := prepareRootDir(dockerCli, config)
			if err != nil {
				return err
			}
			pidF := filepath.Join(root, defaultPIDFilename)
			if err := os.WriteFile(pidF, []byte(fmt.Sprintf("%d", os.Getpid())), 0600); err != nil {
				return err
			}
			defer func() {
				if err := os.Remove(pidF); err != nil {
					logrus.Errorf("failed to clean up info file %q: %v", pidF, err)
				}
			}()

			// prepare server
			b := NewServer(func(ctx context.Context, options *controllerapi.BuildOptions, stdin io.Reader, progress progress.Writer) (*client.SolveResponse, *build.ResultHandle, *build.Inputs, error) {
				return cbuild.RunBuild(ctx, dockerCli, options, stdin, progress, true)
			})
			defer b.Close()

			// serve server
			addr := filepath.Join(root, defaultSocketFilename)
			if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { // avoid EADDRINUSE
				return err
			}
			defer func() {
				if err := os.Remove(addr); err != nil {
					logrus.Errorf("failed to clean up socket %q: %v", addr, err)
				}
			}()
			logrus.Infof("starting server at %q", addr)
			l, err := net.Listen("unix", addr)
			if err != nil {
				return err
			}
			rpc := grpc.NewServer(
				grpc.UnaryInterceptor(grpcerrors.UnaryServerInterceptor),
				grpc.StreamInterceptor(grpcerrors.StreamServerInterceptor),
			)
			controllerapi.RegisterControllerServer(rpc, b)
			doneCh := make(chan struct{})
			errCh := make(chan error, 1)
			go func() {
				defer close(doneCh)
				if err := rpc.Serve(l); err != nil {
					errCh <- errors.Wrapf(err, "error on serving via socket %q", addr)
				}
			}()

			var s os.Signal
			sigCh := make(chan os.Signal, 1)
			signal.Notify(sigCh, syscall.SIGINT)
			signal.Notify(sigCh, syscall.SIGTERM)
			select {
			case err := <-errCh:
				logrus.Errorf("got error %s, exiting", err)
				return err
			case s = <-sigCh:
				logrus.Infof("got signal %s, exiting", s)
				return nil
			case <-doneCh:
				logrus.Infof("rpc server done, exiting")
				return nil
			}
		},
	}

	flags := cmd.Flags()
	flags.StringVar(&serverConfigPath, "config", "", "Specify buildx server config file")
	return cmd
}

func getLogFilePath(dockerCli command.Cli, configPath string) (string, error) {
	config, err := getConfig(dockerCli, configPath)
	if err != nil {
		return "", err
	}
	if config.LogFile == "" {
		root, err := prepareRootDir(dockerCli, config)
		if err != nil {
			return "", err
		}
		return filepath.Join(root, defaultLogFilename), nil
	}
	return config.LogFile, nil
}

func getConfig(dockerCli command.Cli, configPath string) (*serverConfig, error) {
	var defaultConfigPath bool
	if configPath == "" {
		defaultRoot := rootDataDir(dockerCli)
		configPath = filepath.Join(defaultRoot, "config.toml")
		defaultConfigPath = true
	}
	var config serverConfig
	tree, err := toml.LoadFile(configPath)
	if err != nil && !(os.IsNotExist(err) && defaultConfigPath) {
		return nil, errors.Wrapf(err, "failed to read config %q", configPath)
	} else if err == nil {
		if err := tree.Unmarshal(&config); err != nil {
			return nil, errors.Wrapf(err, "failed to unmarshal config %q", configPath)
		}
	}
	return &config, nil
}

func prepareRootDir(dockerCli command.Cli, config *serverConfig) (string, error) {
	rootDir := config.Root
	if rootDir == "" {
		rootDir = rootDataDir(dockerCli)
	}
	if rootDir == "" {
		return "", errors.New("buildx root dir must be determined")
	}
	if err := os.MkdirAll(rootDir, 0700); err != nil {
		return "", err
	}
	serverRoot := filepath.Join(rootDir, "shared")
	if err := os.MkdirAll(serverRoot, 0700); err != nil {
		return "", err
	}
	return serverRoot, nil
}

func rootDataDir(dockerCli command.Cli) string {
	return filepath.Join(confutil.NewConfig(dockerCli).Dir(), "controller")
}

func newBuildxClientAndCheck(ctx context.Context, addr string) (*Client, error) {
	c, err := NewClient(ctx, addr)
	if err != nil {
		return nil, err
	}
	p, v, r, err := c.Version(ctx)
	if err != nil {
		return nil, err
	}
	logrus.Debugf("connected to server (\"%v %v %v\")", p, v, r)
	if !(p == version.Package && v == version.Version && r == version.Revision) {
		return nil, errors.Errorf("version mismatch (client: \"%v %v %v\", server: \"%v %v %v\")", version.Package, version.Version, version.Revision, p, v, r)
	}
	return c, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type buildxController struct {
|
|
||||||
*Client
|
|
||||||
serverRoot string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *buildxController) Kill(ctx context.Context) error {
|
|
||||||
pidB, err := os.ReadFile(filepath.Join(c.serverRoot, defaultPIDFilename))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
pid, err := strconv.ParseInt(string(pidB), 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if pid <= 0 {
|
|
||||||
return errors.New("no PID is recorded for buildx server")
|
|
||||||
}
|
|
||||||
p, err := os.FindProcess(int(pid))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := p.Signal(syscall.SIGINT); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// TODO: Should we send SIGKILL if process doesn't finish?
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func launch(ctx context.Context, logFile string, args ...string) (func() error, error) {
|
|
||||||
// set absolute path of binary, since we set the working directory to the root
|
|
||||||
pathname, err := os.Executable()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
bCmd := exec.CommandContext(ctx, pathname, args...)
|
|
||||||
if logFile != "" {
|
|
||||||
f, err := os.OpenFile(logFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
bCmd.Stdout = f
|
|
||||||
bCmd.Stderr = f
|
|
||||||
}
|
|
||||||
bCmd.Stdin = nil
|
|
||||||
bCmd.Dir = "/"
|
|
||||||
bCmd.SysProcAttr = &syscall.SysProcAttr{
|
|
||||||
Setsid: true,
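// Setsid detaches the server into its own session so it keeps running after the parent CLI process exits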
|
|
||||||
}
|
|
||||||
if err := bCmd.Start(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return bCmd.Wait, nil
|
|
||||||
}
|
|
@@ -1,19 +0,0 @@
|
|||||||
//go:build !linux
|
|
||||||
|
|
||||||
package remote
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
|
|
||||||
"github.com/docker/buildx/controller/control"
|
|
||||||
"github.com/docker/buildx/util/progress"
|
|
||||||
"github.com/docker/cli/cli/command"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
"github.com/spf13/cobra"
|
|
||||||
)
|
|
||||||
|
|
||||||
func NewRemoteBuildxController(ctx context.Context, dockerCli command.Cli, opts control.ControlOptions, logger progress.SubLogger) (control.BuildxController, error) {
|
|
||||||
return nil, errors.New("remote buildx unsupported")
|
|
||||||
}
|
|
||||||
|
|
||||||
func AddControllerCommands(cmd *cobra.Command, dockerCli command.Cli) {}
|
|
@@ -1,430 +0,0 @@
|
|||||||
package remote
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"io"
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/docker/buildx/controller/pb"
|
|
||||||
"github.com/moby/sys/signal"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"golang.org/x/sync/errgroup"
|
|
||||||
)
|
|
||||||
|
|
||||||
type msgStream interface {
|
|
||||||
Send(*pb.Message) error
|
|
||||||
Recv() (*pb.Message, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
type ioServerConfig struct {
|
|
||||||
stdin io.WriteCloser
|
|
||||||
stdout, stderr io.ReadCloser
|
|
||||||
|
|
||||||
// signalFn is a callback invoked when a signal message is received from the client.
|
|
||||||
signalFn func(context.Context, syscall.Signal) error
|
|
||||||
|
|
||||||
// resizeFn is a callback invoked when a resize event is received from the client.
|
|
||||||
resizeFn func(context.Context, winSize) error
|
|
||||||
}
|
|
||||||
|
|
||||||
func serveIO(attachCtx context.Context, srv msgStream, initFn func(*pb.InitMessage) error, ioConfig *ioServerConfig) (err error) {
|
|
||||||
stdin, stdout, stderr := ioConfig.stdin, ioConfig.stdout, ioConfig.stderr
|
|
||||||
stream := &debugStream{srv, "server=" + time.Now().String()}
|
|
||||||
eg, ctx := errgroup.WithContext(attachCtx)
|
|
||||||
done := make(chan struct{})
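// done is closed when the message-handling goroutine below exits; the pipe-closing goroutines wait on it to unblock pending copies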
|
|
||||||
|
|
||||||
msg, err := receive(ctx, stream)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
init := msg.GetInit()
|
|
||||||
if init == nil {
|
|
||||||
return errors.Errorf("unexpected message: %T; wanted init", msg.GetInput())
|
|
||||||
}
|
|
||||||
sessionID := init.SessionID
|
|
||||||
if sessionID == "" {
|
|
||||||
return errors.New("no session ID is provided")
|
|
||||||
}
|
|
||||||
if err := initFn(init); err != nil {
|
|
||||||
return errors.Wrap(err, "failed to initialize IO server")
|
|
||||||
}
|
|
||||||
|
|
||||||
if stdout != nil {
|
|
||||||
stdoutReader, stdoutWriter := io.Pipe()
|
|
||||||
eg.Go(func() error {
|
|
||||||
<-done
|
|
||||||
return stdoutWriter.Close()
|
|
||||||
})
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
// do not wait for read completion but return here and let the caller send EOF
|
|
||||||
// this allows us to return on ctx.Done() without being blocked by this reader.
|
|
||||||
io.Copy(stdoutWriter, stdout)
|
|
||||||
stdoutWriter.Close()
|
|
||||||
}()
|
|
||||||
|
|
||||||
eg.Go(func() error {
|
|
||||||
defer stdoutReader.Close()
|
|
||||||
return copyToStream(1, stream, stdoutReader)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
if stderr != nil {
|
|
||||||
stderrReader, stderrWriter := io.Pipe()
|
|
||||||
eg.Go(func() error {
|
|
||||||
<-done
|
|
||||||
return stderrWriter.Close()
|
|
||||||
})
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
// do not wait for read completion but return here and let the caller send EOF
|
|
||||||
// this allows us to return on ctx.Done() without being blocked by this reader.
|
|
||||||
io.Copy(stderrWriter, stderr)
|
|
||||||
stderrWriter.Close()
|
|
||||||
}()
|
|
||||||
|
|
||||||
eg.Go(func() error {
|
|
||||||
defer stderrReader.Close()
|
|
||||||
return copyToStream(2, stream, stderrReader)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
msgCh := make(chan *pb.Message)
|
|
||||||
eg.Go(func() error {
|
|
||||||
defer close(msgCh)
|
|
||||||
for {
|
|
||||||
msg, err := receive(ctx, stream)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
select {
|
|
||||||
case msgCh <- msg:
|
|
||||||
case <-done:
|
|
||||||
return nil
|
|
||||||
case <-ctx.Done():
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
eg.Go(func() error {
|
|
||||||
defer close(done)
|
|
||||||
for {
|
|
||||||
var msg *pb.Message
|
|
||||||
select {
|
|
||||||
case msg = <-msgCh:
|
|
||||||
case <-ctx.Done():
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if msg == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if file := msg.GetFile(); file != nil {
|
|
||||||
if file.Fd != 0 {
|
|
||||||
return errors.Errorf("unexpected fd: %v", file.Fd)
|
|
||||||
}
|
|
||||||
if stdin == nil {
|
|
||||||
continue // no stdin destination is specified so ignore the data
|
|
||||||
}
|
|
||||||
if len(file.Data) > 0 {
|
|
||||||
_, err := stdin.Write(file.Data)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if file.EOF {
|
|
||||||
stdin.Close()
|
|
||||||
}
|
|
||||||
} else if resize := msg.GetResize(); resize != nil {
|
|
||||||
if ioConfig.resizeFn != nil {
|
|
||||||
ioConfig.resizeFn(ctx, winSize{
|
|
||||||
cols: resize.Cols,
|
|
||||||
rows: resize.Rows,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
} else if sig := msg.GetSignal(); sig != nil {
|
|
||||||
if ioConfig.signalFn != nil {
|
|
||||||
syscallSignal, ok := signal.SignalMap[sig.Name]
|
|
||||||
if !ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
ioConfig.signalFn(ctx, syscallSignal)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return errors.Errorf("unexpected message: %T", msg.GetInput())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
return eg.Wait()
|
|
||||||
}
|
|
||||||
|
|
||||||
type ioAttachConfig struct {
|
|
||||||
stdin io.ReadCloser
|
|
||||||
stdout, stderr io.WriteCloser
|
|
||||||
signal <-chan syscall.Signal
|
|
||||||
resize <-chan winSize
|
|
||||||
}
|
|
||||||
|
|
||||||
type winSize struct {
|
|
||||||
rows uint32
|
|
||||||
cols uint32
|
|
||||||
}
|
|
||||||
|
|
||||||
func attachIO(ctx context.Context, stream msgStream, initMessage *pb.InitMessage, cfg ioAttachConfig) (retErr error) {
|
|
||||||
eg, ctx := errgroup.WithContext(ctx)
|
|
||||||
done := make(chan struct{})
|
|
||||||
|
|
||||||
if err := stream.Send(&pb.Message{
|
|
||||||
Input: &pb.Message_Init{
|
|
||||||
Init: initMessage,
|
|
||||||
},
|
|
||||||
}); err != nil {
|
|
||||||
return errors.Wrap(err, "failed to init")
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.stdin != nil {
|
|
||||||
stdinReader, stdinWriter := io.Pipe()
|
|
||||||
eg.Go(func() error {
|
|
||||||
<-done
|
|
||||||
return stdinWriter.Close()
|
|
||||||
})
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
// do not wait for read completion but return here and let the caller send EOF
|
|
||||||
// this allows us to return on ctx.Done() without being blocked by this reader.
|
|
||||||
io.Copy(stdinWriter, cfg.stdin)
|
|
||||||
stdinWriter.Close()
|
|
||||||
}()
|
|
||||||
|
|
||||||
eg.Go(func() error {
|
|
||||||
defer stdinReader.Close()
|
|
||||||
return copyToStream(0, stream, stdinReader)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.signal != nil {
|
|
||||||
eg.Go(func() error {
|
|
||||||
names := signalNames()
|
|
||||||
for {
|
|
||||||
var sig syscall.Signal
|
|
||||||
select {
|
|
||||||
case sig = <-cfg.signal:
|
|
||||||
case <-done:
|
|
||||||
return nil
|
|
||||||
case <-ctx.Done():
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
name := names[sig]
|
|
||||||
if name == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if err := stream.Send(&pb.Message{
|
|
||||||
Input: &pb.Message_Signal{
|
|
||||||
Signal: &pb.SignalMessage{
|
|
||||||
Name: name,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}); err != nil {
|
|
||||||
return errors.Wrap(err, "failed to send signal")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.resize != nil {
|
|
||||||
eg.Go(func() error {
|
|
||||||
for {
|
|
||||||
var win winSize
|
|
||||||
select {
|
|
||||||
case win = <-cfg.resize:
|
|
||||||
case <-done:
|
|
||||||
return nil
|
|
||||||
case <-ctx.Done():
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if err := stream.Send(&pb.Message{
|
|
||||||
Input: &pb.Message_Resize{
|
|
||||||
Resize: &pb.ResizeMessage{
|
|
||||||
Rows: win.rows,
|
|
||||||
Cols: win.cols,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}); err != nil {
|
|
||||||
return errors.Wrap(err, "failed to send resize")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
msgCh := make(chan *pb.Message)
|
|
||||||
eg.Go(func() error {
|
|
||||||
defer close(msgCh)
|
|
||||||
for {
|
|
||||||
msg, err := receive(ctx, stream)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
select {
|
|
||||||
case msgCh <- msg:
|
|
||||||
case <-done:
|
|
||||||
return nil
|
|
||||||
case <-ctx.Done():
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
eg.Go(func() error {
|
|
||||||
eofs := make(map[uint32]struct{})
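// eofs tracks which fds have already reported EOF so any further data for them is dropped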
|
|
||||||
defer close(done)
|
|
||||||
for {
|
|
||||||
var msg *pb.Message
|
|
||||||
select {
|
|
||||||
case msg = <-msgCh:
|
|
||||||
case <-ctx.Done():
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if msg == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if file := msg.GetFile(); file != nil {
|
|
||||||
if _, ok := eofs[file.Fd]; ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
var out io.WriteCloser
|
|
||||||
switch file.Fd {
|
|
||||||
case 1:
|
|
||||||
out = cfg.stdout
|
|
||||||
case 2:
|
|
||||||
out = cfg.stderr
|
|
||||||
default:
|
|
||||||
return errors.Errorf("unsupported fd %d", file.Fd)
|
|
||||||
}
|
|
||||||
if out == nil {
|
|
||||||
logrus.Warnf("attachIO: no writer for fd %d", file.Fd)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if len(file.Data) > 0 {
|
|
||||||
if _, err := out.Write(file.Data); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if file.EOF {
|
|
||||||
eofs[file.Fd] = struct{}{}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return errors.Errorf("unexpected message: %T", msg.GetInput())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
return eg.Wait()
|
|
||||||
}
|
|
||||||
|
|
||||||
func receive(ctx context.Context, stream msgStream) (*pb.Message, error) {
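// stream.Recv has no context awareness, so run it in a goroutine and race the result against ctx cancellation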
|
|
||||||
msgCh := make(chan *pb.Message)
|
|
||||||
errCh := make(chan error)
|
|
||||||
go func() {
|
|
||||||
msg, err := stream.Recv()
|
|
||||||
if err != nil {
|
|
||||||
if errors.Is(err, io.EOF) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
errCh <- err
|
|
||||||
return
|
|
||||||
}
|
|
||||||
msgCh <- msg
|
|
||||||
}()
|
|
||||||
select {
|
|
||||||
case msg := <-msgCh:
|
|
||||||
return msg, nil
|
|
||||||
case err := <-errCh:
|
|
||||||
return nil, err
|
|
||||||
case <-ctx.Done():
|
|
||||||
return nil, context.Cause(ctx)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func copyToStream(fd uint32, snd msgStream, r io.Reader) error {
|
|
||||||
for {
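// forward the reader in 32 KiB chunks, sending each chunk as its own FdMessage for this fd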
|
|
||||||
buf := make([]byte, 32*1024)
|
|
||||||
n, err := r.Read(buf)
|
|
||||||
if err != nil {
|
|
||||||
if err == io.EOF {
|
|
||||||
break // break loop and send EOF
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
} else if n > 0 {
|
|
||||||
if err := snd.Send(&pb.Message{
|
|
||||||
Input: &pb.Message_File{
|
|
||||||
File: &pb.FdMessage{
|
|
||||||
Fd: fd,
|
|
||||||
Data: buf[:n],
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return snd.Send(&pb.Message{
|
|
||||||
Input: &pb.Message_File{
|
|
||||||
File: &pb.FdMessage{
|
|
||||||
Fd: fd,
|
|
||||||
EOF: true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func signalNames() map[syscall.Signal]string {
|
|
||||||
m := make(map[syscall.Signal]string, len(signal.SignalMap))
|
|
||||||
for name, value := range signal.SignalMap {
|
|
||||||
m[value] = name
|
|
||||||
}
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
|
|
||||||
type debugStream struct {
|
|
||||||
msgStream
|
|
||||||
prefix string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *debugStream) Send(msg *pb.Message) error {
|
|
||||||
switch m := msg.GetInput().(type) {
|
|
||||||
case *pb.Message_File:
|
|
||||||
if m.File.EOF {
|
|
||||||
logrus.Debugf("|---> File Message (sender:%v) fd=%d, EOF", s.prefix, m.File.Fd)
|
|
||||||
} else {
|
|
||||||
logrus.Debugf("|---> File Message (sender:%v) fd=%d, %d bytes", s.prefix, m.File.Fd, len(m.File.Data))
|
|
||||||
}
|
|
||||||
case *pb.Message_Resize:
|
|
||||||
logrus.Debugf("|---> Resize Message (sender:%v): %+v", s.prefix, m.Resize)
|
|
||||||
case *pb.Message_Signal:
|
|
||||||
logrus.Debugf("|---> Signal Message (sender:%v): %s", s.prefix, m.Signal.Name)
|
|
||||||
}
|
|
||||||
return s.msgStream.Send(msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *debugStream) Recv() (*pb.Message, error) {
|
|
||||||
msg, err := s.msgStream.Recv()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
switch m := msg.GetInput().(type) {
|
|
||||||
case *pb.Message_File:
|
|
||||||
if m.File.EOF {
|
|
||||||
logrus.Debugf("|<--- File Message (receiver:%v) fd=%d, EOF", s.prefix, m.File.Fd)
|
|
||||||
} else {
|
|
||||||
logrus.Debugf("|<--- File Message (receiver:%v) fd=%d, %d bytes", s.prefix, m.File.Fd, len(m.File.Data))
|
|
||||||
}
|
|
||||||
case *pb.Message_Resize:
|
|
||||||
logrus.Debugf("|<--- Resize Message (receiver:%v): %+v", s.prefix, m.Resize)
|
|
||||||
case *pb.Message_Signal:
|
|
||||||
logrus.Debugf("|<--- Signal Message (receiver:%v): %s", s.prefix, m.Signal.Name)
|
|
||||||
}
|
|
||||||
return msg, nil
|
|
||||||
}
|
|
@@ -1,445 +0,0 @@
|
|||||||
package remote
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"io"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/docker/buildx/build"
|
|
||||||
controllererrors "github.com/docker/buildx/controller/errdefs"
|
|
||||||
"github.com/docker/buildx/controller/pb"
|
|
||||||
"github.com/docker/buildx/controller/processes"
|
|
||||||
"github.com/docker/buildx/util/desktop"
|
|
||||||
"github.com/docker/buildx/util/ioset"
|
|
||||||
"github.com/docker/buildx/util/progress"
|
|
||||||
"github.com/docker/buildx/version"
|
|
||||||
"github.com/moby/buildkit/client"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
"golang.org/x/sync/errgroup"
|
|
||||||
)
|
|
||||||
|
|
||||||
type BuildFunc func(ctx context.Context, options *pb.BuildOptions, stdin io.Reader, progress progress.Writer) (resp *client.SolveResponse, res *build.ResultHandle, inp *build.Inputs, err error)
|
|
||||||
|
|
||||||
func NewServer(buildFunc BuildFunc) *Server {
|
|
||||||
return &Server{
|
|
||||||
buildFunc: buildFunc,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type Server struct {
|
|
||||||
buildFunc BuildFunc
|
|
||||||
session map[string]*session
|
|
||||||
sessionMu sync.Mutex
|
|
||||||
}
|
|
||||||
|
|
||||||
type session struct {
|
|
||||||
buildOnGoing atomic.Bool
|
|
||||||
statusChan chan *pb.StatusResponse
|
|
||||||
cancelBuild func(error)
|
|
||||||
buildOptions *pb.BuildOptions
|
|
||||||
inputPipe *io.PipeWriter
|
|
||||||
|
|
||||||
result *build.ResultHandle
|
|
||||||
|
|
||||||
processes *processes.Manager
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *session) cancelRunningProcesses() {
|
|
||||||
s.processes.CancelRunningProcesses()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Server) ListProcesses(ctx context.Context, req *pb.ListProcessesRequest) (res *pb.ListProcessesResponse, err error) {
|
|
||||||
m.sessionMu.Lock()
|
|
||||||
defer m.sessionMu.Unlock()
|
|
||||||
s, ok := m.session[req.SessionID]
|
|
||||||
if !ok {
|
|
||||||
return nil, errors.Errorf("unknown session ID %q", req.SessionID)
|
|
||||||
}
|
|
||||||
res = new(pb.ListProcessesResponse)
|
|
||||||
res.Infos = append(res.Infos, s.processes.ListProcesses()...)
|
|
||||||
return res, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Server) DisconnectProcess(ctx context.Context, req *pb.DisconnectProcessRequest) (res *pb.DisconnectProcessResponse, err error) {
|
|
||||||
m.sessionMu.Lock()
|
|
||||||
defer m.sessionMu.Unlock()
|
|
||||||
s, ok := m.session[req.SessionID]
|
|
||||||
if !ok {
|
|
||||||
return nil, errors.Errorf("unknown session ID %q", req.SessionID)
|
|
||||||
}
|
|
||||||
return res, s.processes.DeleteProcess(req.ProcessID)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Server) Info(ctx context.Context, req *pb.InfoRequest) (res *pb.InfoResponse, err error) {
|
|
||||||
return &pb.InfoResponse{
|
|
||||||
BuildxVersion: &pb.BuildxVersion{
|
|
||||||
Package: version.Package,
|
|
||||||
Version: version.Version,
|
|
||||||
Revision: version.Revision,
|
|
||||||
},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Server) List(ctx context.Context, req *pb.ListRequest) (res *pb.ListResponse, err error) {
|
|
||||||
keys := make(map[string]struct{})
|
|
||||||
|
|
||||||
m.sessionMu.Lock()
|
|
||||||
for k := range m.session {
|
|
||||||
keys[k] = struct{}{}
|
|
||||||
}
|
|
||||||
m.sessionMu.Unlock()
|
|
||||||
|
|
||||||
var keysL []string
|
|
||||||
for k := range keys {
|
|
||||||
keysL = append(keysL, k)
|
|
||||||
}
|
|
||||||
return &pb.ListResponse{
|
|
||||||
Keys: keysL,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Server) Disconnect(ctx context.Context, req *pb.DisconnectRequest) (res *pb.DisconnectResponse, err error) {
|
|
||||||
sessionID := req.SessionID
|
|
||||||
if sessionID == "" {
|
|
||||||
return nil, errors.New("disconnect: empty session ID")
|
|
||||||
}
|
|
||||||
|
|
||||||
m.sessionMu.Lock()
|
|
||||||
if s, ok := m.session[sessionID]; ok {
|
|
||||||
if s.cancelBuild != nil {
|
|
||||||
s.cancelBuild(errors.WithStack(context.Canceled))
|
|
||||||
}
|
|
||||||
s.cancelRunningProcesses()
|
|
||||||
if s.result != nil {
|
|
||||||
s.result.Done()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
delete(m.session, sessionID)
|
|
||||||
m.sessionMu.Unlock()
|
|
||||||
|
|
||||||
return &pb.DisconnectResponse{}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Server) Close() error {
|
|
||||||
m.sessionMu.Lock()
|
|
||||||
for k := range m.session {
|
|
||||||
if s, ok := m.session[k]; ok {
|
|
||||||
if s.cancelBuild != nil {
|
|
||||||
s.cancelBuild(errors.WithStack(context.Canceled))
|
|
||||||
}
|
|
||||||
s.cancelRunningProcesses()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
m.sessionMu.Unlock()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Server) Inspect(ctx context.Context, req *pb.InspectRequest) (*pb.InspectResponse, error) {
|
|
||||||
sessionID := req.SessionID
|
|
||||||
if sessionID == "" {
|
|
||||||
return nil, errors.New("inspect: empty session ID")
|
|
||||||
}
|
|
||||||
var bo *pb.BuildOptions
|
|
||||||
m.sessionMu.Lock()
|
|
||||||
if s, ok := m.session[sessionID]; ok {
|
|
||||||
bo = s.buildOptions
|
|
||||||
} else {
|
|
||||||
m.sessionMu.Unlock()
|
|
||||||
return nil, errors.Errorf("inspect: unknown key %v", sessionID)
|
|
||||||
}
|
|
||||||
m.sessionMu.Unlock()
|
|
||||||
return &pb.InspectResponse{Options: bo}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Server) Build(ctx context.Context, req *pb.BuildRequest) (*pb.BuildResponse, error) {
|
|
||||||
sessionID := req.SessionID
|
|
||||||
if sessionID == "" {
|
|
||||||
return nil, errors.New("build: empty session ID")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Prepare status channel and session
|
|
||||||
m.sessionMu.Lock()
|
|
||||||
if m.session == nil {
|
|
||||||
m.session = make(map[string]*session)
|
|
||||||
}
|
|
||||||
s, ok := m.session[sessionID]
|
|
||||||
if ok {
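// a session with this ID already exists (e.g. a reconnecting client); only one build may run on it at a time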
|
|
||||||
if !s.buildOnGoing.CompareAndSwap(false, true) {
|
|
||||||
m.sessionMu.Unlock()
|
|
||||||
return &pb.BuildResponse{}, errors.New("build ongoing")
|
|
||||||
}
|
|
||||||
s.cancelRunningProcesses()
|
|
||||||
s.result = nil
|
|
||||||
} else {
|
|
||||||
s = &session{}
|
|
||||||
s.buildOnGoing.Store(true)
|
|
||||||
}
|
|
||||||
|
|
||||||
s.processes = processes.NewManager()
|
|
||||||
statusChan := make(chan *pb.StatusResponse)
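// statusChan carries progress updates for this build; Status() streams them to the client until the deferred close below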
|
|
||||||
s.statusChan = statusChan
|
|
||||||
inR, inW := io.Pipe()
|
|
||||||
defer inR.Close()
|
|
||||||
s.inputPipe = inW
|
|
||||||
m.session[sessionID] = s
|
|
||||||
m.sessionMu.Unlock()
|
|
||||||
defer func() {
|
|
||||||
close(statusChan)
|
|
||||||
m.sessionMu.Lock()
|
|
||||||
s, ok := m.session[sessionID]
|
|
||||||
if ok {
|
|
||||||
s.statusChan = nil
|
|
||||||
s.buildOnGoing.Store(false)
|
|
||||||
}
|
|
||||||
m.sessionMu.Unlock()
|
|
||||||
}()
|
|
||||||
|
|
||||||
pw := pb.NewProgressWriter(statusChan)
|
|
||||||
|
|
||||||
// Build the specified request
|
|
||||||
ctx, cancel := context.WithCancelCause(ctx)
|
|
||||||
defer func() { cancel(errors.WithStack(context.Canceled)) }()
|
|
||||||
resp, res, _, buildErr := m.buildFunc(ctx, req.Options, inR, pw)
|
|
||||||
m.sessionMu.Lock()
|
|
||||||
if s, ok := m.session[sessionID]; ok {
|
|
||||||
// NOTE: buildFunc can return *build.ResultHandle even on error (e.g. when it's implemented using (github.com/docker/buildx/controller/build).RunBuild).
|
|
||||||
if res != nil {
|
|
||||||
s.result = res
|
|
||||||
s.cancelBuild = cancel
|
|
||||||
s.buildOptions = req.Options
|
|
||||||
m.session[sessionID] = s
|
|
||||||
if buildErr != nil {
|
|
||||||
var ref string
|
|
||||||
var ebr *desktop.ErrorWithBuildRef
|
|
||||||
if errors.As(buildErr, &ebr) {
|
|
||||||
ref = ebr.Ref
|
|
||||||
}
|
|
||||||
buildErr = controllererrors.WrapBuild(buildErr, sessionID, ref)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
m.sessionMu.Unlock()
|
|
||||||
return nil, errors.Errorf("build: unknown session ID %v", sessionID)
|
|
||||||
}
|
|
||||||
m.sessionMu.Unlock()
|
|
||||||
|
|
||||||
if buildErr != nil {
|
|
||||||
return nil, buildErr
|
|
||||||
}
|
|
||||||
|
|
||||||
if resp == nil {
|
|
||||||
resp = &client.SolveResponse{}
|
|
||||||
}
|
|
||||||
return &pb.BuildResponse{
|
|
||||||
ExporterResponse: resp.ExporterResponse,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Server) Status(req *pb.StatusRequest, stream pb.Controller_StatusServer) error {
|
|
||||||
sessionID := req.SessionID
|
|
||||||
if sessionID == "" {
|
|
||||||
return errors.New("status: empty session ID")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wait and get status channel prepared by Build()
|
|
||||||
var statusChan <-chan *pb.StatusResponse
|
|
||||||
for {
|
|
||||||
// TODO: timeout?
|
|
||||||
m.sessionMu.Lock()
|
|
||||||
if _, ok := m.session[sessionID]; !ok || m.session[sessionID].statusChan == nil {
|
|
||||||
m.sessionMu.Unlock()
|
|
||||||
time.Sleep(time.Millisecond) // TODO: wait Build without busy loop and make it cancellable
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
statusChan = m.session[sessionID].statusChan
|
|
||||||
m.sessionMu.Unlock()
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
// forward status
|
|
||||||
for ss := range statusChan {
|
|
||||||
if ss == nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if err := stream.Send(ss); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Server) Input(stream pb.Controller_InputServer) (err error) {
|
|
||||||
// Get the target ref from init message
|
|
||||||
msg, err := stream.Recv()
|
|
||||||
if err != nil {
|
|
||||||
if !errors.Is(err, io.EOF) {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
init := msg.GetInit()
|
|
||||||
if init == nil {
|
|
||||||
return errors.Errorf("unexpected message: %T; wanted init", msg.GetInit())
|
|
||||||
}
|
|
||||||
sessionID := init.SessionID
|
|
||||||
if sessionID == "" {
|
|
||||||
return errors.New("input: no session ID is provided")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wait and get input stream pipe prepared by Build()
|
|
||||||
var inputPipeW *io.PipeWriter
|
|
||||||
for {
|
|
||||||
// TODO: timeout?
|
|
||||||
m.sessionMu.Lock()
|
|
||||||
if _, ok := m.session[sessionID]; !ok || m.session[sessionID].inputPipe == nil {
|
|
||||||
m.sessionMu.Unlock()
|
|
||||||
time.Sleep(time.Millisecond) // TODO: wait Build without busy loop and make it cancellable
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
inputPipeW = m.session[sessionID].inputPipe
|
|
||||||
m.sessionMu.Unlock()
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
// Forward input stream
|
|
||||||
eg, ctx := errgroup.WithContext(context.TODO())
|
|
||||||
done := make(chan struct{})
|
|
||||||
msgCh := make(chan *pb.InputMessage)
|
|
||||||
eg.Go(func() error {
|
|
||||||
defer close(msgCh)
|
|
||||||
for {
|
|
||||||
msg, err := stream.Recv()
|
|
||||||
if err != nil {
|
|
||||||
if !errors.Is(err, io.EOF) {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
select {
|
|
||||||
case msgCh <- msg:
|
|
||||||
case <-done:
|
|
||||||
return nil
|
|
||||||
case <-ctx.Done():
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
eg.Go(func() (retErr error) {
|
|
||||||
defer close(done)
|
|
||||||
defer func() {
|
|
||||||
if retErr != nil {
|
|
||||||
inputPipeW.CloseWithError(retErr)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
inputPipeW.Close()
|
|
||||||
}()
|
|
||||||
for {
|
|
||||||
var msg *pb.InputMessage
|
|
||||||
select {
|
|
||||||
case msg = <-msgCh:
|
|
||||||
case <-ctx.Done():
|
|
||||||
return context.Cause(ctx)
|
|
||||||
}
|
|
||||||
if msg == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if data := msg.GetData(); data != nil {
|
|
||||||
if len(data.Data) > 0 {
|
|
||||||
_, err := inputPipeW.Write(data.Data)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if data.EOF {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
return eg.Wait()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Server) Invoke(srv pb.Controller_InvokeServer) error {
|
|
||||||
containerIn, containerOut := ioset.Pipe()
|
|
||||||
defer func() { containerOut.Close(); containerIn.Close() }()
|
|
||||||
|
|
||||||
initDoneCh := make(chan *processes.Process)
|
|
||||||
initErrCh := make(chan error)
|
|
||||||
eg, egCtx := errgroup.WithContext(context.TODO())
|
|
||||||
srvIOCtx, srvIOCancel := context.WithCancelCause(egCtx)
|
|
||||||
eg.Go(func() error {
|
|
||||||
defer srvIOCancel(errors.WithStack(context.Canceled))
|
|
||||||
return serveIO(srvIOCtx, srv, func(initMessage *pb.InitMessage) (retErr error) {
|
|
||||||
defer func() {
|
|
||||||
if retErr != nil {
|
|
||||||
initErrCh <- retErr
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
sessionID := initMessage.SessionID
|
|
||||||
cfg := initMessage.InvokeConfig
|
|
||||||
|
|
||||||
m.sessionMu.Lock()
|
|
||||||
s, ok := m.session[sessionID]
|
|
||||||
if !ok {
|
|
||||||
m.sessionMu.Unlock()
|
|
||||||
return errors.Errorf("invoke: unknown session ID %v", sessionID)
|
|
||||||
}
|
|
||||||
m.sessionMu.Unlock()
|
|
||||||
|
|
||||||
pid := initMessage.ProcessID
|
|
||||||
if pid == "" {
|
|
||||||
return errors.Errorf("invoke: specify process ID")
|
|
||||||
}
|
|
||||||
proc, ok := s.processes.Get(pid)
|
|
||||||
if !ok {
|
|
||||||
// Start a new process.
|
|
||||||
if cfg == nil {
|
|
||||||
return errors.New("no container config is provided")
|
|
||||||
}
|
|
||||||
var err error
|
|
||||||
proc, err = s.processes.StartProcess(pid, s.result, cfg)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Attach containerIn to this process
|
|
||||||
proc.ForwardIO(&containerIn, srvIOCancel)
|
|
||||||
initDoneCh <- proc
|
|
||||||
return nil
|
|
||||||
}, &ioServerConfig{
|
|
||||||
stdin: containerOut.Stdin,
|
|
||||||
stdout: containerOut.Stdout,
|
|
||||||
stderr: containerOut.Stderr,
|
|
||||||
// TODO: signal, resize
|
|
||||||
})
|
|
||||||
})
|
|
||||||
eg.Go(func() (rErr error) {
|
|
||||||
defer srvIOCancel(errors.WithStack(context.Canceled))
|
|
||||||
// Wait for init done
|
|
||||||
var proc *processes.Process
|
|
||||||
select {
|
|
||||||
case p := <-initDoneCh:
|
|
||||||
proc = p
|
|
||||||
case err := <-initErrCh:
|
|
||||||
return err
|
|
||||||
case <-egCtx.Done():
|
|
||||||
return egCtx.Err()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wait for IO done
|
|
||||||
select {
|
|
||||||
case <-srvIOCtx.Done():
|
|
||||||
return srvIOCtx.Err()
|
|
||||||
case err := <-proc.Done():
|
|
||||||
return err
|
|
||||||
case <-egCtx.Done():
|
|
||||||
return egCtx.Err()
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
return eg.Wait()
|
|
||||||
}
|
|
@@ -48,6 +48,8 @@ target "lint" {
     "linux/s390x",
     "linux/ppc64le",
     "linux/riscv64",
+    "netbsd/amd64",
+    "netbsd/arm64",
     "openbsd/amd64",
     "openbsd/arm64",
     "windows/amd64",
@@ -167,6 +169,8 @@ target "binaries-cross" {
     "linux/ppc64le",
     "linux/riscv64",
     "linux/s390x",
+    "netbsd/amd64",
+    "netbsd/arm64",
     "openbsd/amd64",
     "openbsd/arm64",
     "windows/amd64",
@@ -347,18 +347,22 @@ is defined in https://golang.org/pkg/path/#Match.
 ```console
 $ docker buildx bake --set target.args.mybuildarg=value
 $ docker buildx bake --set target.platform=linux/arm64
 $ docker buildx bake --set foo*.args.mybuildarg=value   # overrides build arg for all targets starting with 'foo'
 $ docker buildx bake --set *.platform=linux/arm64       # overrides platform for all targets
 $ docker buildx bake --set foo*.no-cache                # bypass caching only for targets starting with 'foo'
+$ docker buildx bake --set target.platform+=linux/arm64 # appends 'linux/arm64' to the platform list
 ```
 
 You can override the following fields:
 
+* `annotations`
+* `attest`
 * `args`
 * `cache-from`
 * `cache-to`
 * `context`
 * `dockerfile`
+* `entitlements`
 * `labels`
 * `load`
 * `no-cache`
@@ -371,3 +375,20 @@ You can override the following fields:
 * `ssh`
 * `tags`
 * `target`
+
+You can append using `+=` operator for the following fields:
+
+* `annotations`¹
+* `attest`¹
+* `cache-from`
+* `cache-to`
+* `entitlements`¹
+* `no-cache-filter`
+* `output`
+* `platform`
+* `secrets`
+* `ssh`
+* `tags`
+
+> [!NOTE]
+> ¹ These fields already append by default.
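As an illustration (the target pattern and tag below are placeholders, not taken from the docs), appending works the same way for other list-valued fields such as `tags`:

```console
$ docker buildx bake --set "*.tags+=registry.example.com/myapp:nightly"
```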
@@ -16,7 +16,7 @@ Start a build
|
|||||||
| Name | Type | Default | Description |
|
| Name | Type | Default | Description |
|
||||||
|:----------------------------------------|:--------------|:----------|:-------------------------------------------------------------------------------------------------------------|
|
|:----------------------------------------|:--------------|:----------|:-------------------------------------------------------------------------------------------------------------|
|
||||||
| [`--add-host`](#add-host) | `stringSlice` | | Add a custom host-to-IP mapping (format: `host:ip`) |
|
| [`--add-host`](#add-host) | `stringSlice` | | Add a custom host-to-IP mapping (format: `host:ip`) |
|
||||||
| [`--allow`](#allow) | `stringSlice` | | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
|
| [`--allow`](#allow) | `stringArray` | | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
|
||||||
| [`--annotation`](#annotation) | `stringArray` | | Add annotation to the image |
|
| [`--annotation`](#annotation) | `stringArray` | | Add annotation to the image |
|
||||||
| [`--attest`](#attest) | `stringArray` | | Attestation parameters (format: `type=sbom,generator=image`) |
|
| [`--attest`](#attest) | `stringArray` | | Attestation parameters (format: `type=sbom,generator=image`) |
|
||||||
| [`--build-arg`](#build-arg) | `stringArray` | | Set build-time variables |
|
| [`--build-arg`](#build-arg) | `stringArray` | | Set build-time variables |
|
||||||
@@ -28,7 +28,6 @@ Start a build
|
|||||||
| [`--cgroup-parent`](#cgroup-parent) | `string` | | Set the parent cgroup for the `RUN` instructions during build |
|
| [`--cgroup-parent`](#cgroup-parent) | `string` | | Set the parent cgroup for the `RUN` instructions during build |
|
||||||
| [`--check`](#check) | `bool` | | Shorthand for `--call=check` |
|
| [`--check`](#check) | `bool` | | Shorthand for `--call=check` |
|
||||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||||
| `--detach` | `bool` | | Detach buildx server (supported only on linux) (EXPERIMENTAL) |
|
|
||||||
| [`-f`](#file), [`--file`](#file) | `string` | | Name of the Dockerfile (default: `PATH/Dockerfile`) |
|
| [`-f`](#file), [`--file`](#file) | `string` | | Name of the Dockerfile (default: `PATH/Dockerfile`) |
|
||||||
| `--iidfile` | `string` | | Write the image ID to a file |
|
| `--iidfile` | `string` | | Write the image ID to a file |
|
||||||
| `--label` | `stringArray` | | Set metadata for an image |
|
| `--label` | `stringArray` | | Set metadata for an image |
|
||||||
@@ -44,10 +43,8 @@ Start a build
|
|||||||
| `--pull` | `bool` | | Always attempt to pull all referenced images |
|
| `--pull` | `bool` | | Always attempt to pull all referenced images |
|
||||||
| [`--push`](#push) | `bool` | | Shorthand for `--output=type=registry` |
|
| [`--push`](#push) | `bool` | | Shorthand for `--output=type=registry` |
|
||||||
| `-q`, `--quiet` | `bool` | | Suppress the build output and print image ID on success |
|
| `-q`, `--quiet` | `bool` | | Suppress the build output and print image ID on success |
|
||||||
| `--root` | `string` | | Specify root directory of server to connect (EXPERIMENTAL) |
|
|
||||||
| [`--sbom`](#sbom) | `string` | | Shorthand for `--attest=type=sbom` |
|
| [`--sbom`](#sbom) | `string` | | Shorthand for `--attest=type=sbom` |
|
||||||
| [`--secret`](#secret) | `stringArray` | | Secret to expose to the build (format: `id=mysecret[,src=/local/secret]`) |
|
| [`--secret`](#secret) | `stringArray` | | Secret to expose to the build (format: `id=mysecret[,src=/local/secret]`) |
|
||||||
| `--server-config` | `string` | | Specify buildx server config file (used only when launching new server) (EXPERIMENTAL) |
|
|
||||||
| [`--shm-size`](#shm-size) | `bytes` | `0` | Shared memory size for build containers |
|
| [`--shm-size`](#shm-size) | `bytes` | `0` | Shared memory size for build containers |
|
||||||
| [`--ssh`](#ssh) | `stringArray` | | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
|
| [`--ssh`](#ssh) | `stringArray` | | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
|
||||||
| [`-t`](#tag), [`--tag`](#tag) | `stringArray` | | Name and optionally a tag (format: `name:tag`) |
|
| [`-t`](#tag), [`--tag`](#tag) | `stringArray` | | Name and optionally a tag (format: `name:tag`) |
|
||||||
@@ -944,7 +941,7 @@ $ docker buildx build --secret [type=file,]id=<ID>[,src=<FILEPATH>] .
 ###### `type=file` usage
 
 In the following example, `type=file` is automatically detected because no
-environment variable mathing `aws` (the ID) is set.
+environment variable matching `aws` (the ID) is set.
 
 ```console
 $ docker buildx build --secret id=aws,src=$HOME/.aws/credentials .
@@ -12,16 +12,13 @@ Start debugger (EXPERIMENTAL)
|
|||||||
|
|
||||||
### Options
|
### Options
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
| Name | Type | Default | Description |
|
||||||
|:------------------|:---------|:--------|:--------------------------------------------------------------------------------------------------------------------|
|
|:----------------|:---------|:--------|:--------------------------------------------------------------------------------------------------------------------|
|
||||||
| `--builder` | `string` | | Override the configured builder instance |
|
| `--builder` | `string` | | Override the configured builder instance |
|
||||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||||
| `--detach` | `bool` | `true` | Detach buildx server for the monitor (supported only on linux) (EXPERIMENTAL) |
|
| `--invoke` | `string` | | Launch a monitor with executing specified command (EXPERIMENTAL) |
|
||||||
| `--invoke` | `string` | | Launch a monitor with executing specified command (EXPERIMENTAL) |
|
| `--on` | `string` | `error` | When to launch the monitor ([always, error]) (EXPERIMENTAL) |
|
||||||
| `--on` | `string` | `error` | When to launch the monitor ([always, error]) (EXPERIMENTAL) |
|
| `--progress` | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`) for the monitor. Use plain to show container output |
|
||||||
| `--progress` | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`) for the monitor. Use plain to show container output |
|
|
||||||
| `--root` | `string` | | Specify root directory of server to connect for the monitor (EXPERIMENTAL) |
|
|
||||||
| `--server-config` | `string` | | Specify buildx server config file for the monitor (used only when launching new server) (EXPERIMENTAL) |
|
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
|
@@ -12,7 +12,7 @@ Start a build
|
|||||||
| Name | Type | Default | Description |
|
| Name | Type | Default | Description |
|
||||||
|:--------------------|:--------------|:----------|:-------------------------------------------------------------------------------------------------------------|
|
|:--------------------|:--------------|:----------|:-------------------------------------------------------------------------------------------------------------|
|
||||||
| `--add-host` | `stringSlice` | | Add a custom host-to-IP mapping (format: `host:ip`) |
|
| `--add-host` | `stringSlice` | | Add a custom host-to-IP mapping (format: `host:ip`) |
|
||||||
| `--allow` | `stringSlice` | | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
|
| `--allow` | `stringArray` | | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
|
||||||
| `--annotation` | `stringArray` | | Add annotation to the image |
|
| `--annotation` | `stringArray` | | Add annotation to the image |
|
||||||
| `--attest` | `stringArray` | | Attestation parameters (format: `type=sbom,generator=image`) |
|
| `--attest` | `stringArray` | | Attestation parameters (format: `type=sbom,generator=image`) |
|
||||||
| `--build-arg` | `stringArray` | | Set build-time variables |
|
| `--build-arg` | `stringArray` | | Set build-time variables |
|
||||||
@@ -24,7 +24,6 @@ Start a build
|
|||||||
| `--cgroup-parent` | `string` | | Set the parent cgroup for the `RUN` instructions during build |
|
| `--cgroup-parent` | `string` | | Set the parent cgroup for the `RUN` instructions during build |
|
||||||
| `--check` | `bool` | | Shorthand for `--call=check` |
|
| `--check` | `bool` | | Shorthand for `--call=check` |
|
||||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||||
| `--detach` | `bool` | | Detach buildx server (supported only on linux) (EXPERIMENTAL) |
|
|
||||||
| `-f`, `--file` | `string` | | Name of the Dockerfile (default: `PATH/Dockerfile`) |
|
| `-f`, `--file` | `string` | | Name of the Dockerfile (default: `PATH/Dockerfile`) |
|
||||||
| `--iidfile` | `string` | | Write the image ID to a file |
|
| `--iidfile` | `string` | | Write the image ID to a file |
|
||||||
| `--label` | `stringArray` | | Set metadata for an image |
|
| `--label` | `stringArray` | | Set metadata for an image |
|
||||||
@@ -40,10 +39,8 @@ Start a build
|
|||||||
| `--pull` | `bool` | | Always attempt to pull all referenced images |
|
| `--pull` | `bool` | | Always attempt to pull all referenced images |
|
||||||
| `--push` | `bool` | | Shorthand for `--output=type=registry` |
|
| `--push` | `bool` | | Shorthand for `--output=type=registry` |
|
||||||
| `-q`, `--quiet` | `bool` | | Suppress the build output and print image ID on success |
|
| `-q`, `--quiet` | `bool` | | Suppress the build output and print image ID on success |
|
||||||
| `--root` | `string` | | Specify root directory of server to connect (EXPERIMENTAL) |
|
|
||||||
| `--sbom` | `string` | | Shorthand for `--attest=type=sbom` |
|
| `--sbom` | `string` | | Shorthand for `--attest=type=sbom` |
|
||||||
| `--secret` | `stringArray` | | Secret to expose to the build (format: `id=mysecret[,src=/local/secret]`) |
|
| `--secret` | `stringArray` | | Secret to expose to the build (format: `id=mysecret[,src=/local/secret]`) |
|
||||||
| `--server-config` | `string` | | Specify buildx server config file (used only when launching new server) (EXPERIMENTAL) |
|
|
||||||
| `--shm-size` | `bytes` | `0` | Shared memory size for build containers |
|
| `--shm-size` | `bytes` | `0` | Shared memory size for build containers |
|
||||||
| `--ssh` | `stringArray` | | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
|
| `--ssh` | `stringArray` | | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
|
||||||
| `-t`, `--tag` | `stringArray` | | Name and optionally a tag (format: `name:tag`) |
|
| `-t`, `--tag` | `stringArray` | | Name and optionally a tag (format: `name:tag`) |
|
||||||
|
@@ -7,6 +7,8 @@ Commands to work on build records
 
 | Name                                    | Description                                     |
 |:----------------------------------------|:------------------------------------------------|
+| [`export`](buildx_history_export.md)    | Export a build into Docker Desktop bundle       |
+| [`import`](buildx_history_import.md)    | Import a build into Docker Desktop              |
 | [`inspect`](buildx_history_inspect.md)  | Inspect a build                                 |
 | [`logs`](buildx_history_logs.md)        | Print the logs of a build                       |
 | [`ls`](buildx_history_ls.md)            | List build records                              |
@@ -25,3 +27,32 @@ Commands to work on build records
 
 <!---MARKER_GEN_END-->
+
+### Build references
+
+Most `buildx history` subcommands accept a build reference to identify which
+build to act on. You can specify the build in two ways:
+
+- By build ID, fetched by `docker buildx history ls`:
+
+  ```console
+  docker buildx history export qu2gsuo8ejqrwdfii23xkkckt --output build.dockerbuild
+  ```
+
+- By relative offset, to refer to recent builds:
+
+  ```console
+  docker buildx history export ^1 --output build.dockerbuild
+  ```
+
+- `^0` or no reference targets the most recent build
+- `^1` refers to the build before the most recent
+- `^2` refers to two builds back, and so on
+
+Offset references are supported in the following `buildx history` commands:
+
+- `logs`
+- `inspect`
+- `open`
+- `trace`
+- `export`
+- `rm`
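For example (assuming at least two completed build records exist), the same offset syntax works with the other supported subcommands:

```console
# print the logs of the build before the most recent one
docker buildx history logs ^1
```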
docs/reference/buildx_history_export.md (new file, 81 lines)
@@ -0,0 +1,81 @@
|
|||||||
|
# docker buildx history export
|
||||||
|
|
||||||
|
<!---MARKER_GEN_START-->
|
||||||
|
Export a build into Docker Desktop bundle
|
||||||
|
|
||||||
|
### Options
|
||||||
|
|
||||||
|
| Name | Type | Default | Description |
|
||||||
|
|:---------------------------------------|:---------|:--------|:-----------------------------------------|
|
||||||
|
| [`--all`](#all) | `bool` | | Export all records for the builder |
|
||||||
|
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||||
|
| [`-D`](#debug), [`--debug`](#debug) | `bool` | | Enable debug logging |
|
||||||
|
| [`-o`](#output), [`--output`](#output) | `string` | | Output file path |
|
||||||
|
|
||||||
|
|
||||||
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
Export one or more build records to `.dockerbuild` archive files. These archives
|
||||||
|
contain metadata, logs, and build outputs, and can be imported into Docker
|
||||||
|
Desktop or shared across environments.
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
### <a name="output"></a> Export a single build to a custom file (--output)
|
||||||
|
|
||||||
|
```console
|
||||||
|
docker buildx history export qu2gsuo8ejqrwdfii23xkkckt --output mybuild.dockerbuild
|
||||||
|
```
|
||||||
|
|
||||||
|
You can find build IDs by running:
|
||||||
|
|
||||||
|
```console
|
||||||
|
docker buildx history ls
|
||||||
|
```
|
||||||
|
|
||||||
|
### <a name="o"></a> Export multiple builds to individual `.dockerbuild` files (-o)
|
||||||
|
|
||||||
|
To export two builds to separate files:
|
||||||
|
|
||||||
|
```console
|
||||||
|
# Using build IDs
|
||||||
|
docker buildx history export qu2gsuo8ejqrwdfii23xkkckt qsiifiuf1ad9pa9qvppc0z1l3 -o multi.dockerbuild
|
||||||
|
|
||||||
|
# Or using relative offsets
|
||||||
|
docker buildx history export ^1 ^2 -o multi.dockerbuild
|
||||||
|
```
|
||||||
|
|
||||||
|
Or use shell redirection:
|
||||||
|
|
||||||
|
```console
|
||||||
|
docker buildx history export ^1 > mybuild.dockerbuild
|
||||||
|
docker buildx history export ^2 > backend-build.dockerbuild
|
||||||
|
```
|
||||||
|
|
||||||
|
### <a name="all"></a> Export all build records to a file (--all)
|
||||||
|
|
||||||
|
Use the `--all` flag and redirect the output:
|
||||||
|
|
||||||
|
```console
|
||||||
|
docker buildx history export --all > all-builds.dockerbuild
|
||||||
|
```
|
||||||
|
|
||||||
|
Or use the `--output` flag:
|
||||||
|
|
||||||
|
```console
|
||||||
|
docker buildx history export --all -o all-builds.dockerbuild
|
||||||
|
```
|
||||||
|
|
||||||
|
### <a name="builder"></a> Use a specific builder instance (--builder)
|
||||||
|
|
||||||
|
```console
|
||||||
|
docker buildx history export --builder builder0 ^1 -o builder0-build.dockerbuild
|
||||||
|
```
|
||||||
|
|
||||||
|
### <a name="debug"></a> Enable debug logging (--debug)
|
||||||
|
|
||||||
|
```console
|
||||||
|
docker buildx history export --debug qu2gsuo8ejqrwdfii23xkkckt -o debug-build.dockerbuild
|
||||||
|
```
|
docs/reference/buildx_history_import.md (new file, 47 lines)
@@ -0,0 +1,47 @@
|
|||||||
|
# docker buildx history import
|
||||||
|
|
||||||
|
<!---MARKER_GEN_START-->
|
||||||
|
Import a build into Docker Desktop
|
||||||
|
|
||||||
|
### Options
|
||||||
|
|
||||||
|
| Name | Type | Default | Description |
|
||||||
|
|:---------------------------------|:--------------|:--------|:-----------------------------------------|
|
||||||
|
| `--builder` | `string` | | Override the configured builder instance |
|
||||||
|
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||||
|
| [`-f`](#file), [`--file`](#file) | `stringArray` | | Import from a file path |
|
||||||
|
|
||||||
|
|
||||||
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
Import a build record from a `.dockerbuild` archive into Docker Desktop. This
|
||||||
|
lets you view, inspect, and analyze builds created in other environments or CI
|
||||||
|
pipelines.
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
### Import a `.dockerbuild` archive from standard input
|
||||||
|
|
||||||
|
```console
|
||||||
|
docker buildx history import < mybuild.dockerbuild
|
||||||
|
```
|
||||||
|
|
||||||
|
### <a name="file"></a> Import a build archive from a file (--file)
|
||||||
|
|
||||||
|
```console
|
||||||
|
docker buildx history import --file ./artifacts/backend-build.dockerbuild
|
||||||
|
```
|
||||||
|
|
||||||
|
### Open a build manually
|
||||||
|
|
||||||
|
By default, the `import` command automatically opens the imported build in Docker
|
||||||
|
Desktop. You don't need to run `open` unless you're opening a specific build
|
||||||
|
or re-opening it later.
|
||||||
|
|
||||||
|
If you've imported multiple builds, you can open one manually:
|
||||||
|
|
||||||
|
```console
|
||||||
|
docker buildx history open ci-build
|
||||||
|
```
@@ -21,13 +21,61 @@ Inspect a build

<!---MARKER_GEN_END-->

## Description

Inspect a build record to view metadata such as duration, status, build inputs,
platforms, outputs, and attached artifacts. You can also use flags to extract
provenance, SBOMs, or other detailed information.

## Examples

### Inspect the most recent build

```console
$ docker buildx history inspect
Name: buildx (binaries)
Context: .
Dockerfile: Dockerfile
VCS Repository: https://github.com/crazy-max/buildx.git
VCS Revision: f15eaa1ee324ffbbab29605600d27a84cab86361
Target: binaries
Platforms: linux/amd64
Keep Git Dir: true

Started: 2025-02-07 11:56:24
Duration: 1m 1s
Build Steps: 16/16 (25% cached)

Image Resolve Mode: local

Materials:
URI                                                        DIGEST
pkg:docker/docker/dockerfile@1                             sha256:93bfd3b68c109427185cd78b4779fc82b484b0b7618e36d0f104d4d801e66d25
pkg:docker/golang@1.23-alpine3.21?platform=linux%2Famd64   sha256:2c49857f2295e89b23b28386e57e018a86620a8fede5003900f2d138ba9c4037
pkg:docker/tonistiigi/xx@1.6.1?platform=linux%2Famd64      sha256:923441d7c25f1e2eb5789f82d987693c47b8ed987c4ab3b075d6ed2b5d6779a3

Attachments:
DIGEST                                                                    PLATFORM   TYPE
sha256:217329d2af959d4f02e3a96dcbe62bf100cab1feb8006a047ddfe51a5397f7e3              https://slsa.dev/provenance/v0.2
```

### Inspect a specific build

```console
# Using a build ID
docker buildx history inspect qu2gsuo8ejqrwdfii23xkkckt

# Or using a relative offset
docker buildx history inspect ^1
```
### <a name="format"></a> Format the output (--format)

The `--format` option controls the output format: `pretty` (default), `json`,
or a Go template.

**Pretty output**

```console
$ docker buildx history inspect
Name: buildx (binaries)
@@ -57,6 +105,7 @@ sha256:217329d2af959d4f02e3a96dcbe62bf100cab1feb8006a047ddfe51a5397f7e3
Print build logs: docker buildx history logs g9808bwrjrlkbhdamxklx660b
```

**JSON output**

```console
$ docker buildx history inspect --format json
@@ -111,6 +160,8 @@ $ docker buildx history inspect --format json
}
```

**Go template output**

```console
$ docker buildx history inspect --format "{{.Name}}: {{.VCSRepository}} ({{.VCSRevision}})"
buildx (binaries): https://github.com/crazy-max/buildx.git (f15eaa1ee324ffbbab29605600d27a84cab86361)
@@ -5,13 +5,78 @@ Inspect a build attachment

### Options

| Name | Type | Default | Description |
-|:----------------|:---------|:--------|:-----------------------------------------|
+|:------------------|:---------|:--------|:-----------------------------------------|
| `--builder` | `string` | | Override the configured builder instance |
| `-D`, `--debug` | `bool` | | Enable debug logging |
| `--platform` | `string` | | Platform of attachment |
-| `--type` | `string` | | Type of attachment |
+| [`--type`](#type) | `string` | | Type of attachment |


<!---MARKER_GEN_END-->

## Description

Inspect a specific attachment from a build record, such as a provenance file or
SBOM. Attachments are optional artifacts stored with the build and may be
platform-specific.

## Examples

### <a name="type"></a> Inspect a provenance attachment from a build (--type)

Supported types include `provenance` and `sbom`.

```console
$ docker buildx history inspect attachment qu2gsuo8ejqrwdfii23xkkckt --type provenance
{
  "_type": "https://slsa.dev/provenance/v0.2",
  "buildDefinition": {
    "buildType": "https://build.docker.com/BuildKit@v1",
    "externalParameters": {
      "target": "app",
      "platforms": ["linux/amd64"]
    }
  },
  "runDetails": {
    "builder": "docker",
    "by": "ci@docker.com"
  }
}
```

### Inspect an SBOM for linux/amd64

```console
$ docker buildx history inspect attachment ^0 \
  --type sbom \
  --platform linux/amd64
{
  "bomFormat": "CycloneDX",
  "specVersion": "1.5",
  "version": 1,
  "components": [
    {
      "type": "library",
      "name": "alpine",
      "version": "3.18.2"
    }
  ]
}
```

### Inspect an attachment by digest

You can inspect an attachment directly using its digest, which you can get from
the `inspect` output:

```console
# Using a build ID
docker buildx history inspect attachment qu2gsuo8ejqrwdfii23xkkckt sha256:abcdef123456...

# Or using a relative offset
docker buildx history inspect attachment ^0 sha256:abcdef123456...
```

Use `--type sbom` or `--type provenance` to filter attachments by type. To
inspect a specific attachment by digest, omit the `--type` flag.
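
Because the attachment contents are printed to standard output as JSON (as in the examples above), you can presumably redirect them to a file for further processing with other tooling; the output filename here is illustrative:

```console
docker buildx history inspect attachment qu2gsuo8ejqrwdfii23xkkckt --type provenance > provenance.json
```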
|
||||||
|
@@ -5,12 +5,61 @@ Print the logs of a build

### Options

| Name | Type | Default | Description |
-|:----------------|:---------|:--------|:--------------------------------------------------|
+|:--------------------------|:---------|:--------|:--------------------------------------------------|
| `--builder` | `string` | | Override the configured builder instance |
| `-D`, `--debug` | `bool` | | Enable debug logging |
-| `--progress` | `string` | `plain` | Set type of progress output (plain, rawjson, tty) |
+| [`--progress`](#progress) | `string` | `plain` | Set type of progress output (plain, rawjson, tty) |


<!---MARKER_GEN_END-->

## Description

Print the logs for a completed build. The output appears in the same format as
`--progress=plain`, showing the full logs for each step.

By default, this shows logs for the most recent build on the current builder.

You can also specify an earlier build using an offset. For example:

- `^1` shows logs for the build before the most recent
- `^2` shows logs for the build two steps back

## Examples

### Print logs for the most recent build

```console
$ docker buildx history logs
#1 [internal] load build definition from Dockerfile
#1 transferring dockerfile: 31B done
#1 DONE 0.0s
#2 [internal] load .dockerignore
#2 transferring context: 2B done
#2 DONE 0.0s
...
```

By default, this shows logs for the most recent build on the current builder.

### Print logs for a specific build

To print logs for a specific build, use a build ID or offset:

```console
# Using a build ID
docker buildx history logs qu2gsuo8ejqrwdfii23xkkckt

# Or using a relative offset
docker buildx history logs ^1
```

### <a name="progress"></a> Set type of progress output (--progress)

```console
$ docker buildx history logs ^1 --progress rawjson
{"id":"buildx_step_1","status":"START","timestamp":"2024-05-01T12:34:56.789Z","detail":"[internal] load build definition from Dockerfile"}
{"id":"buildx_step_1","status":"COMPLETE","timestamp":"2024-05-01T12:34:57.001Z","duration":212000000}
...
```
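
Each `rawjson` line is a standalone JSON object, so the output can presumably be piped into a JSON processor such as `jq` for ad-hoc analysis; `jq` and the filter expression shown here are not part of buildx and are only illustrative:

```console
docker buildx history logs ^1 --progress rawjson | jq -r 'select(.status == "COMPLETE") | .id'
```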
@@ -5,13 +5,98 @@ List build records

### Options

| Name | Type | Default | Description |
-|:----------------|:---------|:--------|:-----------------------------------------|
+|:--------------------------|:--------------|:--------|:---------------------------------------------|
| `--builder` | `string` | | Override the configured builder instance |
| `-D`, `--debug` | `bool` | | Enable debug logging |
-| `--format` | `string` | `table` | Format the output |
+| [`--filter`](#filter) | `stringArray` | | Provide filter values (e.g., `status=error`) |
-| `--no-trunc` | `bool` | | Don't truncate output |
+| [`--format`](#format) | `string` | `table` | Format the output |
+| [`--local`](#local) | `bool` | | List records for current repository only |
+| [`--no-trunc`](#no-trunc) | `bool` | | Don't truncate output |


<!---MARKER_GEN_END-->

## Description

List completed builds recorded by the active builder. Each entry includes the
build ID, name, status, timestamp, and duration.

By default, only records for the current builder are shown. You can filter
results using flags.

## Examples

### List all build records for the current builder

```console
$ docker buildx history ls
BUILD ID                    NAME        STATUS      CREATED AT    DURATION
qu2gsuo8ejqrwdfii23xkkckt   .dev/2850   Completed   3 days ago    1.4s
qsiifiuf1ad9pa9qvppc0z1l3   .dev/2850   Completed   3 days ago    1.3s
g9808bwrjrlkbhdamxklx660b   .dev/3120   Completed   5 days ago    2.1s
```

### <a name="filter"></a> List failed builds (--filter)

```console
docker buildx history ls --filter status=error
```

You can filter the list using the `--filter` flag. Supported filters include:

| Filter | Supported comparisons | Example |
|:-------|:----------------------|:--------|
| `ref`, `repository`, `status` | Support `=` and `!=` comparisons | `--filter status!=success` |
| `startedAt`, `completedAt`, `duration` | Support `<` and `>` comparisons with time values | `--filter duration>30s` |

You can combine multiple filters by repeating the `--filter` flag:

```console
docker buildx history ls --filter status=error --filter duration>30s
```

### <a name="local"></a> List builds from the current project (--local)

```console
docker buildx history ls --local
```
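
Assuming the flags compose as usual, `--local` can presumably be combined with other listing flags such as `--filter` to narrow results to builds from the current repository; this combination is a sketch, not a documented example:

```console
docker buildx history ls --local --filter status=error
```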

### <a name="no-trunc"></a> Display full output without truncation (--no-trunc)

```console
docker buildx history ls --no-trunc
```

### <a name="format"></a> Format output (--format)

**JSON output**

```console
$ docker buildx history ls --format json
[
  {
    "ID": "qu2gsuo8ejqrwdfii23xkkckt",
    "Name": ".dev/2850",
    "Status": "Completed",
    "CreatedAt": "2025-04-15T12:33:00Z",
    "Duration": "1.4s"
  },
  {
    "ID": "qsiifiuf1ad9pa9qvppc0z1l3",
    "Name": ".dev/2850",
    "Status": "Completed",
    "CreatedAt": "2025-04-15T12:29:00Z",
    "Duration": "1.3s"
  }
]
```

**Go template output**

```console
$ docker buildx history ls --format '{{.Name}} - {{.Duration}}'
.dev/2850 - 1.4s
.dev/2850 - 1.3s
.dev/3120 - 2.1s
```
@@ -13,3 +13,27 @@ Open a build in Docker Desktop

<!---MARKER_GEN_END-->

## Description

Open a build record in Docker Desktop for visual inspection. This requires
Docker Desktop to be installed and running on the host machine.

## Examples

### Open the most recent build in Docker Desktop

```console
docker buildx history open
```

By default, this opens the most recent build on the current builder.

### Open a specific build

```console
# Using a build ID
docker buildx history open qu2gsuo8ejqrwdfii23xkkckt

# Or using a relative offset
docker buildx history open ^1
```
@@ -14,3 +14,36 @@ Remove build records

<!---MARKER_GEN_END-->

## Description

Remove one or more build records from the current builder’s history. You can
remove specific builds by ID or offset, or delete all records at once using
the `--all` flag.

## Examples

### Remove a specific build

```console
# Using a build ID
docker buildx history rm qu2gsuo8ejqrwdfii23xkkckt

# Or using a relative offset
docker buildx history rm ^1
```

### Remove multiple builds

```console
# Using build IDs
docker buildx history rm qu2gsuo8ejqrwdfii23xkkckt qsiifiuf1ad9pa9qvppc0z1l3

# Or using relative offsets
docker buildx history rm ^1 ^2
```

### Remove all build records from the current builder

```console
docker buildx history rm --all
```
@@ -5,13 +5,65 @@ Show the OpenTelemetry trace of a build record

### Options

| Name | Type | Default | Description |
-|:----------------|:---------|:--------------|:-----------------------------------------|
+|:------------------------|:---------|:--------------|:-----------------------------------------|
-| `--addr` | `string` | `127.0.0.1:0` | Address to bind the UI server |
+| [`--addr`](#addr) | `string` | `127.0.0.1:0` | Address to bind the UI server |
| `--builder` | `string` | | Override the configured builder instance |
-| `--compare` | `string` | | Compare with another build reference |
+| [`--compare`](#compare) | `string` | | Compare with another build reference |
| `-D`, `--debug` | `bool` | | Enable debug logging |


<!---MARKER_GEN_END-->

## Description

View the OpenTelemetry trace for a completed build. This command loads the
trace into a Jaeger UI viewer and opens it in your browser.

This helps analyze build performance, step timing, and internal execution flows.

## Examples

### Open the OpenTelemetry trace for the most recent build

This command starts a temporary Jaeger UI server and opens your default browser
to view the trace.

```console
docker buildx history trace
```

### Open the trace for a specific build

```console
# Using a build ID
docker buildx history trace qu2gsuo8ejqrwdfii23xkkckt

# Or using a relative offset
docker buildx history trace ^1
```

### <a name="addr"></a> Run the Jaeger UI on a specific port (--addr)

```console
# Using a build ID
docker buildx history trace qu2gsuo8ejqrwdfii23xkkckt --addr 127.0.0.1:16686

# Or using a relative offset
docker buildx history trace ^1 --addr 127.0.0.1:16686
```

### <a name="compare"></a> Compare two build traces (--compare)

Compare two specific builds by reference:

```console
# Using build IDs
docker buildx history trace --compare=qu2gsuo8ejqrwdfii23xkkckt qsiifiuf1ad9pa9qvppc0z1l3

# Or using a single relative offset
docker buildx history trace --compare=^1
```

When you use a single reference with `--compare`, it compares that build
against the most recent one.
@@ -24,11 +24,10 @@ import (
	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/api/types/system"
	"github.com/docker/docker/errdefs"
-	dockerarchive "github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/jsonmessage"
	"github.com/docker/docker/pkg/stdcopy"
	"github.com/moby/buildkit/client"
+	mobyarchive "github.com/moby/go-archive"
	"github.com/pkg/errors"
)
@@ -56,6 +55,7 @@ type Driver struct {
	restartPolicy container.RestartPolicy
	env []string
	defaultLoad bool
	gpus []container.DeviceRequest
}

func (d *Driver) IsMobyDriver() bool {
@@ -106,8 +106,9 @@ func (d *Driver) create(ctx context.Context, l progress.SubLogger) error {
	}); err != nil {
		// image pulling failed, check if it exists in local image store.
		// if not, return pulling error. otherwise log it.
-		_, _, errInspect := d.DockerAPI.ImageInspectWithRaw(ctx, imageName)
+		_, errInspect := d.DockerAPI.ImageInspect(ctx, imageName)
-		if errInspect != nil {
+		found := errInspect == nil
+		if !found {
			return err
		}
		l.Wrap("pulling failed, using local image "+imageName, func() error { return nil })
@@ -157,6 +158,9 @@ func (d *Driver) create(ctx context.Context, l progress.SubLogger) error {
		if d.cpusetMems != "" {
			hc.Resources.CpusetMems = d.cpusetMems
		}
		if len(d.gpus) > 0 && d.hasGPUCapability(ctx, cfg.Image, d.gpus) {
			hc.Resources.DeviceRequests = d.gpus
		}
		if info, err := d.DockerAPI.Info(ctx); err == nil {
			if info.CgroupDriver == "cgroupfs" {
				// Place all buildkit containers inside this cgroup by default so limits can be attached
@@ -245,8 +249,8 @@ func (d *Driver) copyToContainer(ctx context.Context, files map[string][]byte) e
	if srcPath != "" {
		defer os.RemoveAll(srcPath)
	}
-	srcArchive, err := dockerarchive.TarWithOptions(srcPath, &dockerarchive.TarOptions{
+	srcArchive, err := mobyarchive.TarWithOptions(srcPath, &mobyarchive.TarOptions{
-		ChownOpts: &idtools.Identity{UID: 0, GID: 0},
+		ChownOpts: &mobyarchive.ChownOpts{UID: 0, GID: 0},
	})
	if err != nil {
		return err
@@ -419,6 +423,7 @@ func (d *Driver) Features(ctx context.Context) map[driver.Feature]bool {
		driver.DockerExporter: true,
		driver.CacheExport: true,
		driver.MultiPlatform: true,
		driver.DirectPush: true,
		driver.DefaultLoad: d.defaultLoad,
	}
}
@@ -427,6 +432,31 @@ func (d *Driver) HostGatewayIP(ctx context.Context) (net.IP, error) {
	return nil, errors.New("host-gateway is not supported by the docker-container driver")
}

// hasGPUCapability checks if docker daemon has GPU capability. We need to run
// a dummy container with GPU device to check if the daemon has this capability
// because there is no API to check it yet.
func (d *Driver) hasGPUCapability(ctx context.Context, image string, gpus []container.DeviceRequest) bool {
	cfg := &container.Config{
		Image:      image,
		Entrypoint: []string{"/bin/true"},
	}
	hc := &container.HostConfig{
		NetworkMode: container.NetworkMode(container.IPCModeNone),
		AutoRemove:  true,
		Resources: container.Resources{
			DeviceRequests: gpus,
		},
	}
	resp, err := d.DockerAPI.ContainerCreate(ctx, cfg, hc, &network.NetworkingConfig{}, nil, "")
	if err != nil {
		return false
	}
	if err := d.DockerAPI.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil {
		return false
	}
	return true
}

func demuxConn(c net.Conn) net.Conn {
	pr, pw := io.Pipe()
	// TODO: rewrite parser with Reader() to avoid goroutine switch
@@ -51,6 +51,12 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver
		InitConfig: cfg,
		restartPolicy: rp,
	}
	var gpus dockeropts.GpuOpts
	if err := gpus.Set("all"); err == nil {
		if v := gpus.Value(); len(v) > 0 {
			d.gpus = v
		}
	}
	for k, v := range cfg.DriverOpts {
		switch {
		case k == "network":
@@ -93,6 +93,7 @@ func (d *Driver) Features(ctx context.Context) map[driver.Feature]bool {
		driver.DockerExporter: useContainerdSnapshotter,
		driver.CacheExport: useContainerdSnapshotter,
		driver.MultiPlatform: useContainerdSnapshotter,
		driver.DirectPush: useContainerdSnapshotter,
		driver.DefaultLoad: true,
	}
})
@@ -7,5 +7,6 @@ const DockerExporter Feature = "Docker exporter"

const CacheExport Feature = "Cache export"
const MultiPlatform Feature = "Multi-platform build"
const DirectPush Feature = "Direct push"

const DefaultLoad Feature = "Automatically load images to the Docker Engine image store"
@@ -35,10 +35,10 @@ func testEndpoint(server, defaultNamespace string, ca, cert, key []byte, skipTLS
}

var testStoreCfg = store.NewConfig(
-	func() interface{} {
+	func() any {
-		return &map[string]interface{}{}
+		return &map[string]any{}
	},
-	store.EndpointTypeGetter(KubernetesEndpoint, func() interface{} { return &EndpointMeta{} }),
+	store.EndpointTypeGetter(KubernetesEndpoint, func() any { return &EndpointMeta{} }),
)

func TestSaveLoadContexts(t *testing.T) {
@@ -197,7 +197,7 @@ func checkClientConfig(t *testing.T, ep Endpoint, server, namespace string, ca,

func save(s store.Writer, ep Endpoint, name string) error {
	meta := store.Metadata{
-		Endpoints: map[string]interface{}{
+		Endpoints: map[string]any{
			KubernetesEndpoint: ep.EndpointMeta,
		},
		Name: name,
@@ -43,7 +43,7 @@ type Endpoint struct {

func init() {
	command.RegisterDefaultStoreEndpoints(
-		store.EndpointTypeGetter(KubernetesEndpoint, func() interface{} { return &EndpointMeta{} }),
+		store.EndpointTypeGetter(KubernetesEndpoint, func() any { return &EndpointMeta{} }),
	)
}
@@ -96,7 +96,7 @@ func (c *Endpoint) KubernetesConfig() clientcmd.ClientConfig {

// ResolveDefault returns endpoint metadata for the default Kubernetes
// endpoint, which is derived from the env-based kubeconfig.
-func (c *EndpointMeta) ResolveDefault() (interface{}, *store.EndpointTLSData, error) {
+func (c *EndpointMeta) ResolveDefault() (any, *store.EndpointTLSData, error) {
	kubeconfig := os.Getenv("KUBECONFIG")
	if kubeconfig == "" {
		kubeconfig = filepath.Join(homedir.Get(), ".kube/config")
@@ -238,6 +238,7 @@ func (d *Driver) Features(_ context.Context) map[driver.Feature]bool {
		driver.DockerExporter: d.DockerAPI != nil,
		driver.CacheExport: true,
		driver.MultiPlatform: true, // Untested (needs multiple Driver instances)
		driver.DirectPush: true,
		driver.DefaultLoad: d.defaultLoad,
	}
}
@@ -90,7 +90,7 @@ func ListRunningPods(ctx context.Context, client clientcorev1.PodInterface, depl
	for i := range podList.Items {
		pod := &podList.Items[i]
		if pod.Status.Phase == corev1.PodRunning {
-			logrus.Debugf("pod runnning: %q", pod.Name)
+			logrus.Debugf("pod running: %q", pod.Name)
			runningPods = append(runningPods, pod)
		}
	}
Some files were not shown because too many files have changed in this diff.