mirror of
https://gitea.com/Lydanne/buildx.git
synced 2025-08-23 20:19:08 +08:00
Compare commits
366 Commits
v0.18.0-rc
...
v0.22
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
18ccba0720 | ||
|
|
f5196f1167 | ||
|
|
ef99381eab | ||
|
|
00fdcd38ab | ||
|
|
97f1d47464 | ||
|
|
337578242d | ||
|
|
503a8925d2 | ||
|
|
0d708c0bc2 | ||
|
|
3a7523a117 | ||
|
|
5dc1a3308d | ||
|
|
eb78253dfd | ||
|
|
5f8b78a113 | ||
|
|
67d3ed34e4 | ||
|
|
b88423be50 | ||
|
|
c1e2ae5636 | ||
|
|
23afb70e40 | ||
|
|
812b42b329 | ||
|
|
d5d3d3d502 | ||
|
|
e19c729d3e | ||
|
|
aefa49c4fa | ||
|
|
7d927ee604 | ||
|
|
058c098c8c | ||
|
|
7b7dbe88b1 | ||
|
|
cadf4a5893 | ||
|
|
6cd9fef556 | ||
|
|
963b9ca30d | ||
|
|
4636c8051a | ||
|
|
e23695d50d | ||
|
|
6eff9b2d51 | ||
|
|
fcbfc85f42 | ||
|
|
9a204c44c3 | ||
|
|
4c6eba5acd | ||
|
|
fea7459880 | ||
|
|
e2d52a8465 | ||
|
|
48a591b1e1 | ||
|
|
128acdb471 | ||
|
|
411d3f8cea | ||
|
|
7925a96726 | ||
|
|
b06bddfee6 | ||
|
|
fe17ebda89 | ||
|
|
4ed1e07f16 | ||
|
|
f49593ce2c | ||
|
|
4e91fe6507 | ||
|
|
921b576f3a | ||
|
|
548c80ab5a | ||
|
|
f3a4740d5f | ||
|
|
89917dc696 | ||
|
|
f7276201ac | ||
|
|
beb9f515c0 | ||
|
|
4f7d145c0e | ||
|
|
ccdf63c644 | ||
|
|
9a6b8754b1 | ||
|
|
e75ac22ba6 | ||
|
|
62f5cc7c80 | ||
|
|
6272ae1afa | ||
|
|
accfbf6e24 | ||
|
|
af2d8fe555 | ||
|
|
18f4275a92 | ||
|
|
221a608b3c | ||
|
|
cc0391eba5 | ||
|
|
aef388bf7a | ||
|
|
80c16bc28c | ||
|
|
75160643e1 | ||
|
|
ad18ffc018 | ||
|
|
80c3832c94 | ||
|
|
7762ab2c38 | ||
|
|
b973de2dd3 | ||
|
|
352ce7e875 | ||
|
|
cdfc1ed750 | ||
|
|
d0d3433b12 | ||
|
|
b04d39494f | ||
|
|
52f503e806 | ||
|
|
79a978484d | ||
|
|
f7992033bf | ||
|
|
73f61aa338 | ||
|
|
faa573f484 | ||
|
|
0a4a1babd1 | ||
|
|
461bd9e5d1 | ||
|
|
d6fdf83f45 | ||
|
|
ef4e9fea83 | ||
|
|
0c296fe857 | ||
|
|
ef73c64d2c | ||
|
|
1784f84561 | ||
|
|
6a6fa4f422 | ||
|
|
2dc0350ffe | ||
|
|
b85fc5c484 | ||
|
|
2389d457a4 | ||
|
|
3f82aadc6e | ||
|
|
79e3f12305 | ||
|
|
1dc5f0751b | ||
|
|
7ba4da0800 | ||
|
|
a64e628774 | ||
|
|
1c4b1a376c | ||
|
|
e1f690abfc | ||
|
|
03569c2188 | ||
|
|
350d3f0f4b | ||
|
|
dc27815236 | ||
|
|
1089ff7341 | ||
|
|
7433d37183 | ||
|
|
f9a76355b5 | ||
|
|
cfeea34b2d | ||
|
|
ba2d3692a6 | ||
|
|
853b593a4d | ||
|
|
efb300e613 | ||
|
|
cee7b344da | ||
|
|
67dbde6970 | ||
|
|
295653dabb | ||
|
|
f5802119c5 | ||
|
|
40b9ac1ec5 | ||
|
|
f11496448a | ||
|
|
c8c9c72ca6 | ||
|
|
9fe8139022 | ||
|
|
b3e8c62635 | ||
|
|
b8e9c28315 | ||
|
|
3ae9970da5 | ||
|
|
1d219100fc | ||
|
|
464f9278d1 | ||
|
|
7216086b8c | ||
|
|
b195b80ddf | ||
|
|
70a5e266d1 | ||
|
|
689bea7963 | ||
|
|
5176c38115 | ||
|
|
ec440c4574 | ||
|
|
0a4eb7ec76 | ||
|
|
f710c93157 | ||
|
|
d1a0a1497c | ||
|
|
c880ecd513 | ||
|
|
d557da1935 | ||
|
|
417af36abc | ||
|
|
e236b86297 | ||
|
|
633e8a0881 | ||
|
|
5e1ea62f92 | ||
|
|
4b90b84995 | ||
|
|
abc85c38f8 | ||
|
|
ccca7c795a | ||
|
|
04aab6958c | ||
|
|
9d640f0e33 | ||
|
|
b76fdcaf8d | ||
|
|
d693e18c04 | ||
|
|
b066ee1110 | ||
|
|
cf8bf9e104 | ||
|
|
3bd54b19aa | ||
|
|
934841f329 | ||
|
|
b2ababc7b6 | ||
|
|
0ccdb7e248 | ||
|
|
cacb4fb9b3 | ||
|
|
df80bd72c6 | ||
|
|
bb4bef2f04 | ||
|
|
a11507344a | ||
|
|
17af006857 | ||
|
|
11c84973ef | ||
|
|
cc4a291f6a | ||
|
|
aa1fbc0421 | ||
|
|
b2bbb337e4 | ||
|
|
012df71b63 | ||
|
|
a26bb271ab | ||
|
|
3e0682f039 | ||
|
|
3aed658dc4 | ||
|
|
b4a0dee723 | ||
|
|
6904512813 | ||
|
|
d41e335466 | ||
|
|
0954dcb5fd | ||
|
|
38f64bf709 | ||
|
|
c1d3955fbe | ||
|
|
d0b63e60e2 | ||
|
|
e141c8fa71 | ||
|
|
2ee156236b | ||
|
|
1335264c9d | ||
|
|
e74185aa6d | ||
|
|
0224773102 | ||
|
|
8c27b5c545 | ||
|
|
f7594d484b | ||
|
|
f118749cdc | ||
|
|
0d92ad713c | ||
|
|
a18ff4d5ef | ||
|
|
b035a04aaa | ||
|
|
6220e0aae8 | ||
|
|
d9abc78e8f | ||
|
|
3313026961 | ||
|
|
06912aa24c | ||
|
|
cde0e9814d | ||
|
|
2e6e146087 | ||
|
|
af3cbe6cec | ||
|
|
1ef9e67cbb | ||
|
|
75204426bd | ||
|
|
b73f58a90b | ||
|
|
6f5486e718 | ||
|
|
3fa0c3d122 | ||
|
|
b0b902de41 | ||
|
|
77d632e0c5 | ||
|
|
6a12543db3 | ||
|
|
4027b60fa0 | ||
|
|
dda8df3b06 | ||
|
|
d54a110b3c | ||
|
|
44fa243d58 | ||
|
|
630066bfc5 | ||
|
|
026ac2313c | ||
|
|
45fc5ed3b3 | ||
|
|
1eb167a767 | ||
|
|
45d2ec69f1 | ||
|
|
793ec7f3b2 | ||
|
|
6cb62dddf2 | ||
|
|
66ecb53fa7 | ||
|
|
26026810fe | ||
|
|
d3830e0a6e | ||
|
|
8c2759f6ae | ||
|
|
8a472c6c9d | ||
|
|
b98653d8fe | ||
|
|
807d15ff9d | ||
|
|
ac636fd2d8 | ||
|
|
769d22fb30 | ||
|
|
e36535e137 | ||
|
|
ada44e82ea | ||
|
|
16edf5d4aa | ||
|
|
11c85b2369 | ||
|
|
41215835cf | ||
|
|
a41fc81796 | ||
|
|
5f057bdee7 | ||
|
|
883806524a | ||
|
|
38b71998f5 | ||
|
|
07db2be2f0 | ||
|
|
f3f5e760b3 | ||
|
|
e762d3dbca | ||
|
|
4ecbb018f2 | ||
|
|
a8f4699c5e | ||
|
|
7cf12fce98 | ||
|
|
07190d20da | ||
|
|
c79368c199 | ||
|
|
f47d12e692 | ||
|
|
0fc204915a | ||
|
|
3a0eeeacd5 | ||
|
|
e6ce3917d3 | ||
|
|
e085ed8c5c | ||
|
|
b83c3e239e | ||
|
|
a90d5794ee | ||
|
|
c571b9d730 | ||
|
|
af53930206 | ||
|
|
c4a2db8f0c | ||
|
|
206bd6c3a2 | ||
|
|
5c169dd878 | ||
|
|
875e717361 | ||
|
|
72c3d4a237 | ||
|
|
ce46297960 | ||
|
|
e8389c8a02 | ||
|
|
804ee66f13 | ||
|
|
5c5bc510ac | ||
|
|
0dfc4a1019 | ||
|
|
1e992b295c | ||
|
|
4f81bcb5c8 | ||
|
|
3771fe2034 | ||
|
|
5dd4ae0335 | ||
|
|
567361d494 | ||
|
|
21b1be1667 | ||
|
|
876e003685 | ||
|
|
a53ed0a354 | ||
|
|
737da6959d | ||
|
|
6befa70cc8 | ||
|
|
2d051bde96 | ||
|
|
63985b591b | ||
|
|
695200c81a | ||
|
|
828c1dbf98 | ||
|
|
f321d4ac95 | ||
|
|
0d13bf6606 | ||
|
|
3e3242cfdd | ||
|
|
f9e2d07b30 | ||
|
|
c281e18892 | ||
|
|
98d4cb1eb3 | ||
|
|
70f2fb6442 | ||
|
|
fdac6d5fe7 | ||
|
|
d4eca07af8 | ||
|
|
95e77da0fa | ||
|
|
6810a7c69c | ||
|
|
dd596d6542 | ||
|
|
c6e403ad7f | ||
|
|
d6d713aac6 | ||
|
|
f148976e6e | ||
|
|
8f70196de1 | ||
|
|
e196855bed | ||
|
|
71c7889719 | ||
|
|
a3418e0178 | ||
|
|
6a1cf78879 | ||
|
|
ec1f712328 | ||
|
|
5ce6597c07 | ||
|
|
9c75071793 | ||
|
|
d612139b19 | ||
|
|
42f7898c53 | ||
|
|
3148c098a2 | ||
|
|
f95d574f94 | ||
|
|
60822781be | ||
|
|
4c83475703 | ||
|
|
17eff25fe5 | ||
|
|
9c8ffb77d6 | ||
|
|
13a426fca6 | ||
|
|
1a039115bc | ||
|
|
07d58782b8 | ||
|
|
3ccbb88e6a | ||
|
|
a34c641bc4 | ||
|
|
f10be074b4 | ||
|
|
9f429965c0 | ||
|
|
f3929447d7 | ||
|
|
615f4f6759 | ||
|
|
9a7b028bab | ||
|
|
1af4f05ba4 | ||
|
|
4b5d78db9b | ||
|
|
d2c512a95b | ||
|
|
5937ba0e00 | ||
|
|
21fb026aa3 | ||
|
|
bc45641086 | ||
|
|
96689e5d05 | ||
|
|
50a8f11f0f | ||
|
|
11cf38bd97 | ||
|
|
300d56b3ff | ||
|
|
e04da86aca | ||
|
|
9f1fc99018 | ||
|
|
26bbddb5d6 | ||
|
|
58fd190c31 | ||
|
|
e7a53fb829 | ||
|
|
c0fd64f4f8 | ||
|
|
0c629335ac | ||
|
|
f216b71ad2 | ||
|
|
debe8c0187 | ||
|
|
a69d857b8a | ||
|
|
a6ef9db84d | ||
|
|
9c27be752c | ||
|
|
82a65d4f9b | ||
|
|
8647f408ac | ||
|
|
e51cdcac50 | ||
|
|
55a544d976 | ||
|
|
3b943bd4ba | ||
|
|
502bb51a3b | ||
|
|
48977780ad | ||
|
|
e540bb03a4 | ||
|
|
919c52395d | ||
|
|
7f01c63be7 | ||
|
|
076d2f19d5 | ||
|
|
3c3150b8d3 | ||
|
|
b03d8c52e1 | ||
|
|
e67ccb080b | ||
|
|
dab02c347e | ||
|
|
6caa151e98 | ||
|
|
be6d8326a8 | ||
|
|
7855f8324b | ||
|
|
850e5330ad | ||
|
|
b7ea25eb59 | ||
|
|
8cdeac54ab | ||
|
|
752c70a06c | ||
|
|
83dd969dc1 | ||
|
|
a5bb117ff0 | ||
|
|
735b7f68fe | ||
|
|
bcac44f658 | ||
|
|
d46595eed8 | ||
|
|
62407927fa | ||
|
|
c7b0a84c6a | ||
|
|
1aac809c63 | ||
|
|
9b0575b589 | ||
|
|
9f3a578149 | ||
|
|
14b31d8b77 | ||
|
|
e26911f403 | ||
|
|
cd8d61a9d7 | ||
|
|
3a56161d03 | ||
|
|
0fd935b0ca | ||
|
|
704b2cc52d | ||
|
|
6b2dc8ce56 | ||
|
|
a585faf3d2 | ||
|
|
ca502cc9a5 | ||
|
|
8056a3dc7c |
85
.github/CONTRIBUTING.md
vendored
85
.github/CONTRIBUTING.md
vendored
@@ -188,6 +188,89 @@ To generate new vendored files with go modules run:
|
|||||||
$ make vendor
|
$ make vendor
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Generate profiling data
|
||||||
|
|
||||||
|
You can configure Buildx to generate [`pprof`](https://github.com/google/pprof)
|
||||||
|
memory and CPU profiles to analyze and optimize your builds. These profiles are
|
||||||
|
useful for identifying performance bottlenecks, detecting memory
|
||||||
|
inefficiencies, and ensuring the program (Buildx) runs efficiently.
|
||||||
|
|
||||||
|
The following environment variables control whether Buildx generates profiling
|
||||||
|
data for builds:
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ export BUILDX_CPU_PROFILE=buildx_cpu.prof
|
||||||
|
$ export BUILDX_MEM_PROFILE=buildx_mem.prof
|
||||||
|
```
|
||||||
|
|
||||||
|
When set, Buildx emits profiling samples for the builds to the location
|
||||||
|
specified by the environment variable.
|
||||||
|
|
||||||
|
To analyze and visualize profiling samples, you need `pprof` from the Go
|
||||||
|
toolchain, and (optionally) GraphViz for visualization in a graphical format.
|
||||||
|
|
||||||
|
To inspect profiling data with `pprof`:
|
||||||
|
|
||||||
|
1. Build a local binary of Buildx from source.
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake
|
||||||
|
```
|
||||||
|
|
||||||
|
The binary gets exported to `./bin/build/buildx`.
|
||||||
|
|
||||||
|
2. Run a build and with the environment variables set to generate profiling data.
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ export BUILDX_CPU_PROFILE=buildx_cpu.prof
|
||||||
|
$ export BUILDX_MEM_PROFILE=buildx_mem.prof
|
||||||
|
$ ./bin/build/buildx bake
|
||||||
|
```
|
||||||
|
|
||||||
|
This creates `buildx_cpu.prof` and `buildx_mem.prof` for the build.
|
||||||
|
|
||||||
|
3. Start `pprof` and specify the filename of the profile that you want to
|
||||||
|
analyze.
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ go tool pprof buildx_cpu.prof
|
||||||
|
```
|
||||||
|
|
||||||
|
This opens the `pprof` interactive console. From here, you can inspect the
|
||||||
|
profiling sample using various commands. For example, use `top 10` command
|
||||||
|
to view the top 10 most time-consuming entries.
|
||||||
|
|
||||||
|
```plaintext
|
||||||
|
(pprof) top 10
|
||||||
|
Showing nodes accounting for 3.04s, 91.02% of 3.34s total
|
||||||
|
Dropped 123 nodes (cum <= 0.02s)
|
||||||
|
Showing top 10 nodes out of 159
|
||||||
|
flat flat% sum% cum cum%
|
||||||
|
1.14s 34.13% 34.13% 1.14s 34.13% syscall.syscall
|
||||||
|
0.91s 27.25% 61.38% 0.91s 27.25% runtime.kevent
|
||||||
|
0.35s 10.48% 71.86% 0.35s 10.48% runtime.pthread_cond_wait
|
||||||
|
0.22s 6.59% 78.44% 0.22s 6.59% runtime.pthread_cond_signal
|
||||||
|
0.15s 4.49% 82.93% 0.15s 4.49% runtime.usleep
|
||||||
|
0.10s 2.99% 85.93% 0.10s 2.99% runtime.memclrNoHeapPointers
|
||||||
|
0.10s 2.99% 88.92% 0.10s 2.99% runtime.memmove
|
||||||
|
0.03s 0.9% 89.82% 0.03s 0.9% runtime.madvise
|
||||||
|
0.02s 0.6% 90.42% 0.02s 0.6% runtime.(*mspan).typePointersOfUnchecked
|
||||||
|
0.02s 0.6% 91.02% 0.02s 0.6% runtime.pcvalue
|
||||||
|
```
|
||||||
|
|
||||||
|
To view the call graph in a GUI, run `go tool pprof -http=:8081 <sample>`.
|
||||||
|
|
||||||
|
> [!NOTE]
|
||||||
|
> Requires [GraphViz](https://www.graphviz.org/) to be installed.
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ go tool pprof -http=:8081 buildx_cpu.prof
|
||||||
|
Serving web UI on http://127.0.0.1:8081
|
||||||
|
http://127.0.0.1:8081
|
||||||
|
```
|
||||||
|
|
||||||
|
For more information about using `pprof` and how to interpret the call graph,
|
||||||
|
refer to the [`pprof` README](https://github.com/google/pprof/blob/main/doc/README.md).
|
||||||
|
|
||||||
### Conventions
|
### Conventions
|
||||||
|
|
||||||
@@ -343,4 +426,4 @@ The rules:
|
|||||||
|
|
||||||
If you are having trouble getting into the mood of idiomatic Go, we recommend
|
If you are having trouble getting into the mood of idiomatic Go, we recommend
|
||||||
reading through [Effective Go](https://golang.org/doc/effective_go.html). The
|
reading through [Effective Go](https://golang.org/doc/effective_go.html). The
|
||||||
[Go Blog](https://blog.golang.org) is also a great resource.
|
[Go Blog](https://blog.golang.org) is also a great resource.
|
||||||
|
|||||||
5
.github/labeler.yml
vendored
5
.github/labeler.yml
vendored
@@ -96,6 +96,11 @@ area/hack:
|
|||||||
- changed-files:
|
- changed-files:
|
||||||
- any-glob-to-any-file: 'hack/**'
|
- any-glob-to-any-file: 'hack/**'
|
||||||
|
|
||||||
|
# Add 'area/history' label to changes in history command
|
||||||
|
area/history:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'commands/history/**'
|
||||||
|
|
||||||
# Add 'area/tests' label to changes in test files
|
# Add 'area/tests' label to changes in test files
|
||||||
area/tests:
|
area/tests:
|
||||||
- changed-files:
|
- changed-files:
|
||||||
|
|||||||
165
.github/workflows/build.yml
vendored
165
.github/workflows/build.yml
vendored
@@ -28,15 +28,15 @@ on:
|
|||||||
- 'docs/**'
|
- 'docs/**'
|
||||||
|
|
||||||
env:
|
env:
|
||||||
BUILDX_VERSION: "latest"
|
SETUP_BUILDX_VERSION: "edge"
|
||||||
BUILDKIT_IMAGE: "moby/buildkit:latest"
|
SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
|
||||||
SCOUT_VERSION: "1.11.0"
|
SCOUT_VERSION: "1.11.0"
|
||||||
REPO_SLUG: "docker/buildx-bin"
|
REPO_SLUG: "docker/buildx-bin"
|
||||||
DESTDIR: "./bin"
|
DESTDIR: "./bin"
|
||||||
TEST_CACHE_SCOPE: "test"
|
TEST_CACHE_SCOPE: "test"
|
||||||
TESTFLAGS: "-v --parallel=6 --timeout=30m"
|
TESTFLAGS: "-v --parallel=6 --timeout=30m"
|
||||||
GOTESTSUM_FORMAT: "standard-verbose"
|
GOTESTSUM_FORMAT: "standard-verbose"
|
||||||
GO_VERSION: "1.22"
|
GO_VERSION: "1.23"
|
||||||
GOTESTSUM_VERSION: "v1.9.0" # same as one in Dockerfile
|
GOTESTSUM_VERSION: "v1.9.0" # same as one in Dockerfile
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
@@ -54,9 +54,9 @@ jobs:
|
|||||||
- master
|
- master
|
||||||
- latest
|
- latest
|
||||||
- buildx-stable-1
|
- buildx-stable-1
|
||||||
- v0.15.2
|
- v0.20.1
|
||||||
- v0.14.1
|
- v0.19.0
|
||||||
- v0.13.2
|
- v0.18.2
|
||||||
worker:
|
worker:
|
||||||
- docker-container
|
- docker-container
|
||||||
- remote
|
- remote
|
||||||
@@ -76,6 +76,26 @@ jobs:
|
|||||||
- worker: docker+containerd # same as docker, but with containerd snapshotter
|
- worker: docker+containerd # same as docker, but with containerd snapshotter
|
||||||
pkg: ./tests
|
pkg: ./tests
|
||||||
mode: experimental
|
mode: experimental
|
||||||
|
- worker: "docker@27.5"
|
||||||
|
pkg: ./tests
|
||||||
|
- worker: "docker+containerd@27.5" # same as docker, but with containerd snapshotter
|
||||||
|
pkg: ./tests
|
||||||
|
- worker: "docker@27.5"
|
||||||
|
pkg: ./tests
|
||||||
|
mode: experimental
|
||||||
|
- worker: "docker+containerd@27.5" # same as docker, but with containerd snapshotter
|
||||||
|
pkg: ./tests
|
||||||
|
mode: experimental
|
||||||
|
- worker: "docker@26.1"
|
||||||
|
pkg: ./tests
|
||||||
|
- worker: "docker+containerd@26.1" # same as docker, but with containerd snapshotter
|
||||||
|
pkg: ./tests
|
||||||
|
- worker: "docker@26.1"
|
||||||
|
pkg: ./tests
|
||||||
|
mode: experimental
|
||||||
|
- worker: "docker+containerd@26.1" # same as docker, but with containerd snapshotter
|
||||||
|
pkg: ./tests
|
||||||
|
mode: experimental
|
||||||
steps:
|
steps:
|
||||||
-
|
-
|
||||||
name: Prepare
|
name: Prepare
|
||||||
@@ -86,7 +106,7 @@ jobs:
|
|||||||
fi
|
fi
|
||||||
testFlags="--run=//worker=$(echo "${{ matrix.worker }}" | sed 's/\+/\\+/g')$"
|
testFlags="--run=//worker=$(echo "${{ matrix.worker }}" | sed 's/\+/\\+/g')$"
|
||||||
case "${{ matrix.worker }}" in
|
case "${{ matrix.worker }}" in
|
||||||
docker | docker+containerd)
|
docker | docker+containerd | docker@* | docker+containerd@*)
|
||||||
echo "TESTFLAGS=${{ env.TESTFLAGS_DOCKER }} $testFlags" >> $GITHUB_ENV
|
echo "TESTFLAGS=${{ env.TESTFLAGS_DOCKER }} $testFlags" >> $GITHUB_ENV
|
||||||
;;
|
;;
|
||||||
*)
|
*)
|
||||||
@@ -111,13 +131,14 @@ jobs:
|
|||||||
name: Set up Docker Buildx
|
name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v3
|
uses: docker/setup-buildx-action@v3
|
||||||
with:
|
with:
|
||||||
version: ${{ env.BUILDX_VERSION }}
|
version: ${{ env.SETUP_BUILDX_VERSION }}
|
||||||
driver-opts: image=${{ env.BUILDKIT_IMAGE }}
|
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
|
||||||
buildkitd-flags: --debug
|
buildkitd-flags: --debug
|
||||||
-
|
-
|
||||||
name: Build test image
|
name: Build test image
|
||||||
uses: docker/bake-action@v5
|
uses: docker/bake-action@v6
|
||||||
with:
|
with:
|
||||||
|
source: .
|
||||||
targets: integration-test
|
targets: integration-test
|
||||||
set: |
|
set: |
|
||||||
*.output=type=docker,name=${{ env.TEST_IMAGE_ID }}
|
*.output=type=docker,name=${{ env.TEST_IMAGE_ID }}
|
||||||
@@ -131,7 +152,7 @@ jobs:
|
|||||||
-
|
-
|
||||||
name: Send to Codecov
|
name: Send to Codecov
|
||||||
if: always()
|
if: always()
|
||||||
uses: codecov/codecov-action@v4
|
uses: codecov/codecov-action@v5
|
||||||
with:
|
with:
|
||||||
directory: ./bin/testreports
|
directory: ./bin/testreports
|
||||||
flags: integration
|
flags: integration
|
||||||
@@ -158,11 +179,16 @@ jobs:
|
|||||||
matrix:
|
matrix:
|
||||||
os:
|
os:
|
||||||
- ubuntu-24.04
|
- ubuntu-24.04
|
||||||
- macos-12
|
- macos-14
|
||||||
- windows-2022
|
- windows-2022
|
||||||
env:
|
env:
|
||||||
SKIP_INTEGRATION_TESTS: 1
|
SKIP_INTEGRATION_TESTS: 1
|
||||||
steps:
|
steps:
|
||||||
|
-
|
||||||
|
name: Setup Git config
|
||||||
|
run: |
|
||||||
|
git config --global core.autocrlf false
|
||||||
|
git config --global core.eol lf
|
||||||
-
|
-
|
||||||
name: Checkout
|
name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
@@ -203,7 +229,7 @@ jobs:
|
|||||||
-
|
-
|
||||||
name: Send to Codecov
|
name: Send to Codecov
|
||||||
if: always()
|
if: always()
|
||||||
uses: codecov/codecov-action@v4
|
uses: codecov/codecov-action@v5
|
||||||
with:
|
with:
|
||||||
directory: ${{ env.TESTREPORTS_DIR }}
|
directory: ${{ env.TESTREPORTS_DIR }}
|
||||||
env_vars: RUNNER_OS
|
env_vars: RUNNER_OS
|
||||||
@@ -224,27 +250,88 @@ jobs:
|
|||||||
name: test-reports-${{ env.TESTREPORTS_NAME }}
|
name: test-reports-${{ env.TESTREPORTS_NAME }}
|
||||||
path: ${{ env.TESTREPORTS_BASEDIR }}
|
path: ${{ env.TESTREPORTS_BASEDIR }}
|
||||||
|
|
||||||
govulncheck:
|
test-bsd-unit:
|
||||||
runs-on: ubuntu-24.04
|
runs-on: ubuntu-22.04
|
||||||
permissions:
|
continue-on-error: true
|
||||||
# required to write sarif report
|
strategy:
|
||||||
security-events: write
|
fail-fast: false
|
||||||
# required to check out the repository
|
matrix:
|
||||||
contents: read
|
os:
|
||||||
|
- freebsd
|
||||||
|
- netbsd
|
||||||
|
- openbsd
|
||||||
steps:
|
steps:
|
||||||
|
-
|
||||||
|
name: Prepare
|
||||||
|
run: |
|
||||||
|
echo "VAGRANT_FILE=hack/Vagrantfile.${{ matrix.os }}" >> $GITHUB_ENV
|
||||||
|
|
||||||
|
# Sets semver Go version to be able to download tarball during vagrant setup
|
||||||
|
goVersion=$(curl --silent "https://go.dev/dl/?mode=json&include=all" | jq -r '.[].files[].version' | uniq | sed -e 's/go//' | sort -V | grep $GO_VERSION | tail -1)
|
||||||
|
echo "GO_VERSION=$goVersion" >> $GITHUB_ENV
|
||||||
-
|
-
|
||||||
name: Checkout
|
name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
|
-
|
||||||
|
name: Cache Vagrant boxes
|
||||||
|
uses: actions/cache@v4
|
||||||
|
with:
|
||||||
|
path: ~/.vagrant.d/boxes
|
||||||
|
key: ${{ runner.os }}-vagrant-${{ matrix.os }}-${{ hashFiles(env.VAGRANT_FILE) }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ runner.os }}-vagrant-${{ matrix.os }}-
|
||||||
|
-
|
||||||
|
name: Install vagrant
|
||||||
|
run: |
|
||||||
|
set -x
|
||||||
|
wget -O - https://apt.releases.hashicorp.com/gpg | sudo gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg
|
||||||
|
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list
|
||||||
|
sudo apt-get update
|
||||||
|
sudo apt-get install -y libvirt-dev libvirt-daemon libvirt-daemon-system vagrant vagrant-libvirt ruby-libvirt
|
||||||
|
sudo systemctl enable --now libvirtd
|
||||||
|
sudo chmod a+rw /var/run/libvirt/libvirt-sock
|
||||||
|
vagrant plugin install vagrant-libvirt
|
||||||
|
vagrant --version
|
||||||
|
-
|
||||||
|
name: Set up vagrant
|
||||||
|
run: |
|
||||||
|
ln -sf ${{ env.VAGRANT_FILE }} Vagrantfile
|
||||||
|
vagrant up --no-tty
|
||||||
|
-
|
||||||
|
name: Test
|
||||||
|
run: |
|
||||||
|
vagrant ssh -- "cd /vagrant; SKIP_INTEGRATION_TESTS=1 go test -mod=vendor -coverprofile=coverage.txt -covermode=atomic ${{ env.TESTFLAGS }} ./..."
|
||||||
|
vagrant ssh -c "sudo cat /vagrant/coverage.txt" > coverage.txt
|
||||||
|
-
|
||||||
|
name: Upload coverage
|
||||||
|
if: always()
|
||||||
|
uses: codecov/codecov-action@v5
|
||||||
|
with:
|
||||||
|
files: ./coverage.txt
|
||||||
|
env_vars: RUNNER_OS
|
||||||
|
flags: unit,${{ matrix.os }}
|
||||||
|
token: ${{ secrets.CODECOV_TOKEN }}
|
||||||
|
env:
|
||||||
|
RUNNER_OS: ${{ matrix.os }}
|
||||||
|
|
||||||
|
govulncheck:
|
||||||
|
runs-on: ubuntu-24.04
|
||||||
|
permissions:
|
||||||
|
# same as global permission
|
||||||
|
contents: read
|
||||||
|
# required to write sarif report
|
||||||
|
security-events: write
|
||||||
|
steps:
|
||||||
-
|
-
|
||||||
name: Set up Docker Buildx
|
name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v3
|
uses: docker/setup-buildx-action@v3
|
||||||
with:
|
with:
|
||||||
version: ${{ env.BUILDX_VERSION }}
|
version: ${{ env.SETUP_BUILDX_VERSION }}
|
||||||
driver-opts: image=${{ env.BUILDKIT_IMAGE }}
|
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
|
||||||
buildkitd-flags: --debug
|
buildkitd-flags: --debug
|
||||||
-
|
-
|
||||||
name: Run
|
name: Run
|
||||||
uses: docker/bake-action@v5
|
uses: docker/bake-action@v6
|
||||||
with:
|
with:
|
||||||
targets: govulncheck
|
targets: govulncheck
|
||||||
env:
|
env:
|
||||||
@@ -298,8 +385,8 @@ jobs:
|
|||||||
name: Set up Docker Buildx
|
name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v3
|
uses: docker/setup-buildx-action@v3
|
||||||
with:
|
with:
|
||||||
version: ${{ env.BUILDX_VERSION }}
|
version: ${{ env.SETUP_BUILDX_VERSION }}
|
||||||
driver-opts: image=${{ env.BUILDKIT_IMAGE }}
|
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
|
||||||
buildkitd-flags: --debug
|
buildkitd-flags: --debug
|
||||||
-
|
-
|
||||||
name: Build
|
name: Build
|
||||||
@@ -325,8 +412,14 @@ jobs:
|
|||||||
if: ${{ github.event_name != 'pull_request' && github.repository == 'docker/buildx' }}
|
if: ${{ github.event_name != 'pull_request' && github.repository == 'docker/buildx' }}
|
||||||
steps:
|
steps:
|
||||||
-
|
-
|
||||||
name: Checkout
|
name: Free disk space
|
||||||
uses: actions/checkout@v4
|
uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be # v1.3.1
|
||||||
|
with:
|
||||||
|
android: true
|
||||||
|
dotnet: true
|
||||||
|
haskell: true
|
||||||
|
large-packages: true
|
||||||
|
swap-storage: true
|
||||||
-
|
-
|
||||||
name: Set up QEMU
|
name: Set up QEMU
|
||||||
uses: docker/setup-qemu-action@v3
|
uses: docker/setup-qemu-action@v3
|
||||||
@@ -334,8 +427,8 @@ jobs:
|
|||||||
name: Set up Docker Buildx
|
name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v3
|
uses: docker/setup-buildx-action@v3
|
||||||
with:
|
with:
|
||||||
version: ${{ env.BUILDX_VERSION }}
|
version: ${{ env.SETUP_BUILDX_VERSION }}
|
||||||
driver-opts: image=${{ env.BUILDKIT_IMAGE }}
|
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
|
||||||
buildkitd-flags: --debug
|
buildkitd-flags: --debug
|
||||||
-
|
-
|
||||||
name: Docker meta
|
name: Docker meta
|
||||||
@@ -358,11 +451,11 @@ jobs:
|
|||||||
password: ${{ secrets.DOCKERPUBLICBOT_WRITE_PAT }}
|
password: ${{ secrets.DOCKERPUBLICBOT_WRITE_PAT }}
|
||||||
-
|
-
|
||||||
name: Build and push image
|
name: Build and push image
|
||||||
uses: docker/bake-action@v5
|
uses: docker/bake-action@v6
|
||||||
with:
|
with:
|
||||||
files: |
|
files: |
|
||||||
./docker-bake.hcl
|
./docker-bake.hcl
|
||||||
${{ steps.meta.outputs.bake-file }}
|
cwd://${{ steps.meta.outputs.bake-file }}
|
||||||
targets: image-cross
|
targets: image-cross
|
||||||
push: ${{ github.event_name != 'pull_request' }}
|
push: ${{ github.event_name != 'pull_request' }}
|
||||||
sbom: true
|
sbom: true
|
||||||
@@ -374,14 +467,13 @@ jobs:
|
|||||||
runs-on: ubuntu-24.04
|
runs-on: ubuntu-24.04
|
||||||
if: ${{ github.ref == 'refs/heads/master' && github.repository == 'docker/buildx' }}
|
if: ${{ github.ref == 'refs/heads/master' && github.repository == 'docker/buildx' }}
|
||||||
permissions:
|
permissions:
|
||||||
|
# same as global permission
|
||||||
|
contents: read
|
||||||
# required to write sarif report
|
# required to write sarif report
|
||||||
security-events: write
|
security-events: write
|
||||||
needs:
|
needs:
|
||||||
- bin-image
|
- bin-image
|
||||||
steps:
|
steps:
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
-
|
-
|
||||||
name: Login to DockerHub
|
name: Login to DockerHub
|
||||||
uses: docker/login-action@v3
|
uses: docker/login-action@v3
|
||||||
@@ -404,6 +496,9 @@ jobs:
|
|||||||
|
|
||||||
release:
|
release:
|
||||||
runs-on: ubuntu-24.04
|
runs-on: ubuntu-24.04
|
||||||
|
permissions:
|
||||||
|
# required to create GitHub release
|
||||||
|
contents: write
|
||||||
needs:
|
needs:
|
||||||
- test-integration
|
- test-integration
|
||||||
- test-unit
|
- test-unit
|
||||||
@@ -433,7 +528,7 @@ jobs:
|
|||||||
-
|
-
|
||||||
name: GitHub Release
|
name: GitHub Release
|
||||||
if: startsWith(github.ref, 'refs/tags/v')
|
if: startsWith(github.ref, 'refs/tags/v')
|
||||||
uses: softprops/action-gh-release@c062e08bd532815e2082a85e87e3ef29c3e6d191 # v2.0.8
|
uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # v2.2.1
|
||||||
env:
|
env:
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
with:
|
with:
|
||||||
|
|||||||
11
.github/workflows/codeql.yml
vendored
11
.github/workflows/codeql.yml
vendored
@@ -17,16 +17,15 @@ on:
|
|||||||
pull_request:
|
pull_request:
|
||||||
|
|
||||||
env:
|
env:
|
||||||
GO_VERSION: "1.22"
|
GO_VERSION: "1.23"
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
codeql:
|
codeql:
|
||||||
permissions:
|
|
||||||
actions: read
|
|
||||||
contents: read
|
|
||||||
security-events: write
|
|
||||||
|
|
||||||
runs-on: ubuntu-24.04
|
runs-on: ubuntu-24.04
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
actions: read
|
||||||
|
security-events: write
|
||||||
steps:
|
steps:
|
||||||
-
|
-
|
||||||
name: Checkout
|
name: Checkout
|
||||||
|
|||||||
15
.github/workflows/docs-release.yml
vendored
15
.github/workflows/docs-release.yml
vendored
@@ -19,10 +19,17 @@ on:
|
|||||||
types:
|
types:
|
||||||
- released
|
- released
|
||||||
|
|
||||||
|
env:
|
||||||
|
SETUP_BUILDX_VERSION: "edge"
|
||||||
|
SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
open-pr:
|
open-pr:
|
||||||
runs-on: ubuntu-24.04
|
runs-on: ubuntu-24.04
|
||||||
if: ${{ (github.event.release.prerelease != true || github.event.inputs.tag != '') && github.repository == 'docker/buildx' }}
|
if: ${{ (github.event.release.prerelease != true || github.event.inputs.tag != '') && github.repository == 'docker/buildx' }}
|
||||||
|
permissions:
|
||||||
|
contents: write
|
||||||
|
pull-requests: write
|
||||||
steps:
|
steps:
|
||||||
-
|
-
|
||||||
name: Checkout docs repo
|
name: Checkout docs repo
|
||||||
@@ -43,9 +50,13 @@ jobs:
|
|||||||
-
|
-
|
||||||
name: Set up Docker Buildx
|
name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v3
|
uses: docker/setup-buildx-action@v3
|
||||||
|
with:
|
||||||
|
version: ${{ env.SETUP_BUILDX_VERSION }}
|
||||||
|
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
|
||||||
|
buildkitd-flags: --debug
|
||||||
-
|
-
|
||||||
name: Generate yaml
|
name: Generate yaml
|
||||||
uses: docker/bake-action@v5
|
uses: docker/bake-action@v6
|
||||||
with:
|
with:
|
||||||
source: ${{ github.server_url }}/${{ github.repository }}.git#${{ env.RELEASE_NAME }}
|
source: ${{ github.server_url }}/${{ github.repository }}.git#${{ env.RELEASE_NAME }}
|
||||||
targets: update-docs
|
targets: update-docs
|
||||||
@@ -66,7 +77,7 @@ jobs:
|
|||||||
VENDOR_MODULE: github.com/docker/buildx@${{ env.RELEASE_NAME }}
|
VENDOR_MODULE: github.com/docker/buildx@${{ env.RELEASE_NAME }}
|
||||||
-
|
-
|
||||||
name: Create PR on docs repo
|
name: Create PR on docs repo
|
||||||
uses: peter-evans/create-pull-request@5e914681df9dc83aa4e4905692ca88beb2f9e91f # v7.0.5
|
uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
|
||||||
with:
|
with:
|
||||||
token: ${{ secrets.GHPAT_DOCS_DISPATCH }}
|
token: ${{ secrets.GHPAT_DOCS_DISPATCH }}
|
||||||
push-to-fork: docker-tools-robot/docker.github.io
|
push-to-fork: docker-tools-robot/docker.github.io
|
||||||
|
|||||||
15
.github/workflows/docs-upstream.yml
vendored
15
.github/workflows/docs-upstream.yml
vendored
@@ -29,21 +29,24 @@ on:
|
|||||||
- '.github/workflows/docs-upstream.yml'
|
- '.github/workflows/docs-upstream.yml'
|
||||||
- 'docs/**'
|
- 'docs/**'
|
||||||
|
|
||||||
|
env:
|
||||||
|
SETUP_BUILDX_VERSION: "edge"
|
||||||
|
SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
docs-yaml:
|
docs-yaml:
|
||||||
runs-on: ubuntu-24.04
|
runs-on: ubuntu-24.04
|
||||||
steps:
|
steps:
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
-
|
-
|
||||||
name: Set up Docker Buildx
|
name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v3
|
uses: docker/setup-buildx-action@v3
|
||||||
with:
|
with:
|
||||||
version: latest
|
version: ${{ env.SETUP_BUILDX_VERSION }}
|
||||||
|
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
|
||||||
|
buildkitd-flags: --debug
|
||||||
-
|
-
|
||||||
name: Build reference YAML docs
|
name: Build reference YAML docs
|
||||||
uses: docker/bake-action@v5
|
uses: docker/bake-action@v6
|
||||||
with:
|
with:
|
||||||
targets: update-docs
|
targets: update-docs
|
||||||
provenance: false
|
provenance: false
|
||||||
@@ -62,7 +65,7 @@ jobs:
|
|||||||
retention-days: 1
|
retention-days: 1
|
||||||
|
|
||||||
validate:
|
validate:
|
||||||
uses: docker/docs/.github/workflows/validate-upstream.yml@6b73b05acb21edf7995cc5b3c6672d8e314cee7a # pin for artifact v4 support: https://github.com/docker/docs/pull/19220
|
uses: docker/docs/.github/workflows/validate-upstream.yml@main
|
||||||
needs:
|
needs:
|
||||||
- docs-yaml
|
- docs-yaml
|
||||||
with:
|
with:
|
||||||
|
|||||||
91
.github/workflows/e2e.yml
vendored
91
.github/workflows/e2e.yml
vendored
@@ -26,23 +26,25 @@ on:
|
|||||||
- 'docs/**'
|
- 'docs/**'
|
||||||
|
|
||||||
env:
|
env:
|
||||||
|
SETUP_BUILDX_VERSION: "edge"
|
||||||
|
SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
|
||||||
DESTDIR: "./bin"
|
DESTDIR: "./bin"
|
||||||
K3S_VERSION: "v1.21.2-k3s1"
|
K3S_VERSION: "v1.32.2+k3s1"
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
build:
|
||||||
runs-on: ubuntu-24.04
|
runs-on: ubuntu-24.04
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
-
|
-
|
||||||
name: Set up Docker Buildx
|
name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v3
|
uses: docker/setup-buildx-action@v3
|
||||||
with:
|
with:
|
||||||
version: latest
|
version: ${{ env.SETUP_BUILDX_VERSION }}
|
||||||
|
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
|
||||||
|
buildkitd-flags: --debug
|
||||||
-
|
-
|
||||||
name: Build
|
name: Build
|
||||||
uses: docker/bake-action@v5
|
uses: docker/bake-action@v6
|
||||||
with:
|
with:
|
||||||
targets: binaries
|
targets: binaries
|
||||||
set: |
|
set: |
|
||||||
@@ -63,7 +65,7 @@ jobs:
|
|||||||
retention-days: 7
|
retention-days: 7
|
||||||
|
|
||||||
driver:
|
driver:
|
||||||
runs-on: ubuntu-20.04
|
runs-on: ubuntu-24.04
|
||||||
needs:
|
needs:
|
||||||
- build
|
- build
|
||||||
strategy:
|
strategy:
|
||||||
@@ -151,7 +153,7 @@ jobs:
|
|||||||
-
|
-
|
||||||
name: Install k3s
|
name: Install k3s
|
||||||
if: matrix.driver == 'kubernetes'
|
if: matrix.driver == 'kubernetes'
|
||||||
uses: crazy-max/.github/.github/actions/install-k3s@fa6141aedf23596fb8bdcceab9cce8dadaa31bd9
|
uses: crazy-max/.github/.github/actions/install-k3s@7730d1434364d4b9aded32735b078a7ace5ea79a
|
||||||
with:
|
with:
|
||||||
version: ${{ env.K3S_VERSION }}
|
version: ${{ env.K3S_VERSION }}
|
||||||
-
|
-
|
||||||
@@ -175,3 +177,78 @@ jobs:
|
|||||||
DRIVER_OPT: ${{ matrix.driver-opt }}
|
DRIVER_OPT: ${{ matrix.driver-opt }}
|
||||||
ENDPOINT: ${{ matrix.endpoint }}
|
ENDPOINT: ${{ matrix.endpoint }}
|
||||||
PLATFORMS: ${{ matrix.platforms }}
|
PLATFORMS: ${{ matrix.platforms }}
|
||||||
|
|
||||||
|
bake:
|
||||||
|
runs-on: ubuntu-24.04
|
||||||
|
needs:
|
||||||
|
- build
|
||||||
|
env:
|
||||||
|
DOCKER_BUILD_CHECKS_ANNOTATIONS: false
|
||||||
|
DOCKER_BUILD_SUMMARY: false
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
include:
|
||||||
|
-
|
||||||
|
# https://github.com/docker/bake-action/blob/v5.11.0/.github/workflows/ci.yml#L227-L237
|
||||||
|
source: "https://github.com/docker/bake-action.git#v5.11.0:test/go"
|
||||||
|
overrides: |
|
||||||
|
*.output=/tmp/bake-build
|
||||||
|
-
|
||||||
|
# https://github.com/tonistiigi/xx/blob/2fc85604e7280bfb3f626569bd4c5413c43eb4af/.github/workflows/ld.yml#L90-L98
|
||||||
|
source: "https://github.com/tonistiigi/xx.git#2fc85604e7280bfb3f626569bd4c5413c43eb4af"
|
||||||
|
targets: |
|
||||||
|
ld64-static-tgz
|
||||||
|
overrides: |
|
||||||
|
ld64-static-tgz.output=type=local,dest=./dist
|
||||||
|
ld64-static-tgz.platform=linux/amd64
|
||||||
|
ld64-static-tgz.cache-from=type=gha,scope=xx-ld64-static-tgz
|
||||||
|
ld64-static-tgz.cache-to=type=gha,scope=xx-ld64-static-tgz
|
||||||
|
-
|
||||||
|
# https://github.com/moby/buildkit-bench/blob/54c194011c4fc99a94aa75d4b3d4f3ffd4c4ce27/docker-bake.hcl#L154-L160
|
||||||
|
source: "https://github.com/moby/buildkit-bench.git#54c194011c4fc99a94aa75d4b3d4f3ffd4c4ce27"
|
||||||
|
targets: |
|
||||||
|
tests-buildkit
|
||||||
|
envs: |
|
||||||
|
BUILDKIT_REFS=v0.18.2
|
||||||
|
steps:
|
||||||
|
-
|
||||||
|
name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
-
|
||||||
|
name: Expose GitHub Runtime
|
||||||
|
uses: crazy-max/ghaction-github-runtime@v3
|
||||||
|
-
|
||||||
|
name: Environment variables
|
||||||
|
if: matrix.envs != ''
|
||||||
|
run: |
|
||||||
|
for l in "${{ matrix.envs }}"; do
|
||||||
|
echo "${l?}" >> $GITHUB_ENV
|
||||||
|
done
|
||||||
|
-
|
||||||
|
name: Set up QEMU
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
|
-
|
||||||
|
name: Install buildx
|
||||||
|
uses: actions/download-artifact@v4
|
||||||
|
with:
|
||||||
|
name: binary
|
||||||
|
path: /home/runner/.docker/cli-plugins
|
||||||
|
-
|
||||||
|
name: Fix perms and check
|
||||||
|
run: |
|
||||||
|
chmod +x /home/runner/.docker/cli-plugins/docker-buildx
|
||||||
|
docker buildx version
|
||||||
|
-
|
||||||
|
name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
with:
|
||||||
|
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
|
||||||
|
buildkitd-flags: --debug
|
||||||
|
-
|
||||||
|
name: Build
|
||||||
|
uses: docker/bake-action@v6
|
||||||
|
with:
|
||||||
|
source: ${{ matrix.source }}
|
||||||
|
targets: ${{ matrix.targets }}
|
||||||
|
set: ${{ matrix.overrides }}
|
||||||
|
|||||||
8
.github/workflows/labeler.yml
vendored
8
.github/workflows/labeler.yml
vendored
@@ -18,10 +18,12 @@ on:
|
|||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
labeler:
|
labeler:
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
pull-requests: write
|
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
permissions:
|
||||||
|
# same as global permission
|
||||||
|
contents: read
|
||||||
|
# required for writing labels
|
||||||
|
pull-requests: write
|
||||||
steps:
|
steps:
|
||||||
-
|
-
|
||||||
name: Run
|
name: Run
|
||||||
|
|||||||
13
.github/workflows/validate.yml
vendored
13
.github/workflows/validate.yml
vendored
@@ -25,6 +25,10 @@ on:
|
|||||||
paths-ignore:
|
paths-ignore:
|
||||||
- '.github/releases.json'
|
- '.github/releases.json'
|
||||||
|
|
||||||
|
env:
|
||||||
|
SETUP_BUILDX_VERSION: "edge"
|
||||||
|
SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
prepare:
|
prepare:
|
||||||
runs-on: ubuntu-24.04
|
runs-on: ubuntu-24.04
|
||||||
@@ -90,17 +94,16 @@ jobs:
|
|||||||
if [ "$GITHUB_REPOSITORY" = "docker/buildx" ]; then
|
if [ "$GITHUB_REPOSITORY" = "docker/buildx" ]; then
|
||||||
echo "GOLANGCI_LINT_MULTIPLATFORM=1" >> $GITHUB_ENV
|
echo "GOLANGCI_LINT_MULTIPLATFORM=1" >> $GITHUB_ENV
|
||||||
fi
|
fi
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
-
|
-
|
||||||
name: Set up Docker Buildx
|
name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v3
|
uses: docker/setup-buildx-action@v3
|
||||||
with:
|
with:
|
||||||
version: latest
|
version: ${{ env.SETUP_BUILDX_VERSION }}
|
||||||
|
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
|
||||||
|
buildkitd-flags: --debug
|
||||||
-
|
-
|
||||||
name: Validate
|
name: Validate
|
||||||
uses: docker/bake-action@v5
|
uses: docker/bake-action@v6
|
||||||
with:
|
with:
|
||||||
targets: ${{ matrix.target }}
|
targets: ${{ matrix.target }}
|
||||||
set: |
|
set: |
|
||||||
|
|||||||
@@ -1,26 +1,55 @@
|
|||||||
run:
|
run:
|
||||||
timeout: 30m
|
timeout: 30m
|
||||||
|
|
||||||
modules-download-mode: vendor
|
modules-download-mode: vendor
|
||||||
|
# default uses Go version from the go.mod file, fallback on the env var
|
||||||
|
# `GOVERSION`, fallback on 1.17: https://golangci-lint.run/usage/configuration/#run-configuration
|
||||||
|
go: "1.23"
|
||||||
|
|
||||||
linters:
|
linters:
|
||||||
enable:
|
enable:
|
||||||
- gofmt
|
- bodyclose
|
||||||
- govet
|
|
||||||
- depguard
|
- depguard
|
||||||
|
- forbidigo
|
||||||
|
- gocritic
|
||||||
|
- gofmt
|
||||||
- goimports
|
- goimports
|
||||||
|
- gosec
|
||||||
|
- gosimple
|
||||||
|
- govet
|
||||||
- ineffassign
|
- ineffassign
|
||||||
|
- makezero
|
||||||
- misspell
|
- misspell
|
||||||
- unused
|
- noctx
|
||||||
|
- nolintlint
|
||||||
- revive
|
- revive
|
||||||
- staticcheck
|
- staticcheck
|
||||||
|
- testifylint
|
||||||
- typecheck
|
- typecheck
|
||||||
- nolintlint
|
- unused
|
||||||
- gosec
|
- whitespace
|
||||||
- forbidigo
|
|
||||||
disable-all: true
|
disable-all: true
|
||||||
|
|
||||||
linters-settings:
|
linters-settings:
|
||||||
|
gocritic:
|
||||||
|
disabled-checks:
|
||||||
|
- "ifElseChain"
|
||||||
|
- "assignOp"
|
||||||
|
- "appendAssign"
|
||||||
|
- "singleCaseSwitch"
|
||||||
|
- "exitAfterDefer" # FIXME
|
||||||
|
importas:
|
||||||
|
alias:
|
||||||
|
# Enforce alias to prevent it accidentally being used instead of
|
||||||
|
# buildkit errdefs package (or vice-versa).
|
||||||
|
- pkg: "github.com/containerd/errdefs"
|
||||||
|
alias: "cerrdefs"
|
||||||
|
# Use a consistent alias to prevent confusion with "github.com/moby/buildkit/client"
|
||||||
|
- pkg: "github.com/docker/docker/client"
|
||||||
|
alias: "dockerclient"
|
||||||
|
- pkg: "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
|
alias: "ocispecs"
|
||||||
|
- pkg: "github.com/opencontainers/go-digest"
|
||||||
|
alias: "digest"
|
||||||
govet:
|
govet:
|
||||||
enable:
|
enable:
|
||||||
- nilness
|
- nilness
|
||||||
@@ -43,14 +72,27 @@ linters-settings:
|
|||||||
desc: The io/ioutil package has been deprecated.
|
desc: The io/ioutil package has been deprecated.
|
||||||
forbidigo:
|
forbidigo:
|
||||||
forbid:
|
forbid:
|
||||||
|
- '^context\.WithCancel(# use context\.WithCancelCause instead)?$'
|
||||||
|
- '^context\.WithDeadline(# use context\.WithDeadline instead)?$'
|
||||||
|
- '^context\.WithTimeout(# use context\.WithTimeoutCause instead)?$'
|
||||||
|
- '^ctx\.Err(# use context\.Cause instead)?$'
|
||||||
- '^fmt\.Errorf(# use errors\.Errorf instead)?$'
|
- '^fmt\.Errorf(# use errors\.Errorf instead)?$'
|
||||||
- '^platforms\.DefaultString(# use platforms\.Format(platforms\.DefaultSpec()) instead\.)?$'
|
- '^platforms\.DefaultString(# use platforms\.Format(platforms\.DefaultSpec()) instead\.)?$'
|
||||||
gosec:
|
gosec:
|
||||||
excludes:
|
excludes:
|
||||||
- G204 # Audit use of command execution
|
- G204 # Audit use of command execution
|
||||||
- G402 # TLS MinVersion too low
|
- G402 # TLS MinVersion too low
|
||||||
|
- G115 # integer overflow conversion (TODO: verify these)
|
||||||
config:
|
config:
|
||||||
G306: "0644"
|
G306: "0644"
|
||||||
|
testifylint:
|
||||||
|
disable:
|
||||||
|
# disable rules that reduce the test condition
|
||||||
|
- "empty"
|
||||||
|
- "bool-compare"
|
||||||
|
- "len"
|
||||||
|
- "negative-positive"
|
||||||
|
|
||||||
|
|
||||||
issues:
|
issues:
|
||||||
exclude-files:
|
exclude-files:
|
||||||
|
|||||||
36
Dockerfile
36
Dockerfile
@@ -1,20 +1,27 @@
|
|||||||
# syntax=docker/dockerfile:1
|
# syntax=docker/dockerfile:1
|
||||||
|
|
||||||
ARG GO_VERSION=1.22
|
ARG GO_VERSION=1.23
|
||||||
ARG XX_VERSION=1.5.0
|
ARG ALPINE_VERSION=3.21
|
||||||
|
ARG XX_VERSION=1.6.1
|
||||||
|
|
||||||
# for testing
|
# for testing
|
||||||
ARG DOCKER_VERSION=27.2.1
|
ARG DOCKER_VERSION=28.0.0
|
||||||
|
ARG DOCKER_VERSION_ALT_27=27.5.1
|
||||||
|
ARG DOCKER_VERSION_ALT_26=26.1.3
|
||||||
ARG DOCKER_CLI_VERSION=${DOCKER_VERSION}
|
ARG DOCKER_CLI_VERSION=${DOCKER_VERSION}
|
||||||
ARG GOTESTSUM_VERSION=v1.9.0
|
ARG GOTESTSUM_VERSION=v1.12.0
|
||||||
ARG REGISTRY_VERSION=2.8.0
|
ARG REGISTRY_VERSION=2.8.3
|
||||||
ARG BUILDKIT_VERSION=v0.16.0
|
ARG BUILDKIT_VERSION=v0.20.1
|
||||||
ARG UNDOCK_VERSION=0.7.0
|
ARG UNDOCK_VERSION=0.9.0
|
||||||
|
|
||||||
FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
|
FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
|
||||||
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS golatest
|
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS golatest
|
||||||
FROM moby/moby-bin:$DOCKER_VERSION AS docker-engine
|
FROM moby/moby-bin:$DOCKER_VERSION AS docker-engine
|
||||||
FROM dockereng/cli-bin:$DOCKER_CLI_VERSION AS docker-cli
|
FROM dockereng/cli-bin:$DOCKER_CLI_VERSION AS docker-cli
|
||||||
|
FROM moby/moby-bin:$DOCKER_VERSION_ALT_27 AS docker-engine-alt27
|
||||||
|
FROM moby/moby-bin:$DOCKER_VERSION_ALT_26 AS docker-engine-alt26
|
||||||
|
FROM dockereng/cli-bin:$DOCKER_VERSION_ALT_27 AS docker-cli-alt27
|
||||||
|
FROM dockereng/cli-bin:$DOCKER_VERSION_ALT_26 AS docker-cli-alt26
|
||||||
FROM registry:$REGISTRY_VERSION AS registry
|
FROM registry:$REGISTRY_VERSION AS registry
|
||||||
FROM moby/buildkit:$BUILDKIT_VERSION AS buildkit
|
FROM moby/buildkit:$BUILDKIT_VERSION AS buildkit
|
||||||
FROM crazymax/undock:$UNDOCK_VERSION AS undock
|
FROM crazymax/undock:$UNDOCK_VERSION AS undock
|
||||||
@@ -77,6 +84,7 @@ RUN --mount=type=bind,target=. \
|
|||||||
set -e
|
set -e
|
||||||
xx-go --wrap
|
xx-go --wrap
|
||||||
DESTDIR=/usr/bin VERSION=$(cat /buildx-version/version) REVISION=$(cat /buildx-version/revision) GO_EXTRA_LDFLAGS="-s -w" ./hack/build
|
DESTDIR=/usr/bin VERSION=$(cat /buildx-version/version) REVISION=$(cat /buildx-version/revision) GO_EXTRA_LDFLAGS="-s -w" ./hack/build
|
||||||
|
file /usr/bin/docker-buildx
|
||||||
xx-verify --static /usr/bin/docker-buildx
|
xx-verify --static /usr/bin/docker-buildx
|
||||||
EOT
|
EOT
|
||||||
|
|
||||||
@@ -95,7 +103,10 @@ FROM scratch AS binaries-unix
|
|||||||
COPY --link --from=buildx-build /usr/bin/docker-buildx /buildx
|
COPY --link --from=buildx-build /usr/bin/docker-buildx /buildx
|
||||||
|
|
||||||
FROM binaries-unix AS binaries-darwin
|
FROM binaries-unix AS binaries-darwin
|
||||||
|
FROM binaries-unix AS binaries-freebsd
|
||||||
FROM binaries-unix AS binaries-linux
|
FROM binaries-unix AS binaries-linux
|
||||||
|
FROM binaries-unix AS binaries-netbsd
|
||||||
|
FROM binaries-unix AS binaries-openbsd
|
||||||
|
|
||||||
FROM scratch AS binaries-windows
|
FROM scratch AS binaries-windows
|
||||||
COPY --link --from=buildx-build /usr/bin/docker-buildx /buildx.exe
|
COPY --link --from=buildx-build /usr/bin/docker-buildx /buildx.exe
|
||||||
@@ -120,16 +131,21 @@ COPY --link --from=gotestsum /out /usr/bin/
|
|||||||
COPY --link --from=registry /bin/registry /usr/bin/
|
COPY --link --from=registry /bin/registry /usr/bin/
|
||||||
COPY --link --from=docker-engine / /usr/bin/
|
COPY --link --from=docker-engine / /usr/bin/
|
||||||
COPY --link --from=docker-cli / /usr/bin/
|
COPY --link --from=docker-cli / /usr/bin/
|
||||||
|
COPY --link --from=docker-engine-alt27 / /opt/docker-alt-27/
|
||||||
|
COPY --link --from=docker-engine-alt26 / /opt/docker-alt-26/
|
||||||
|
COPY --link --from=docker-cli-alt27 / /opt/docker-alt-27/
|
||||||
|
COPY --link --from=docker-cli-alt26 / /opt/docker-alt-26/
|
||||||
COPY --link --from=buildkit /usr/bin/buildkitd /usr/bin/
|
COPY --link --from=buildkit /usr/bin/buildkitd /usr/bin/
|
||||||
COPY --link --from=buildkit /usr/bin/buildctl /usr/bin/
|
COPY --link --from=buildkit /usr/bin/buildctl /usr/bin/
|
||||||
COPY --link --from=undock /usr/local/bin/undock /usr/bin/
|
COPY --link --from=undock /usr/local/bin/undock /usr/bin/
|
||||||
COPY --link --from=binaries /buildx /usr/bin/
|
COPY --link --from=binaries /buildx /usr/bin/
|
||||||
|
ENV TEST_DOCKER_EXTRA="docker@27.5=/opt/docker-alt-27,docker@26.1=/opt/docker-alt-26"
|
||||||
|
|
||||||
FROM integration-test-base AS integration-test
|
FROM integration-test-base AS integration-test
|
||||||
COPY . .
|
COPY . .
|
||||||
|
|
||||||
# Release
|
# Release
|
||||||
FROM --platform=$BUILDPLATFORM alpine AS releaser
|
FROM --platform=$BUILDPLATFORM alpine:${ALPINE_VERSION} AS releaser
|
||||||
WORKDIR /work
|
WORKDIR /work
|
||||||
ARG TARGETPLATFORM
|
ARG TARGETPLATFORM
|
||||||
RUN --mount=from=binaries \
|
RUN --mount=from=binaries \
|
||||||
@@ -144,7 +160,7 @@ COPY --from=releaser /out/ /
|
|||||||
|
|
||||||
# Shell
|
# Shell
|
||||||
FROM docker:$DOCKER_VERSION AS dockerd-release
|
FROM docker:$DOCKER_VERSION AS dockerd-release
|
||||||
FROM alpine AS shell
|
FROM alpine:${ALPINE_VERSION} AS shell
|
||||||
RUN apk add --no-cache iptables tmux git vim less openssh
|
RUN apk add --no-cache iptables tmux git vim less openssh
|
||||||
RUN mkdir -p /usr/local/lib/docker/cli-plugins && ln -s /usr/local/bin/buildx /usr/local/lib/docker/cli-plugins/docker-buildx
|
RUN mkdir -p /usr/local/lib/docker/cli-plugins && ln -s /usr/local/bin/buildx /usr/local/lib/docker/cli-plugins/docker-buildx
|
||||||
COPY ./hack/demo-env/entrypoint.sh /usr/local/bin
|
COPY ./hack/demo-env/entrypoint.sh /usr/local/bin
|
||||||
|
|||||||
609
bake/bake.go
609
bake/bake.go
@@ -2,6 +2,7 @@ package bake
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
@@ -26,9 +27,7 @@ import (
|
|||||||
"github.com/moby/buildkit/client"
|
"github.com/moby/buildkit/client"
|
||||||
"github.com/moby/buildkit/client/llb"
|
"github.com/moby/buildkit/client/llb"
|
||||||
"github.com/moby/buildkit/session/auth/authprovider"
|
"github.com/moby/buildkit/session/auth/authprovider"
|
||||||
"github.com/moby/buildkit/util/entitlements"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/tonistiigi/go-csvvalue"
|
|
||||||
"github.com/zclconf/go-cty/cty"
|
"github.com/zclconf/go-cty/cty"
|
||||||
"github.com/zclconf/go-cty/cty/convert"
|
"github.com/zclconf/go-cty/cty/convert"
|
||||||
)
|
)
|
||||||
@@ -46,6 +45,7 @@ type File struct {
|
|||||||
type Override struct {
|
type Override struct {
|
||||||
Value string
|
Value string
|
||||||
ArrValue []string
|
ArrValue []string
|
||||||
|
Append bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func defaultFilenames() []string {
|
func defaultFilenames() []string {
|
||||||
@@ -53,8 +53,8 @@ func defaultFilenames() []string {
|
|||||||
names = append(names, composecli.DefaultFileNames...)
|
names = append(names, composecli.DefaultFileNames...)
|
||||||
names = append(names, []string{
|
names = append(names, []string{
|
||||||
"docker-bake.json",
|
"docker-bake.json",
|
||||||
"docker-bake.override.json",
|
|
||||||
"docker-bake.hcl",
|
"docker-bake.hcl",
|
||||||
|
"docker-bake.override.json",
|
||||||
"docker-bake.override.hcl",
|
"docker-bake.override.hcl",
|
||||||
}...)
|
}...)
|
||||||
return names
|
return names
|
||||||
@@ -193,7 +193,7 @@ func ListTargets(files []File) ([]string, error) {
|
|||||||
return dedupSlice(targets), nil
|
return dedupSlice(targets), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func ReadTargets(ctx context.Context, files []File, targets, overrides []string, defaults map[string]string) (map[string]*Target, map[string]*Group, error) {
|
func ReadTargets(ctx context.Context, files []File, targets, overrides []string, defaults map[string]string, ent *EntitlementConf) (map[string]*Target, map[string]*Group, error) {
|
||||||
c, _, err := ParseFiles(files, defaults)
|
c, _, err := ParseFiles(files, defaults)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
@@ -207,23 +207,24 @@ func ReadTargets(ctx context.Context, files []File, targets, overrides []string,
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
m := map[string]*Target{}
|
|
||||||
n := map[string]*Group{}
|
targetsMap := map[string]*Target{}
|
||||||
|
groupsMap := map[string]*Group{}
|
||||||
for _, target := range targets {
|
for _, target := range targets {
|
||||||
ts, gs := c.ResolveGroup(target)
|
ts, gs := c.ResolveGroup(target)
|
||||||
for _, tname := range ts {
|
for _, tname := range ts {
|
||||||
t, err := c.ResolveTarget(tname, o)
|
t, err := c.ResolveTarget(tname, o, ent)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
if t != nil {
|
if t != nil {
|
||||||
m[tname] = t
|
targetsMap[tname] = t
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for _, gname := range gs {
|
for _, gname := range gs {
|
||||||
for _, group := range c.Groups {
|
for _, group := range c.Groups {
|
||||||
if group.Name == gname {
|
if group.Name == gname {
|
||||||
n[gname] = group
|
groupsMap[gname] = group
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -231,25 +232,26 @@ func ReadTargets(ctx context.Context, files []File, targets, overrides []string,
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, target := range targets {
|
for _, target := range targets {
|
||||||
if target == "default" {
|
if _, ok := groupsMap["default"]; ok && target == "default" {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if _, ok := n["default"]; !ok {
|
if _, ok := groupsMap["default"]; !ok {
|
||||||
n["default"] = &Group{Name: "default"}
|
groupsMap["default"] = &Group{Name: "default"}
|
||||||
}
|
}
|
||||||
n["default"].Targets = append(n["default"].Targets, target)
|
groupsMap["default"].Targets = append(groupsMap["default"].Targets, target)
|
||||||
}
|
}
|
||||||
if g, ok := n["default"]; ok {
|
if g, ok := groupsMap["default"]; ok {
|
||||||
g.Targets = dedupSlice(g.Targets)
|
g.Targets = dedupSlice(g.Targets)
|
||||||
|
sort.Strings(g.Targets)
|
||||||
}
|
}
|
||||||
|
|
||||||
for name, t := range m {
|
for name, t := range targetsMap {
|
||||||
if err := c.loadLinks(name, t, m, o, nil); err != nil {
|
if err := c.loadLinks(name, t, targetsMap, o, nil, ent); err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return m, n, nil
|
return targetsMap, groupsMap, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func dedupSlice(s []string) []string {
|
func dedupSlice(s []string) []string {
|
||||||
@@ -476,7 +478,7 @@ func (c Config) expandTargets(pattern string) ([]string, error) {
|
|||||||
return names, nil
|
return names, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c Config) loadLinks(name string, t *Target, m map[string]*Target, o map[string]map[string]Override, visited []string) error {
|
func (c Config) loadLinks(name string, t *Target, m map[string]*Target, o map[string]map[string]Override, visited []string, ent *EntitlementConf) error {
|
||||||
visited = append(visited, name)
|
visited = append(visited, name)
|
||||||
for _, v := range t.Contexts {
|
for _, v := range t.Contexts {
|
||||||
if strings.HasPrefix(v, "target:") {
|
if strings.HasPrefix(v, "target:") {
|
||||||
@@ -484,23 +486,23 @@ func (c Config) loadLinks(name string, t *Target, m map[string]*Target, o map[st
|
|||||||
if target == name {
|
if target == name {
|
||||||
return errors.Errorf("target %s cannot link to itself", target)
|
return errors.Errorf("target %s cannot link to itself", target)
|
||||||
}
|
}
|
||||||
for _, v := range visited {
|
if slices.Contains(visited, target) {
|
||||||
if v == target {
|
return errors.Errorf("infinite loop from %s to %s", name, target)
|
||||||
return errors.Errorf("infinite loop from %s to %s", name, target)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
t2, ok := m[target]
|
t2, ok := m[target]
|
||||||
if !ok {
|
if !ok {
|
||||||
var err error
|
var err error
|
||||||
t2, err = c.ResolveTarget(target, o)
|
t2, err = c.ResolveTarget(target, o, ent)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
t2.Outputs = []string{"type=cacheonly"}
|
t2.Outputs = []*buildflags.ExportEntry{
|
||||||
|
{Type: "cacheonly"},
|
||||||
|
}
|
||||||
t2.linked = true
|
t2.linked = true
|
||||||
m[target] = t2
|
m[target] = t2
|
||||||
}
|
}
|
||||||
if err := c.loadLinks(target, t2, m, o, visited); err != nil {
|
if err := c.loadLinks(target, t2, m, o, visited, ent); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -512,8 +514,8 @@ func (c Config) loadLinks(name string, t *Target, m map[string]*Target, o map[st
|
|||||||
}
|
}
|
||||||
|
|
||||||
if len(t.Platforms) > 1 && len(t2.Platforms) > 1 {
|
if len(t.Platforms) > 1 && len(t2.Platforms) > 1 {
|
||||||
if !sliceEqual(t.Platforms, t2.Platforms) {
|
if !isSubset(t.Platforms, t2.Platforms) {
|
||||||
return errors.Errorf("target %s can't be used by %s because it is defined for different platforms %v and %v", target, name, t2.Platforms, t.Platforms)
|
return errors.Errorf("target %s can't be used by %s because its platforms %v are not a subset of %v", target, name, t.Platforms, t2.Platforms)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -525,9 +527,12 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
|
|||||||
m := map[string]map[string]Override{}
|
m := map[string]map[string]Override{}
|
||||||
for _, v := range v {
|
for _, v := range v {
|
||||||
parts := strings.SplitN(v, "=", 2)
|
parts := strings.SplitN(v, "=", 2)
|
||||||
keys := strings.SplitN(parts[0], ".", 3)
|
|
||||||
|
skey := strings.TrimSuffix(parts[0], "+")
|
||||||
|
appendTo := strings.HasSuffix(parts[0], "+")
|
||||||
|
keys := strings.SplitN(skey, ".", 3)
|
||||||
if len(keys) < 2 {
|
if len(keys) < 2 {
|
||||||
return nil, errors.Errorf("invalid override key %s, expected target.name", parts[0])
|
return nil, errors.Errorf("invalid override key %s, expected target.name", skey)
|
||||||
}
|
}
|
||||||
|
|
||||||
pattern := keys[0]
|
pattern := keys[0]
|
||||||
@@ -540,8 +545,7 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
kk := strings.SplitN(parts[0], ".", 2)
|
okey := strings.Join(keys[1:], ".")
|
||||||
|
|
||||||
for _, name := range names {
|
for _, name := range names {
|
||||||
t, ok := m[name]
|
t, ok := m[name]
|
||||||
if !ok {
|
if !ok {
|
||||||
@@ -549,12 +553,15 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
|
|||||||
m[name] = t
|
m[name] = t
|
||||||
}
|
}
|
||||||
|
|
||||||
o := t[kk[1]]
|
override := t[okey]
|
||||||
|
|
||||||
|
// IMPORTANT: if you add more fields here, do not forget to update
|
||||||
|
// docs/reference/buildx_bake.md (--set) and https://docs.docker.com/build/bake/overrides/
|
||||||
switch keys[1] {
|
switch keys[1] {
|
||||||
case "output", "cache-to", "cache-from", "tags", "platform", "secrets", "ssh", "attest", "entitlements", "network":
|
case "output", "cache-to", "cache-from", "tags", "platform", "secrets", "ssh", "attest", "entitlements", "network", "annotations":
|
||||||
if len(parts) == 2 {
|
if len(parts) == 2 {
|
||||||
o.ArrValue = append(o.ArrValue, parts[1])
|
override.Append = appendTo
|
||||||
|
override.ArrValue = append(override.ArrValue, parts[1])
|
||||||
}
|
}
|
||||||
case "args":
|
case "args":
|
||||||
if len(keys) != 3 {
|
if len(keys) != 3 {
|
||||||
@@ -565,7 +572,7 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
|
|||||||
if !ok {
|
if !ok {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
o.Value = v
|
override.Value = v
|
||||||
}
|
}
|
||||||
fallthrough
|
fallthrough
|
||||||
case "contexts":
|
case "contexts":
|
||||||
@@ -575,11 +582,11 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
|
|||||||
fallthrough
|
fallthrough
|
||||||
default:
|
default:
|
||||||
if len(parts) == 2 {
|
if len(parts) == 2 {
|
||||||
o.Value = parts[1]
|
override.Value = parts[1]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
t[kk[1]] = o
|
t[okey] = override
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return m, nil
|
return m, nil
|
||||||
@@ -627,8 +634,8 @@ func (c Config) group(name string, visited map[string]visit) ([]string, []string
|
|||||||
return targets, groups
|
return targets, groups
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c Config) ResolveTarget(name string, overrides map[string]map[string]Override) (*Target, error) {
|
func (c Config) ResolveTarget(name string, overrides map[string]map[string]Override, ent *EntitlementConf) (*Target, error) {
|
||||||
t, err := c.target(name, map[string]*Target{}, overrides)
|
t, err := c.target(name, map[string]*Target{}, overrides, ent)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -644,7 +651,7 @@ func (c Config) ResolveTarget(name string, overrides map[string]map[string]Overr
|
|||||||
return t, nil
|
return t, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c Config) target(name string, visited map[string]*Target, overrides map[string]map[string]Override) (*Target, error) {
|
func (c Config) target(name string, visited map[string]*Target, overrides map[string]map[string]Override, ent *EntitlementConf) (*Target, error) {
|
||||||
if t, ok := visited[name]; ok {
|
if t, ok := visited[name]; ok {
|
||||||
return t, nil
|
return t, nil
|
||||||
}
|
}
|
||||||
@@ -661,7 +668,7 @@ func (c Config) target(name string, visited map[string]*Target, overrides map[st
|
|||||||
}
|
}
|
||||||
tt := &Target{}
|
tt := &Target{}
|
||||||
for _, name := range t.Inherits {
|
for _, name := range t.Inherits {
|
||||||
t, err := c.target(name, visited, overrides)
|
t, err := c.target(name, visited, overrides, ent)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -673,7 +680,7 @@ func (c Config) target(name string, visited map[string]*Target, overrides map[st
|
|||||||
m.Merge(tt)
|
m.Merge(tt)
|
||||||
m.Merge(t)
|
m.Merge(t)
|
||||||
tt = m
|
tt = m
|
||||||
if err := tt.AddOverrides(overrides[name]); err != nil {
|
if err := tt.AddOverrides(overrides[name], ent); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
tt.normalize()
|
tt.normalize()
|
||||||
@@ -695,59 +702,61 @@ type Target struct {
|
|||||||
// Inherits is the only field that cannot be overridden with --set
|
// Inherits is the only field that cannot be overridden with --set
|
||||||
Inherits []string `json:"inherits,omitempty" hcl:"inherits,optional" cty:"inherits"`
|
Inherits []string `json:"inherits,omitempty" hcl:"inherits,optional" cty:"inherits"`
|
||||||
|
|
||||||
Annotations []string `json:"annotations,omitempty" hcl:"annotations,optional" cty:"annotations"`
|
Annotations []string `json:"annotations,omitempty" hcl:"annotations,optional" cty:"annotations"`
|
||||||
Attest []string `json:"attest,omitempty" hcl:"attest,optional" cty:"attest"`
|
Attest buildflags.Attests `json:"attest,omitempty" hcl:"attest,optional" cty:"attest"`
|
||||||
Context *string `json:"context,omitempty" hcl:"context,optional" cty:"context"`
|
Context *string `json:"context,omitempty" hcl:"context,optional" cty:"context"`
|
||||||
Contexts map[string]string `json:"contexts,omitempty" hcl:"contexts,optional" cty:"contexts"`
|
Contexts map[string]string `json:"contexts,omitempty" hcl:"contexts,optional" cty:"contexts"`
|
||||||
Dockerfile *string `json:"dockerfile,omitempty" hcl:"dockerfile,optional" cty:"dockerfile"`
|
Dockerfile *string `json:"dockerfile,omitempty" hcl:"dockerfile,optional" cty:"dockerfile"`
|
||||||
DockerfileInline *string `json:"dockerfile-inline,omitempty" hcl:"dockerfile-inline,optional" cty:"dockerfile-inline"`
|
DockerfileInline *string `json:"dockerfile-inline,omitempty" hcl:"dockerfile-inline,optional" cty:"dockerfile-inline"`
|
||||||
Args map[string]*string `json:"args,omitempty" hcl:"args,optional" cty:"args"`
|
Args map[string]*string `json:"args,omitempty" hcl:"args,optional" cty:"args"`
|
||||||
Labels map[string]*string `json:"labels,omitempty" hcl:"labels,optional" cty:"labels"`
|
Labels map[string]*string `json:"labels,omitempty" hcl:"labels,optional" cty:"labels"`
|
||||||
Tags []string `json:"tags,omitempty" hcl:"tags,optional" cty:"tags"`
|
Tags []string `json:"tags,omitempty" hcl:"tags,optional" cty:"tags"`
|
||||||
CacheFrom []string `json:"cache-from,omitempty" hcl:"cache-from,optional" cty:"cache-from"`
|
CacheFrom buildflags.CacheOptions `json:"cache-from,omitempty" hcl:"cache-from,optional" cty:"cache-from"`
|
||||||
CacheTo []string `json:"cache-to,omitempty" hcl:"cache-to,optional" cty:"cache-to"`
|
CacheTo buildflags.CacheOptions `json:"cache-to,omitempty" hcl:"cache-to,optional" cty:"cache-to"`
|
||||||
Target *string `json:"target,omitempty" hcl:"target,optional" cty:"target"`
|
Target *string `json:"target,omitempty" hcl:"target,optional" cty:"target"`
|
||||||
Secrets []string `json:"secret,omitempty" hcl:"secret,optional" cty:"secret"`
|
Secrets buildflags.Secrets `json:"secret,omitempty" hcl:"secret,optional" cty:"secret"`
|
||||||
SSH []string `json:"ssh,omitempty" hcl:"ssh,optional" cty:"ssh"`
|
SSH buildflags.SSHKeys `json:"ssh,omitempty" hcl:"ssh,optional" cty:"ssh"`
|
||||||
Platforms []string `json:"platforms,omitempty" hcl:"platforms,optional" cty:"platforms"`
|
Platforms []string `json:"platforms,omitempty" hcl:"platforms,optional" cty:"platforms"`
|
||||||
Outputs []string `json:"output,omitempty" hcl:"output,optional" cty:"output"`
|
Outputs buildflags.Exports `json:"output,omitempty" hcl:"output,optional" cty:"output"`
|
||||||
Pull *bool `json:"pull,omitempty" hcl:"pull,optional" cty:"pull"`
|
Pull *bool `json:"pull,omitempty" hcl:"pull,optional" cty:"pull"`
|
||||||
NoCache *bool `json:"no-cache,omitempty" hcl:"no-cache,optional" cty:"no-cache"`
|
NoCache *bool `json:"no-cache,omitempty" hcl:"no-cache,optional" cty:"no-cache"`
|
||||||
NetworkMode *string `json:"network,omitempty" hcl:"network,optional" cty:"network"`
|
NetworkMode *string `json:"network,omitempty" hcl:"network,optional" cty:"network"`
|
||||||
NoCacheFilter []string `json:"no-cache-filter,omitempty" hcl:"no-cache-filter,optional" cty:"no-cache-filter"`
|
NoCacheFilter []string `json:"no-cache-filter,omitempty" hcl:"no-cache-filter,optional" cty:"no-cache-filter"`
|
||||||
ShmSize *string `json:"shm-size,omitempty" hcl:"shm-size,optional"`
|
ShmSize *string `json:"shm-size,omitempty" hcl:"shm-size,optional" cty:"shm-size"`
|
||||||
Ulimits []string `json:"ulimits,omitempty" hcl:"ulimits,optional"`
|
Ulimits []string `json:"ulimits,omitempty" hcl:"ulimits,optional" cty:"ulimits"`
|
||||||
Call *string `json:"call,omitempty" hcl:"call,optional" cty:"call"`
|
Call *string `json:"call,omitempty" hcl:"call,optional" cty:"call"`
|
||||||
Entitlements []string `json:"entitlements,omitempty" hcl:"entitlements,optional" cty:"entitlements"`
|
Entitlements []string `json:"entitlements,omitempty" hcl:"entitlements,optional" cty:"entitlements"`
|
||||||
// IMPORTANT: if you add more fields here, do not forget to update newOverrides/AddOverrides and docs/bake-reference.md.
|
// IMPORTANT: if you add more fields here, do not forget to update newOverrides/AddOverrides and docs/bake-reference.md.
|
||||||
|
|
||||||
// linked is a private field to mark a target used as a linked one
|
// linked is a private field to mark a target used as a linked one
|
||||||
linked bool
|
linked bool
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ hclparser.WithEvalContexts = &Target{}
|
var (
|
||||||
var _ hclparser.WithGetName = &Target{}
|
_ hclparser.WithEvalContexts = &Target{}
|
||||||
var _ hclparser.WithEvalContexts = &Group{}
|
_ hclparser.WithGetName = &Target{}
|
||||||
var _ hclparser.WithGetName = &Group{}
|
_ hclparser.WithEvalContexts = &Group{}
|
||||||
|
_ hclparser.WithGetName = &Group{}
|
||||||
|
)
|
||||||
|
|
||||||
func (t *Target) normalize() {
|
func (t *Target) normalize() {
|
||||||
t.Annotations = removeDupes(t.Annotations)
|
t.Annotations = removeDupesStr(t.Annotations)
|
||||||
t.Attest = removeAttestDupes(t.Attest)
|
t.Attest = t.Attest.Normalize()
|
||||||
t.Tags = removeDupes(t.Tags)
|
t.Tags = removeDupesStr(t.Tags)
|
||||||
t.Secrets = removeDupes(t.Secrets)
|
t.Secrets = t.Secrets.Normalize()
|
||||||
t.SSH = removeDupes(t.SSH)
|
t.SSH = t.SSH.Normalize()
|
||||||
t.Platforms = removeDupes(t.Platforms)
|
t.Platforms = removeDupesStr(t.Platforms)
|
||||||
t.CacheFrom = removeDupes(t.CacheFrom)
|
t.CacheFrom = t.CacheFrom.Normalize()
|
||||||
t.CacheTo = removeDupes(t.CacheTo)
|
t.CacheTo = t.CacheTo.Normalize()
|
||||||
t.Outputs = removeDupes(t.Outputs)
|
t.Outputs = t.Outputs.Normalize()
|
||||||
t.NoCacheFilter = removeDupes(t.NoCacheFilter)
|
t.NoCacheFilter = removeDupesStr(t.NoCacheFilter)
|
||||||
t.Ulimits = removeDupes(t.Ulimits)
|
t.Ulimits = removeDupesStr(t.Ulimits)
|
||||||
|
|
||||||
if t.NetworkMode != nil && *t.NetworkMode == "host" {
|
if t.NetworkMode != nil && *t.NetworkMode == "host" {
|
||||||
t.Entitlements = append(t.Entitlements, "network.host")
|
t.Entitlements = append(t.Entitlements, "network.host")
|
||||||
}
|
}
|
||||||
|
|
||||||
t.Entitlements = removeDupes(t.Entitlements)
|
t.Entitlements = removeDupesStr(t.Entitlements)
|
||||||
|
|
||||||
for k, v := range t.Contexts {
|
for k, v := range t.Contexts {
|
||||||
if v == "" {
|
if v == "" {
|
||||||
@@ -806,20 +815,19 @@ func (t *Target) Merge(t2 *Target) {
|
|||||||
t.Annotations = append(t.Annotations, t2.Annotations...)
|
t.Annotations = append(t.Annotations, t2.Annotations...)
|
||||||
}
|
}
|
||||||
if t2.Attest != nil { // merge
|
if t2.Attest != nil { // merge
|
||||||
t.Attest = append(t.Attest, t2.Attest...)
|
t.Attest = t.Attest.Merge(t2.Attest)
|
||||||
t.Attest = removeAttestDupes(t.Attest)
|
|
||||||
}
|
}
|
||||||
if t2.Secrets != nil { // merge
|
if t2.Secrets != nil { // merge
|
||||||
t.Secrets = append(t.Secrets, t2.Secrets...)
|
t.Secrets = t.Secrets.Merge(t2.Secrets)
|
||||||
}
|
}
|
||||||
if t2.SSH != nil { // merge
|
if t2.SSH != nil { // merge
|
||||||
t.SSH = append(t.SSH, t2.SSH...)
|
t.SSH = t.SSH.Merge(t2.SSH)
|
||||||
}
|
}
|
||||||
if t2.Platforms != nil { // no merge
|
if t2.Platforms != nil { // no merge
|
||||||
t.Platforms = t2.Platforms
|
t.Platforms = t2.Platforms
|
||||||
}
|
}
|
||||||
if t2.CacheFrom != nil { // merge
|
if t2.CacheFrom != nil { // merge
|
||||||
t.CacheFrom = append(t.CacheFrom, t2.CacheFrom...)
|
t.CacheFrom = t.CacheFrom.Merge(t2.CacheFrom)
|
||||||
}
|
}
|
||||||
if t2.CacheTo != nil { // no merge
|
if t2.CacheTo != nil { // no merge
|
||||||
t.CacheTo = t2.CacheTo
|
t.CacheTo = t2.CacheTo
|
||||||
@@ -854,7 +862,9 @@ func (t *Target) Merge(t2 *Target) {
|
|||||||
t.Inherits = append(t.Inherits, t2.Inherits...)
|
t.Inherits = append(t.Inherits, t2.Inherits...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *Target) AddOverrides(overrides map[string]Override) error {
|
func (t *Target) AddOverrides(overrides map[string]Override, ent *EntitlementConf) error {
|
||||||
|
// IMPORTANT: if you add more fields here, do not forget to update
|
||||||
|
// docs/bake-reference.md and https://docs.docker.com/build/bake/overrides/
|
||||||
for key, o := range overrides {
|
for key, o := range overrides {
|
||||||
value := o.Value
|
value := o.Value
|
||||||
keys := strings.SplitN(key, ".", 2)
|
keys := strings.SplitN(key, ".", 2)
|
||||||
@@ -865,7 +875,7 @@ func (t *Target) AddOverrides(overrides map[string]Override) error {
|
|||||||
t.Dockerfile = &value
|
t.Dockerfile = &value
|
||||||
case "args":
|
case "args":
|
||||||
if len(keys) != 2 {
|
if len(keys) != 2 {
|
||||||
return errors.Errorf("args require name")
|
return errors.Errorf("invalid format for args, expecting args.<name>=<value>")
|
||||||
}
|
}
|
||||||
if t.Args == nil {
|
if t.Args == nil {
|
||||||
t.Args = map[string]*string{}
|
t.Args = map[string]*string{}
|
||||||
@@ -873,7 +883,7 @@ func (t *Target) AddOverrides(overrides map[string]Override) error {
|
|||||||
t.Args[keys[1]] = &value
|
t.Args[keys[1]] = &value
|
||||||
case "contexts":
|
case "contexts":
|
||||||
if len(keys) != 2 {
|
if len(keys) != 2 {
|
||||||
return errors.Errorf("contexts require name")
|
return errors.Errorf("invalid format for contexts, expecting contexts.<name>=<value>")
|
||||||
}
|
}
|
||||||
if t.Contexts == nil {
|
if t.Contexts == nil {
|
||||||
t.Contexts = map[string]string{}
|
t.Contexts = map[string]string{}
|
||||||
@@ -881,36 +891,122 @@ func (t *Target) AddOverrides(overrides map[string]Override) error {
|
|||||||
t.Contexts[keys[1]] = value
|
t.Contexts[keys[1]] = value
|
||||||
case "labels":
|
case "labels":
|
||||||
if len(keys) != 2 {
|
if len(keys) != 2 {
|
||||||
return errors.Errorf("labels require name")
|
return errors.Errorf("invalid format for labels, expecting labels.<name>=<value>")
|
||||||
}
|
}
|
||||||
if t.Labels == nil {
|
if t.Labels == nil {
|
||||||
t.Labels = map[string]*string{}
|
t.Labels = map[string]*string{}
|
||||||
}
|
}
|
||||||
t.Labels[keys[1]] = &value
|
t.Labels[keys[1]] = &value
|
||||||
case "tags":
|
case "tags":
|
||||||
t.Tags = o.ArrValue
|
if o.Append {
|
||||||
|
t.Tags = append(t.Tags, o.ArrValue...)
|
||||||
|
} else {
|
||||||
|
t.Tags = o.ArrValue
|
||||||
|
}
|
||||||
case "cache-from":
|
case "cache-from":
|
||||||
t.CacheFrom = o.ArrValue
|
cacheFrom, err := buildflags.ParseCacheEntry(o.ArrValue)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if o.Append {
|
||||||
|
t.CacheFrom = t.CacheFrom.Merge(cacheFrom)
|
||||||
|
} else {
|
||||||
|
t.CacheFrom = cacheFrom
|
||||||
|
}
|
||||||
|
for _, c := range t.CacheFrom {
|
||||||
|
if c.Type == "local" {
|
||||||
|
if v, ok := c.Attrs["src"]; ok {
|
||||||
|
ent.FSRead = append(ent.FSRead, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
case "cache-to":
|
case "cache-to":
|
||||||
t.CacheTo = o.ArrValue
|
cacheTo, err := buildflags.ParseCacheEntry(o.ArrValue)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if o.Append {
|
||||||
|
t.CacheTo = t.CacheTo.Merge(cacheTo)
|
||||||
|
} else {
|
||||||
|
t.CacheTo = cacheTo
|
||||||
|
}
|
||||||
|
for _, c := range t.CacheTo {
|
||||||
|
if c.Type == "local" {
|
||||||
|
if v, ok := c.Attrs["dest"]; ok {
|
||||||
|
ent.FSWrite = append(ent.FSWrite, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
case "target":
|
case "target":
|
||||||
t.Target = &value
|
t.Target = &value
|
||||||
case "call":
|
case "call":
|
||||||
t.Call = &value
|
t.Call = &value
|
||||||
case "secrets":
|
case "secrets":
|
||||||
t.Secrets = o.ArrValue
|
secrets, err := parseArrValue[buildflags.Secret](o.ArrValue)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "invalid value for outputs")
|
||||||
|
}
|
||||||
|
if o.Append {
|
||||||
|
t.Secrets = t.Secrets.Merge(secrets)
|
||||||
|
} else {
|
||||||
|
t.Secrets = secrets
|
||||||
|
}
|
||||||
|
for _, s := range t.Secrets {
|
||||||
|
if s.FilePath != "" {
|
||||||
|
ent.FSRead = append(ent.FSRead, s.FilePath)
|
||||||
|
}
|
||||||
|
}
|
||||||
case "ssh":
|
case "ssh":
|
||||||
t.SSH = o.ArrValue
|
ssh, err := parseArrValue[buildflags.SSH](o.ArrValue)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "invalid value for outputs")
|
||||||
|
}
|
||||||
|
if o.Append {
|
||||||
|
t.SSH = t.SSH.Merge(ssh)
|
||||||
|
} else {
|
||||||
|
t.SSH = ssh
|
||||||
|
}
|
||||||
|
for _, s := range t.SSH {
|
||||||
|
ent.FSRead = append(ent.FSRead, s.Paths...)
|
||||||
|
}
|
||||||
case "platform":
|
case "platform":
|
||||||
t.Platforms = o.ArrValue
|
if o.Append {
|
||||||
|
t.Platforms = append(t.Platforms, o.ArrValue...)
|
||||||
|
} else {
|
||||||
|
t.Platforms = o.ArrValue
|
||||||
|
}
|
||||||
case "output":
|
case "output":
|
||||||
t.Outputs = o.ArrValue
|
outputs, err := parseArrValue[buildflags.ExportEntry](o.ArrValue)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "invalid value for outputs")
|
||||||
|
}
|
||||||
|
if o.Append {
|
||||||
|
t.Outputs = t.Outputs.Merge(outputs)
|
||||||
|
} else {
|
||||||
|
t.Outputs = outputs
|
||||||
|
}
|
||||||
|
for _, o := range t.Outputs {
|
||||||
|
if o.Destination != "" {
|
||||||
|
ent.FSWrite = append(ent.FSWrite, o.Destination)
|
||||||
|
}
|
||||||
|
}
|
||||||
case "entitlements":
|
case "entitlements":
|
||||||
t.Entitlements = append(t.Entitlements, o.ArrValue...)
|
t.Entitlements = append(t.Entitlements, o.ArrValue...)
|
||||||
|
for _, v := range o.ArrValue {
|
||||||
|
if v == string(EntitlementKeyNetworkHost) {
|
||||||
|
ent.NetworkHost = true
|
||||||
|
} else if v == string(EntitlementKeySecurityInsecure) {
|
||||||
|
ent.SecurityInsecure = true
|
||||||
|
}
|
||||||
|
}
|
||||||
case "annotations":
|
case "annotations":
|
||||||
t.Annotations = append(t.Annotations, o.ArrValue...)
|
t.Annotations = append(t.Annotations, o.ArrValue...)
|
||||||
case "attest":
|
case "attest":
|
||||||
t.Attest = append(t.Attest, o.ArrValue...)
|
attest, err := parseArrValue[buildflags.Attest](o.ArrValue)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "invalid value for attest")
|
||||||
|
}
|
||||||
|
t.Attest = t.Attest.Merge(attest)
|
||||||
case "no-cache":
|
case "no-cache":
|
||||||
noCache, err := strconv.ParseBool(value)
|
noCache, err := strconv.ParseBool(value)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -918,11 +1014,19 @@ func (t *Target) AddOverrides(overrides map[string]Override) error {
|
|||||||
}
|
}
|
||||||
t.NoCache = &noCache
|
t.NoCache = &noCache
|
||||||
case "no-cache-filter":
|
case "no-cache-filter":
|
||||||
t.NoCacheFilter = o.ArrValue
|
if o.Append {
|
||||||
|
t.NoCacheFilter = append(t.NoCacheFilter, o.ArrValue...)
|
||||||
|
} else {
|
||||||
|
t.NoCacheFilter = o.ArrValue
|
||||||
|
}
|
||||||
case "shm-size":
|
case "shm-size":
|
||||||
t.ShmSize = &value
|
t.ShmSize = &value
|
||||||
case "ulimits":
|
case "ulimits":
|
||||||
t.Ulimits = o.ArrValue
|
if o.Append {
|
||||||
|
t.Ulimits = append(t.Ulimits, o.ArrValue...)
|
||||||
|
} else {
|
||||||
|
t.Ulimits = o.ArrValue
|
||||||
|
}
|
||||||
case "network":
|
case "network":
|
||||||
t.NetworkMode = &value
|
t.NetworkMode = &value
|
||||||
case "pull":
|
case "pull":
|
||||||
@@ -1063,7 +1167,9 @@ func (t *Target) GetName(ectx *hcl.EvalContext, block *hcl.Block, loadDeps func(
|
|||||||
func TargetsToBuildOpt(m map[string]*Target, inp *Input) (map[string]build.Options, error) {
|
func TargetsToBuildOpt(m map[string]*Target, inp *Input) (map[string]build.Options, error) {
|
||||||
// make sure local credentials are loaded multiple times for different targets
|
// make sure local credentials are loaded multiple times for different targets
|
||||||
dockerConfig := config.LoadDefaultConfigFile(os.Stderr)
|
dockerConfig := config.LoadDefaultConfigFile(os.Stderr)
|
||||||
authProvider := authprovider.NewDockerAuthProvider(dockerConfig, nil)
|
authProvider := authprovider.NewDockerAuthProvider(authprovider.DockerAuthProviderConfig{
|
||||||
|
ConfigFile: dockerConfig,
|
||||||
|
})
|
||||||
|
|
||||||
m2 := make(map[string]build.Options, len(m))
|
m2 := make(map[string]build.Options, len(m))
|
||||||
for k, v := range m {
|
for k, v := range m {
|
||||||
@@ -1115,62 +1221,44 @@ func updateContext(t *build.Inputs, inp *Input) {
|
|||||||
t.ContextState = &st
|
t.ContextState = &st
|
||||||
}
|
}
|
||||||
|
|
||||||
// validateContextsEntitlements is a basic check to ensure contexts do not
|
func isRemoteContext(t build.Inputs, inp *Input) bool {
|
||||||
// escape local directories when loaded from remote sources. This is to be
|
if build.IsRemoteURL(t.ContextPath) {
|
||||||
// replaced with proper entitlements support in the future.
|
return true
|
||||||
func validateContextsEntitlements(t build.Inputs, inp *Input) error {
|
|
||||||
if inp == nil || inp.State == nil {
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
if v, ok := os.LookupEnv("BAKE_ALLOW_REMOTE_FS_ACCESS"); ok {
|
if inp != nil && build.IsRemoteURL(inp.URL) && !strings.HasPrefix(t.ContextPath, "cwd://") {
|
||||||
if vv, _ := strconv.ParseBool(v); vv {
|
return true
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func collectLocalPaths(t build.Inputs) []string {
|
||||||
|
var out []string
|
||||||
if t.ContextState == nil {
|
if t.ContextState == nil {
|
||||||
if err := checkPath(t.ContextPath); err != nil {
|
if v, ok := isLocalPath(t.ContextPath); ok {
|
||||||
return err
|
out = append(out, v)
|
||||||
}
|
}
|
||||||
|
if v, ok := isLocalPath(t.DockerfilePath); ok {
|
||||||
|
out = append(out, v)
|
||||||
|
}
|
||||||
|
} else if strings.HasPrefix(t.ContextPath, "cwd://") {
|
||||||
|
out = append(out, strings.TrimPrefix(t.ContextPath, "cwd://"))
|
||||||
}
|
}
|
||||||
for _, v := range t.NamedContexts {
|
for _, v := range t.NamedContexts {
|
||||||
if v.State != nil {
|
if v.State != nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if err := checkPath(v.Path); err != nil {
|
if v, ok := isLocalPath(v.Path); ok {
|
||||||
return err
|
out = append(out, v)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
func checkPath(p string) error {
|
func isLocalPath(p string) (string, bool) {
|
||||||
if build.IsRemoteURL(p) || strings.HasPrefix(p, "target:") || strings.HasPrefix(p, "docker-image:") {
|
if build.IsRemoteURL(p) || strings.HasPrefix(p, "target:") || strings.HasPrefix(p, "docker-image:") {
|
||||||
return nil
|
return "", false
|
||||||
}
|
}
|
||||||
p, err := filepath.EvalSymlinks(p)
|
return strings.TrimPrefix(p, "cwd://"), true
|
||||||
if err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
p, err = filepath.Abs(p)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
wd, err := os.Getwd()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
rel, err := filepath.Rel(wd, p)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
parts := strings.Split(rel, string(os.PathSeparator))
|
|
||||||
if parts[0] == ".." {
|
|
||||||
return errors.Errorf("path %s is outside of the working directory, please set BAKE_ALLOW_REMOTE_FS_ACCESS=1", p)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
||||||
@@ -1210,9 +1298,6 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
|||||||
// it's not outside the working directory and then resolve it to an
|
// it's not outside the working directory and then resolve it to an
|
||||||
// absolute path.
|
// absolute path.
|
||||||
bi.DockerfilePath = path.Clean(strings.TrimPrefix(bi.DockerfilePath, "cwd://"))
|
bi.DockerfilePath = path.Clean(strings.TrimPrefix(bi.DockerfilePath, "cwd://"))
|
||||||
if err := checkPath(bi.DockerfilePath); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var err error
|
var err error
|
||||||
bi.DockerfilePath, err = filepath.Abs(bi.DockerfilePath)
|
bi.DockerfilePath, err = filepath.Abs(bi.DockerfilePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -1249,10 +1334,6 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := validateContextsEntitlements(bi, inp); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Context = &bi.ContextPath
|
t.Context = &bi.ContextPath
|
||||||
|
|
||||||
args := map[string]string{}
|
args := map[string]string{}
|
||||||
@@ -1309,24 +1390,35 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
|||||||
}
|
}
|
||||||
bo.Platforms = platforms
|
bo.Platforms = platforms
|
||||||
|
|
||||||
secrets, err := buildflags.ParseSecretSpecs(t.Secrets)
|
secrets := t.Secrets
|
||||||
if err != nil {
|
if isRemoteContext(bi, inp) {
|
||||||
return nil, err
|
if _, ok := os.LookupEnv("BUILDX_BAKE_GIT_AUTH_TOKEN"); ok {
|
||||||
|
secrets = append(secrets, &buildflags.Secret{
|
||||||
|
ID: llb.GitAuthTokenKey,
|
||||||
|
Env: "BUILDX_BAKE_GIT_AUTH_TOKEN",
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if _, ok := os.LookupEnv("BUILDX_BAKE_GIT_AUTH_HEADER"); ok {
|
||||||
|
secrets = append(secrets, &buildflags.Secret{
|
||||||
|
ID: llb.GitAuthHeaderKey,
|
||||||
|
Env: "BUILDX_BAKE_GIT_AUTH_HEADER",
|
||||||
|
})
|
||||||
|
}
|
||||||
}
|
}
|
||||||
secretAttachment, err := controllerapi.CreateSecrets(secrets)
|
secrets = secrets.Normalize()
|
||||||
|
bo.SecretSpecs = secrets.ToPB()
|
||||||
|
secretAttachment, err := controllerapi.CreateSecrets(bo.SecretSpecs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
bo.Session = append(bo.Session, secretAttachment)
|
bo.Session = append(bo.Session, secretAttachment)
|
||||||
|
|
||||||
sshSpecs, err := buildflags.ParseSSHSpecs(t.SSH)
|
bo.SSHSpecs = t.SSH.ToPB()
|
||||||
if err != nil {
|
if len(bo.SSHSpecs) == 0 && buildflags.IsGitSSH(bi.ContextPath) || (inp != nil && buildflags.IsGitSSH(inp.URL)) {
|
||||||
return nil, err
|
bo.SSHSpecs = []*controllerapi.SSH{{ID: "default"}}
|
||||||
}
|
}
|
||||||
if len(sshSpecs) == 0 && (buildflags.IsGitSSH(bi.ContextPath) || (inp != nil && buildflags.IsGitSSH(inp.URL))) {
|
|
||||||
sshSpecs = append(sshSpecs, &controllerapi.SSH{ID: "default"})
|
sshAttachment, err := controllerapi.CreateSSH(bo.SSHSpecs)
|
||||||
}
|
|
||||||
sshAttachment, err := controllerapi.CreateSSH(sshSpecs)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -1342,23 +1434,14 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
cacheImports, err := buildflags.ParseCacheEntry(t.CacheFrom)
|
if t.CacheFrom != nil {
|
||||||
if err != nil {
|
bo.CacheFrom = controllerapi.CreateCaches(t.CacheFrom.ToPB())
|
||||||
return nil, err
|
}
|
||||||
|
if t.CacheTo != nil {
|
||||||
|
bo.CacheTo = controllerapi.CreateCaches(t.CacheTo.ToPB())
|
||||||
}
|
}
|
||||||
bo.CacheFrom = controllerapi.CreateCaches(cacheImports)
|
|
||||||
|
|
||||||
cacheExports, err := buildflags.ParseCacheEntry(t.CacheTo)
|
bo.Exports, bo.ExportsLocalPathsTemporary, err = controllerapi.CreateExports(t.Outputs.ToPB())
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
bo.CacheTo = controllerapi.CreateCaches(cacheExports)
|
|
||||||
|
|
||||||
outputs, err := buildflags.ParseExports(t.Outputs)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
bo.Exports, err = controllerapi.CreateExports(outputs)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -1373,11 +1456,7 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
attests, err := buildflags.ParseAttests(t.Attest)
|
bo.Attests = controllerapi.CreateAttestations(t.Attest.ToPB())
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
bo.Attests = controllerapi.CreateAttestations(attests)
|
|
||||||
|
|
||||||
bo.SourcePolicy, err = build.ReadSourcePolicy()
|
bo.SourcePolicy, err = build.ReadSourcePolicy()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -1392,9 +1471,7 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
|||||||
}
|
}
|
||||||
bo.Ulimits = ulimits
|
bo.Ulimits = ulimits
|
||||||
|
|
||||||
for _, ent := range t.Entitlements {
|
bo.Allow = append(bo.Allow, t.Entitlements...)
|
||||||
bo.Allow = append(bo.Allow, entitlements.Entitlement(ent))
|
|
||||||
}
|
|
||||||
|
|
||||||
return bo, nil
|
return bo, nil
|
||||||
}
|
}
|
||||||
@@ -1403,7 +1480,7 @@ func defaultTarget() *Target {
|
|||||||
return &Target{}
|
return &Target{}
|
||||||
}
|
}
|
||||||
|
|
||||||
func removeDupes(s []string) []string {
|
func removeDupesStr(s []string) []string {
|
||||||
i := 0
|
i := 0
|
||||||
seen := make(map[string]struct{}, len(s))
|
seen := make(map[string]struct{}, len(s))
|
||||||
for _, v := range s {
|
for _, v := range s {
|
||||||
@@ -1420,106 +1497,76 @@ func removeDupes(s []string) []string {
|
|||||||
return s[:i]
|
return s[:i]
|
||||||
}
|
}
|
||||||
|
|
||||||
func removeAttestDupes(s []string) []string {
|
func setPushOverride(outputs []*buildflags.ExportEntry, push bool) []*buildflags.ExportEntry {
|
||||||
res := []string{}
|
if !push {
|
||||||
m := map[string]int{}
|
// Disable push for any relevant export types
|
||||||
for _, v := range s {
|
for i := 0; i < len(outputs); {
|
||||||
att, err := buildflags.ParseAttest(v)
|
output := outputs[i]
|
||||||
if err != nil {
|
switch output.Type {
|
||||||
res = append(res, v)
|
case "registry":
|
||||||
continue
|
// Filter out registry output type
|
||||||
}
|
outputs[i], outputs[len(outputs)-1] = outputs[len(outputs)-1], outputs[i]
|
||||||
|
outputs = outputs[:len(outputs)-1]
|
||||||
if i, ok := m[att.Type]; ok {
|
continue
|
||||||
res[i] = v
|
case "image":
|
||||||
} else {
|
// Override push attribute
|
||||||
m[att.Type] = len(res)
|
output.Attrs["push"] = "false"
|
||||||
res = append(res, v)
|
}
|
||||||
|
i++
|
||||||
}
|
}
|
||||||
|
return outputs
|
||||||
}
|
}
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseOutput(str string) map[string]string {
|
// Force push to be enabled
|
||||||
fields, err := csvvalue.Fields(str, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
res := map[string]string{}
|
|
||||||
for _, field := range fields {
|
|
||||||
parts := strings.SplitN(field, "=", 2)
|
|
||||||
if len(parts) == 2 {
|
|
||||||
res[parts[0]] = parts[1]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseOutputType(str string) string {
|
|
||||||
if out := parseOutput(str); out != nil {
|
|
||||||
if v, ok := out["type"]; ok {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func setPushOverride(outputs []string, push bool) []string {
|
|
||||||
var out []string
|
|
||||||
setPush := true
|
setPush := true
|
||||||
for _, output := range outputs {
|
for _, output := range outputs {
|
||||||
typ := parseOutputType(output)
|
if output.Type != "docker" {
|
||||||
if typ == "image" || typ == "registry" {
|
// If there is an output type that is not docker, don't set "push"
|
||||||
// no need to set push if image or registry types already defined
|
|
||||||
setPush = false
|
setPush = false
|
||||||
if typ == "registry" {
|
}
|
||||||
if !push {
|
|
||||||
// don't set registry output if "push" is false
|
// Set push attribute for image
|
||||||
continue
|
if output.Type == "image" {
|
||||||
}
|
output.Attrs["push"] = "true"
|
||||||
// no need to set "push" attribute to true for registry
|
|
||||||
out = append(out, output)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
out = append(out, output+",push="+strconv.FormatBool(push))
|
|
||||||
} else {
|
|
||||||
if typ != "docker" {
|
|
||||||
// if there is any output that is not docker, don't set "push"
|
|
||||||
setPush = false
|
|
||||||
}
|
|
||||||
out = append(out, output)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if push && setPush {
|
|
||||||
out = append(out, "type=image,push=true")
|
if setPush {
|
||||||
|
// No existing output that pushes so add one
|
||||||
|
outputs = append(outputs, &buildflags.ExportEntry{
|
||||||
|
Type: "image",
|
||||||
|
Attrs: map[string]string{
|
||||||
|
"push": "true",
|
||||||
|
},
|
||||||
|
})
|
||||||
}
|
}
|
||||||
return out
|
return outputs
|
||||||
}
|
}
|
||||||
|
|
||||||
func setLoadOverride(outputs []string, load bool) []string {
|
func setLoadOverride(outputs []*buildflags.ExportEntry, load bool) []*buildflags.ExportEntry {
|
||||||
if !load {
|
if !load {
|
||||||
return outputs
|
return outputs
|
||||||
}
|
}
|
||||||
setLoad := true
|
|
||||||
for _, output := range outputs {
|
for _, output := range outputs {
|
||||||
if typ := parseOutputType(output); typ == "docker" {
|
switch output.Type {
|
||||||
if v := parseOutput(output); v != nil {
|
case "docker":
|
||||||
// dest set means we want to output as tar so don't set load
|
// if dest is not set, we can reuse this entry and do not need to set load
|
||||||
if _, ok := v["dest"]; !ok {
|
if output.Destination == "" {
|
||||||
setLoad = false
|
return outputs
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
} else if typ != "image" && typ != "registry" && typ != "oci" {
|
case "image", "registry", "oci":
|
||||||
|
// Ignore
|
||||||
|
default:
|
||||||
// if there is any output that is not an image, registry
|
// if there is any output that is not an image, registry
|
||||||
// or oci, don't set "load" similar to push override
|
// or oci, don't set "load" similar to push override
|
||||||
setLoad = false
|
return outputs
|
||||||
break
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if setLoad {
|
|
||||||
outputs = append(outputs, "type=docker")
|
outputs = append(outputs, &buildflags.ExportEntry{
|
||||||
}
|
Type: "docker",
|
||||||
|
})
|
||||||
return outputs
|
return outputs
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1537,14 +1584,9 @@ func sanitizeTargetName(target string) string {
|
|||||||
return strings.ReplaceAll(target, ".", "_")
|
return strings.ReplaceAll(target, ".", "_")
|
||||||
}
|
}
|
||||||
|
|
||||||
func sliceEqual(s1, s2 []string) bool {
|
func isSubset(s1, s2 []string) bool {
|
||||||
if len(s1) != len(s2) {
|
for _, item := range s1 {
|
||||||
return false
|
if !slices.Contains(s2, item) {
|
||||||
}
|
|
||||||
sort.Strings(s1)
|
|
||||||
sort.Strings(s2)
|
|
||||||
for i := range s1 {
|
|
||||||
if s1[i] != s2[i] {
|
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1558,3 +1600,24 @@ func toNamedContexts(m map[string]string) map[string]build.NamedContext {
|
|||||||
}
|
}
|
||||||
return m2
|
return m2
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type arrValue[B any] interface {
|
||||||
|
encoding.TextUnmarshaler
|
||||||
|
*B
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseArrValue[T any, PT arrValue[T]](s []string) ([]*T, error) {
|
||||||
|
outputs := make([]*T, 0, len(s))
|
||||||
|
for _, text := range s {
|
||||||
|
if text == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
output := new(T)
|
||||||
|
if err := PT(output).UnmarshalText([]byte(text)); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
outputs = append(outputs, output)
|
||||||
|
}
|
||||||
|
return outputs, nil
|
||||||
|
}
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
105
bake/compose.go
105
bake/compose.go
@@ -5,13 +5,14 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sort"
|
"slices"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/compose-spec/compose-go/v2/consts"
|
"github.com/compose-spec/compose-go/v2/consts"
|
||||||
"github.com/compose-spec/compose-go/v2/dotenv"
|
"github.com/compose-spec/compose-go/v2/dotenv"
|
||||||
"github.com/compose-spec/compose-go/v2/loader"
|
"github.com/compose-spec/compose-go/v2/loader"
|
||||||
composetypes "github.com/compose-spec/compose-go/v2/types"
|
composetypes "github.com/compose-spec/compose-go/v2/types"
|
||||||
|
"github.com/docker/buildx/util/buildflags"
|
||||||
dockeropts "github.com/docker/cli/opts"
|
dockeropts "github.com/docker/cli/opts"
|
||||||
"github.com/docker/go-units"
|
"github.com/docker/go-units"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
@@ -102,6 +103,12 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
|
|||||||
shmSize = &shmSizeStr
|
shmSize = &shmSizeStr
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var networkModeP *string
|
||||||
|
if s.Build.Network != "" {
|
||||||
|
networkMode := s.Build.Network
|
||||||
|
networkModeP = &networkMode
|
||||||
|
}
|
||||||
|
|
||||||
var ulimits []string
|
var ulimits []string
|
||||||
if s.Build.Ulimits != nil {
|
if s.Build.Ulimits != nil {
|
||||||
for n, u := range s.Build.Ulimits {
|
for n, u := range s.Build.Ulimits {
|
||||||
@@ -113,14 +120,16 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var ssh []string
|
var ssh []*buildflags.SSH
|
||||||
for _, bkey := range s.Build.SSH {
|
for _, bkey := range s.Build.SSH {
|
||||||
sshkey := composeToBuildkitSSH(bkey)
|
sshkey := composeToBuildkitSSH(bkey)
|
||||||
ssh = append(ssh, sshkey)
|
ssh = append(ssh, sshkey)
|
||||||
}
|
}
|
||||||
sort.Strings(ssh)
|
slices.SortFunc(ssh, func(a, b *buildflags.SSH) int {
|
||||||
|
return a.Less(b)
|
||||||
|
})
|
||||||
|
|
||||||
var secrets []string
|
var secrets []*buildflags.Secret
|
||||||
for _, bs := range s.Build.Secrets {
|
for _, bs := range s.Build.Secrets {
|
||||||
secret, err := composeToBuildkitSecret(bs, cfg.Secrets[bs.Source])
|
secret, err := composeToBuildkitSecret(bs, cfg.Secrets[bs.Source])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -136,6 +145,16 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
|
|||||||
labels[k] = &v
|
labels[k] = &v
|
||||||
}
|
}
|
||||||
|
|
||||||
|
cacheFrom, err := buildflags.ParseCacheEntry(s.Build.CacheFrom)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
cacheTo, err := buildflags.ParseCacheEntry(s.Build.CacheTo)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
g.Targets = append(g.Targets, targetName)
|
g.Targets = append(g.Targets, targetName)
|
||||||
t := &Target{
|
t := &Target{
|
||||||
Name: targetName,
|
Name: targetName,
|
||||||
@@ -152,9 +171,9 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
|
|||||||
val, ok := cfg.Environment[val]
|
val, ok := cfg.Environment[val]
|
||||||
return val, ok
|
return val, ok
|
||||||
})),
|
})),
|
||||||
CacheFrom: s.Build.CacheFrom,
|
CacheFrom: cacheFrom,
|
||||||
CacheTo: s.Build.CacheTo,
|
CacheTo: cacheTo,
|
||||||
NetworkMode: &s.Build.Network,
|
NetworkMode: networkModeP,
|
||||||
SSH: ssh,
|
SSH: ssh,
|
||||||
Secrets: secrets,
|
Secrets: secrets,
|
||||||
ShmSize: shmSize,
|
ShmSize: shmSize,
|
||||||
@@ -173,7 +192,6 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
|
|||||||
c.Targets = append(c.Targets, t)
|
c.Targets = append(c.Targets, t)
|
||||||
}
|
}
|
||||||
c.Groups = append(c.Groups, g)
|
c.Groups = append(c.Groups, g)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return &c, nil
|
return &c, nil
|
||||||
@@ -292,10 +310,12 @@ type xbake struct {
|
|||||||
// https://github.com/docker/docs/blob/main/content/build/bake/compose-file.md#extension-field-with-x-bake
|
// https://github.com/docker/docs/blob/main/content/build/bake/compose-file.md#extension-field-with-x-bake
|
||||||
}
|
}
|
||||||
|
|
||||||
type stringMap map[string]string
|
type (
|
||||||
type stringArray []string
|
stringMap map[string]string
|
||||||
|
stringArray []string
|
||||||
|
)
|
||||||
|
|
||||||
func (sa *stringArray) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (sa *stringArray) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
var multi []string
|
var multi []string
|
||||||
err := unmarshal(&multi)
|
err := unmarshal(&multi)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -312,7 +332,7 @@ func (sa *stringArray) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
|||||||
|
|
||||||
// composeExtTarget converts Compose build extension x-bake to bake Target
|
// composeExtTarget converts Compose build extension x-bake to bake Target
|
||||||
// https://github.com/compose-spec/compose-spec/blob/master/spec.md#extension
|
// https://github.com/compose-spec/compose-spec/blob/master/spec.md#extension
|
||||||
func (t *Target) composeExtTarget(exts map[string]interface{}) error {
|
func (t *Target) composeExtTarget(exts map[string]any) error {
|
||||||
var xb xbake
|
var xb xbake
|
||||||
|
|
||||||
ext, ok := exts["x-bake"]
|
ext, ok := exts["x-bake"]
|
||||||
@@ -329,23 +349,45 @@ func (t *Target) composeExtTarget(exts map[string]interface{}) error {
|
|||||||
t.Tags = dedupSlice(append(t.Tags, xb.Tags...))
|
t.Tags = dedupSlice(append(t.Tags, xb.Tags...))
|
||||||
}
|
}
|
||||||
if len(xb.CacheFrom) > 0 {
|
if len(xb.CacheFrom) > 0 {
|
||||||
t.CacheFrom = dedupSlice(append(t.CacheFrom, xb.CacheFrom...))
|
cacheFrom, err := buildflags.ParseCacheEntry(xb.CacheFrom)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
t.CacheFrom = t.CacheFrom.Merge(cacheFrom)
|
||||||
}
|
}
|
||||||
if len(xb.CacheTo) > 0 {
|
if len(xb.CacheTo) > 0 {
|
||||||
t.CacheTo = dedupSlice(append(t.CacheTo, xb.CacheTo...))
|
cacheTo, err := buildflags.ParseCacheEntry(xb.CacheTo)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
t.CacheTo = t.CacheTo.Merge(cacheTo)
|
||||||
}
|
}
|
||||||
if len(xb.Secrets) > 0 {
|
if len(xb.Secrets) > 0 {
|
||||||
t.Secrets = dedupSlice(append(t.Secrets, xb.Secrets...))
|
secrets, err := parseArrValue[buildflags.Secret](xb.Secrets)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
t.Secrets = t.Secrets.Merge(secrets)
|
||||||
}
|
}
|
||||||
if len(xb.SSH) > 0 {
|
if len(xb.SSH) > 0 {
|
||||||
t.SSH = dedupSlice(append(t.SSH, xb.SSH...))
|
ssh, err := parseArrValue[buildflags.SSH](xb.SSH)
|
||||||
sort.Strings(t.SSH)
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
t.SSH = t.SSH.Merge(ssh)
|
||||||
|
slices.SortFunc(t.SSH, func(a, b *buildflags.SSH) int {
|
||||||
|
return a.Less(b)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
if len(xb.Platforms) > 0 {
|
if len(xb.Platforms) > 0 {
|
||||||
t.Platforms = dedupSlice(append(t.Platforms, xb.Platforms...))
|
t.Platforms = dedupSlice(append(t.Platforms, xb.Platforms...))
|
||||||
}
|
}
|
||||||
if len(xb.Outputs) > 0 {
|
if len(xb.Outputs) > 0 {
|
||||||
t.Outputs = dedupSlice(append(t.Outputs, xb.Outputs...))
|
outputs, err := parseArrValue[buildflags.ExportEntry](xb.Outputs)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
t.Outputs = t.Outputs.Merge(outputs)
|
||||||
}
|
}
|
||||||
if xb.Pull != nil {
|
if xb.Pull != nil {
|
||||||
t.Pull = xb.Pull
|
t.Pull = xb.Pull
|
||||||
@@ -365,35 +407,30 @@ func (t *Target) composeExtTarget(exts map[string]interface{}) error {
|
|||||||
|
|
||||||
// composeToBuildkitSecret converts secret from compose format to buildkit's
|
// composeToBuildkitSecret converts secret from compose format to buildkit's
|
||||||
// csv format.
|
// csv format.
|
||||||
func composeToBuildkitSecret(inp composetypes.ServiceSecretConfig, psecret composetypes.SecretConfig) (string, error) {
|
func composeToBuildkitSecret(inp composetypes.ServiceSecretConfig, psecret composetypes.SecretConfig) (*buildflags.Secret, error) {
|
||||||
if psecret.External {
|
if psecret.External {
|
||||||
return "", errors.Errorf("unsupported external secret %s", psecret.Name)
|
return nil, errors.Errorf("unsupported external secret %s", psecret.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
var bkattrs []string
|
secret := &buildflags.Secret{}
|
||||||
if inp.Source != "" {
|
if inp.Source != "" {
|
||||||
bkattrs = append(bkattrs, "id="+inp.Source)
|
secret.ID = inp.Source
|
||||||
}
|
}
|
||||||
if psecret.File != "" {
|
if psecret.File != "" {
|
||||||
bkattrs = append(bkattrs, "src="+psecret.File)
|
secret.FilePath = psecret.File
|
||||||
}
|
}
|
||||||
if psecret.Environment != "" {
|
if psecret.Environment != "" {
|
||||||
bkattrs = append(bkattrs, "env="+psecret.Environment)
|
secret.Env = psecret.Environment
|
||||||
}
|
}
|
||||||
|
return secret, nil
|
||||||
return strings.Join(bkattrs, ","), nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// composeToBuildkitSSH converts secret from compose format to buildkit's
|
// composeToBuildkitSSH converts secret from compose format to buildkit's
|
||||||
// csv format.
|
// csv format.
|
||||||
func composeToBuildkitSSH(sshKey composetypes.SSHKey) string {
|
func composeToBuildkitSSH(sshKey composetypes.SSHKey) *buildflags.SSH {
|
||||||
var bkattrs []string
|
bkssh := &buildflags.SSH{ID: sshKey.ID}
|
||||||
|
|
||||||
bkattrs = append(bkattrs, sshKey.ID)
|
|
||||||
|
|
||||||
if sshKey.Path != "" {
|
if sshKey.Path != "" {
|
||||||
bkattrs = append(bkattrs, sshKey.Path)
|
bkssh.Paths = []string{sshKey.Path}
|
||||||
}
|
}
|
||||||
|
return bkssh
|
||||||
return strings.Join(bkattrs, "=")
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestParseCompose(t *testing.T) {
|
func TestParseCompose(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
db:
|
db:
|
||||||
build: ./db
|
build: ./db
|
||||||
@@ -33,7 +33,7 @@ services:
|
|||||||
cache_to:
|
cache_to:
|
||||||
- type=local,dest=path/to/cache
|
- type=local,dest=path/to/cache
|
||||||
ssh:
|
ssh:
|
||||||
- key=path/to/key
|
- key=/path/to/key
|
||||||
- default
|
- default
|
||||||
secrets:
|
secrets:
|
||||||
- token
|
- token
|
||||||
@@ -74,14 +74,14 @@ secrets:
|
|||||||
require.Equal(t, "Dockerfile-alternate", *c.Targets[1].Dockerfile)
|
require.Equal(t, "Dockerfile-alternate", *c.Targets[1].Dockerfile)
|
||||||
require.Equal(t, 1, len(c.Targets[1].Args))
|
require.Equal(t, 1, len(c.Targets[1].Args))
|
||||||
require.Equal(t, ptrstr("123"), c.Targets[1].Args["buildno"])
|
require.Equal(t, ptrstr("123"), c.Targets[1].Args["buildno"])
|
||||||
require.Equal(t, []string{"type=local,src=path/to/cache"}, c.Targets[1].CacheFrom)
|
require.Equal(t, []string{"type=local,src=path/to/cache"}, stringify(c.Targets[1].CacheFrom))
|
||||||
require.Equal(t, []string{"type=local,dest=path/to/cache"}, c.Targets[1].CacheTo)
|
require.Equal(t, []string{"type=local,dest=path/to/cache"}, stringify(c.Targets[1].CacheTo))
|
||||||
require.Equal(t, "none", *c.Targets[1].NetworkMode)
|
require.Equal(t, "none", *c.Targets[1].NetworkMode)
|
||||||
require.Equal(t, []string{"default", "key=path/to/key"}, c.Targets[1].SSH)
|
require.Equal(t, []string{"default", "key=/path/to/key"}, stringify(c.Targets[1].SSH))
|
||||||
require.Equal(t, []string{
|
require.Equal(t, []string{
|
||||||
"id=token,env=ENV_TOKEN",
|
|
||||||
"id=aws,src=/root/.aws/credentials",
|
"id=aws,src=/root/.aws/credentials",
|
||||||
}, c.Targets[1].Secrets)
|
"id=token,env=ENV_TOKEN",
|
||||||
|
}, stringify(c.Targets[1].Secrets))
|
||||||
|
|
||||||
require.Equal(t, "webapp2", c.Targets[2].Name)
|
require.Equal(t, "webapp2", c.Targets[2].Name)
|
||||||
require.Equal(t, "dir", *c.Targets[2].Context)
|
require.Equal(t, "dir", *c.Targets[2].Context)
|
||||||
@@ -89,7 +89,7 @@ secrets:
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestNoBuildOutOfTreeService(t *testing.T) {
|
func TestNoBuildOutOfTreeService(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
external:
|
external:
|
||||||
image: "verycooldb:1337"
|
image: "verycooldb:1337"
|
||||||
@@ -103,7 +103,7 @@ services:
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestParseComposeTarget(t *testing.T) {
|
func TestParseComposeTarget(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
db:
|
db:
|
||||||
build:
|
build:
|
||||||
@@ -129,7 +129,7 @@ services:
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestComposeBuildWithoutContext(t *testing.T) {
|
func TestComposeBuildWithoutContext(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
db:
|
db:
|
||||||
build:
|
build:
|
||||||
@@ -153,7 +153,7 @@ services:
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestBuildArgEnvCompose(t *testing.T) {
|
func TestBuildArgEnvCompose(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
version: "3.8"
|
version: "3.8"
|
||||||
services:
|
services:
|
||||||
example:
|
example:
|
||||||
@@ -179,7 +179,7 @@ services:
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestInconsistentComposeFile(t *testing.T) {
|
func TestInconsistentComposeFile(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
webapp:
|
webapp:
|
||||||
entrypoint: echo 1
|
entrypoint: echo 1
|
||||||
@@ -190,7 +190,7 @@ services:
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestAdvancedNetwork(t *testing.T) {
|
func TestAdvancedNetwork(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
db:
|
db:
|
||||||
networks:
|
networks:
|
||||||
@@ -215,7 +215,7 @@ networks:
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestTags(t *testing.T) {
|
func TestTags(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
example:
|
example:
|
||||||
image: example
|
image: example
|
||||||
@@ -233,7 +233,7 @@ services:
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestDependsOnList(t *testing.T) {
|
func TestDependsOnList(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
version: "3.8"
|
version: "3.8"
|
||||||
|
|
||||||
services:
|
services:
|
||||||
@@ -269,7 +269,7 @@ networks:
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestComposeExt(t *testing.T) {
|
func TestComposeExt(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
addon:
|
addon:
|
||||||
image: ct-addon:bar
|
image: ct-addon:bar
|
||||||
@@ -283,7 +283,7 @@ services:
|
|||||||
tags:
|
tags:
|
||||||
- ct-addon:baz
|
- ct-addon:baz
|
||||||
ssh:
|
ssh:
|
||||||
key: path/to/key
|
key: /path/to/key
|
||||||
args:
|
args:
|
||||||
CT_ECR: foo
|
CT_ECR: foo
|
||||||
CT_TAG: bar
|
CT_TAG: bar
|
||||||
@@ -336,23 +336,23 @@ services:
|
|||||||
require.Equal(t, map[string]*string{"CT_ECR": ptrstr("foo"), "CT_TAG": ptrstr("bar")}, c.Targets[0].Args)
|
require.Equal(t, map[string]*string{"CT_ECR": ptrstr("foo"), "CT_TAG": ptrstr("bar")}, c.Targets[0].Args)
|
||||||
require.Equal(t, []string{"ct-addon:baz", "ct-addon:foo", "ct-addon:alp"}, c.Targets[0].Tags)
|
require.Equal(t, []string{"ct-addon:baz", "ct-addon:foo", "ct-addon:alp"}, c.Targets[0].Tags)
|
||||||
require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[0].Platforms)
|
require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[0].Platforms)
|
||||||
require.Equal(t, []string{"user/app:cache", "type=local,src=path/to/cache"}, c.Targets[0].CacheFrom)
|
require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheFrom))
|
||||||
require.Equal(t, []string{"user/app:cache", "type=local,dest=path/to/cache"}, c.Targets[0].CacheTo)
|
require.Equal(t, []string{"type=local,dest=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheTo))
|
||||||
require.Equal(t, []string{"default", "key=path/to/key", "other=path/to/otherkey"}, c.Targets[0].SSH)
|
require.Equal(t, []string{"default", "key=/path/to/key", "other=path/to/otherkey"}, stringify(c.Targets[0].SSH))
|
||||||
require.Equal(t, newBool(true), c.Targets[0].Pull)
|
require.Equal(t, newBool(true), c.Targets[0].Pull)
|
||||||
require.Equal(t, map[string]string{"alpine": "docker-image://alpine:3.13"}, c.Targets[0].Contexts)
|
require.Equal(t, map[string]string{"alpine": "docker-image://alpine:3.13"}, c.Targets[0].Contexts)
|
||||||
require.Equal(t, []string{"ct-fake-aws:bar"}, c.Targets[1].Tags)
|
require.Equal(t, []string{"ct-fake-aws:bar"}, c.Targets[1].Tags)
|
||||||
require.Equal(t, []string{"id=mysecret,src=/local/secret", "id=mysecret2,src=/local/secret2"}, c.Targets[1].Secrets)
|
require.Equal(t, []string{"id=mysecret,src=/local/secret", "id=mysecret2,src=/local/secret2"}, stringify(c.Targets[1].Secrets))
|
||||||
require.Equal(t, []string{"default"}, c.Targets[1].SSH)
|
require.Equal(t, []string{"default"}, stringify(c.Targets[1].SSH))
|
||||||
require.Equal(t, []string{"linux/arm64"}, c.Targets[1].Platforms)
|
require.Equal(t, []string{"linux/arm64"}, c.Targets[1].Platforms)
|
||||||
require.Equal(t, []string{"type=docker"}, c.Targets[1].Outputs)
|
require.Equal(t, []string{"type=docker"}, stringify(c.Targets[1].Outputs))
|
||||||
require.Equal(t, newBool(true), c.Targets[1].NoCache)
|
require.Equal(t, newBool(true), c.Targets[1].NoCache)
|
||||||
require.Equal(t, ptrstr("128MiB"), c.Targets[1].ShmSize)
|
require.Equal(t, ptrstr("128MiB"), c.Targets[1].ShmSize)
|
||||||
require.Equal(t, []string{"nofile=1024:1024"}, c.Targets[1].Ulimits)
|
require.Equal(t, []string{"nofile=1024:1024"}, c.Targets[1].Ulimits)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestComposeExtDedup(t *testing.T) {
|
func TestComposeExtDedup(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
webapp:
|
webapp:
|
||||||
image: app:bar
|
image: app:bar
|
||||||
@@ -383,9 +383,9 @@ services:
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, []string{"ct-addon:foo", "ct-addon:baz"}, c.Targets[0].Tags)
|
require.Equal(t, []string{"ct-addon:foo", "ct-addon:baz"}, c.Targets[0].Tags)
|
||||||
require.Equal(t, []string{"user/app:cache", "type=local,src=path/to/cache"}, c.Targets[0].CacheFrom)
|
require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheFrom))
|
||||||
require.Equal(t, []string{"user/app:cache", "type=local,dest=path/to/cache"}, c.Targets[0].CacheTo)
|
require.Equal(t, []string{"type=local,dest=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheTo))
|
||||||
require.Equal(t, []string{"default", "key=path/to/key"}, c.Targets[0].SSH)
|
require.Equal(t, []string{"default", "key=path/to/key"}, stringify(c.Targets[0].SSH))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestEnv(t *testing.T) {
|
func TestEnv(t *testing.T) {
|
||||||
@@ -396,7 +396,7 @@ func TestEnv(t *testing.T) {
|
|||||||
_, err = envf.WriteString("FOO=bsdf -csdf\n")
|
_, err = envf.WriteString("FOO=bsdf -csdf\n")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
scratch:
|
scratch:
|
||||||
build:
|
build:
|
||||||
@@ -424,7 +424,7 @@ func TestDotEnv(t *testing.T) {
|
|||||||
err := os.WriteFile(filepath.Join(tmpdir, ".env"), []byte("FOO=bar"), 0644)
|
err := os.WriteFile(filepath.Join(tmpdir, ".env"), []byte("FOO=bar"), 0644)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
scratch:
|
scratch:
|
||||||
build:
|
build:
|
||||||
@@ -443,7 +443,7 @@ services:
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestPorts(t *testing.T) {
|
func TestPorts(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
foo:
|
foo:
|
||||||
build:
|
build:
|
||||||
@@ -664,7 +664,7 @@ target "default" {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestComposeNullArgs(t *testing.T) {
|
func TestComposeNullArgs(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
scratch:
|
scratch:
|
||||||
build:
|
build:
|
||||||
@@ -680,7 +680,7 @@ services:
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestDependsOn(t *testing.T) {
|
func TestDependsOn(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
foo:
|
foo:
|
||||||
build:
|
build:
|
||||||
@@ -711,7 +711,7 @@ services:
|
|||||||
`), 0644)
|
`), 0644)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
include:
|
include:
|
||||||
- compose-foo.yml
|
- compose-foo.yml
|
||||||
|
|
||||||
@@ -740,7 +740,7 @@ services:
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestDevelop(t *testing.T) {
|
func TestDevelop(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
scratch:
|
scratch:
|
||||||
build:
|
build:
|
||||||
@@ -759,7 +759,7 @@ services:
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestCgroup(t *testing.T) {
|
func TestCgroup(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
scratch:
|
scratch:
|
||||||
build:
|
build:
|
||||||
@@ -772,7 +772,7 @@ services:
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestProjectName(t *testing.T) {
|
func TestProjectName(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
scratch:
|
scratch:
|
||||||
build:
|
build:
|
||||||
|
|||||||
@@ -2,17 +2,25 @@ package bake
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
|
"cmp"
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"io/fs"
|
||||||
"os"
|
"os"
|
||||||
|
"path/filepath"
|
||||||
"slices"
|
"slices"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
"github.com/containerd/console"
|
"github.com/containerd/console"
|
||||||
"github.com/docker/buildx/build"
|
"github.com/docker/buildx/build"
|
||||||
|
"github.com/docker/buildx/util/osutil"
|
||||||
"github.com/moby/buildkit/util/entitlements"
|
"github.com/moby/buildkit/util/entitlements"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/tonistiigi/go-csvvalue"
|
||||||
)
|
)
|
||||||
|
|
||||||
type EntitlementKey string
|
type EntitlementKey string
|
||||||
@@ -20,6 +28,7 @@ type EntitlementKey string
|
|||||||
const (
|
const (
|
||||||
EntitlementKeyNetworkHost EntitlementKey = "network.host"
|
EntitlementKeyNetworkHost EntitlementKey = "network.host"
|
||||||
EntitlementKeySecurityInsecure EntitlementKey = "security.insecure"
|
EntitlementKeySecurityInsecure EntitlementKey = "security.insecure"
|
||||||
|
EntitlementKeyDevice EntitlementKey = "device"
|
||||||
EntitlementKeyFSRead EntitlementKey = "fs.read"
|
EntitlementKeyFSRead EntitlementKey = "fs.read"
|
||||||
EntitlementKeyFSWrite EntitlementKey = "fs.write"
|
EntitlementKeyFSWrite EntitlementKey = "fs.write"
|
||||||
EntitlementKeyFS EntitlementKey = "fs"
|
EntitlementKeyFS EntitlementKey = "fs"
|
||||||
@@ -32,6 +41,7 @@ const (
|
|||||||
type EntitlementConf struct {
|
type EntitlementConf struct {
|
||||||
NetworkHost bool
|
NetworkHost bool
|
||||||
SecurityInsecure bool
|
SecurityInsecure bool
|
||||||
|
Devices *EntitlementsDevicesConf
|
||||||
FSRead []string
|
FSRead []string
|
||||||
FSWrite []string
|
FSWrite []string
|
||||||
ImagePush []string
|
ImagePush []string
|
||||||
@@ -39,6 +49,11 @@ type EntitlementConf struct {
|
|||||||
SSH bool
|
SSH bool
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type EntitlementsDevicesConf struct {
|
||||||
|
All bool
|
||||||
|
Devices map[string]struct{}
|
||||||
|
}
|
||||||
|
|
||||||
func ParseEntitlements(in []string) (EntitlementConf, error) {
|
func ParseEntitlements(in []string) (EntitlementConf, error) {
|
||||||
var conf EntitlementConf
|
var conf EntitlementConf
|
||||||
for _, e := range in {
|
for _, e := range in {
|
||||||
@@ -52,6 +67,22 @@ func ParseEntitlements(in []string) (EntitlementConf, error) {
|
|||||||
default:
|
default:
|
||||||
k, v, _ := strings.Cut(e, "=")
|
k, v, _ := strings.Cut(e, "=")
|
||||||
switch k {
|
switch k {
|
||||||
|
case string(EntitlementKeyDevice):
|
||||||
|
if v == "" {
|
||||||
|
conf.Devices = &EntitlementsDevicesConf{All: true}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fields, err := csvvalue.Fields(v, nil)
|
||||||
|
if err != nil {
|
||||||
|
return EntitlementConf{}, errors.Wrapf(err, "failed to parse device entitlement %q", v)
|
||||||
|
}
|
||||||
|
if conf.Devices == nil {
|
||||||
|
conf.Devices = &EntitlementsDevicesConf{}
|
||||||
|
}
|
||||||
|
if conf.Devices.Devices == nil {
|
||||||
|
conf.Devices.Devices = make(map[string]struct{}, 0)
|
||||||
|
}
|
||||||
|
conf.Devices.Devices[fields[0]] = struct{}{}
|
||||||
case string(EntitlementKeyFSRead):
|
case string(EntitlementKeyFSRead):
|
||||||
conf.FSRead = append(conf.FSRead, v)
|
conf.FSRead = append(conf.FSRead, v)
|
||||||
case string(EntitlementKeyFSWrite):
|
case string(EntitlementKeyFSWrite):
|
||||||
@@ -67,10 +98,8 @@ func ParseEntitlements(in []string) (EntitlementConf, error) {
|
|||||||
conf.ImagePush = append(conf.ImagePush, v)
|
conf.ImagePush = append(conf.ImagePush, v)
|
||||||
conf.ImageLoad = append(conf.ImageLoad, v)
|
conf.ImageLoad = append(conf.ImageLoad, v)
|
||||||
default:
|
default:
|
||||||
return conf, errors.Errorf("uknown entitlement key %q", k)
|
return conf, errors.Errorf("unknown entitlement key %q", k)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: dedupe slices and parent paths
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return conf, nil
|
return conf, nil
|
||||||
@@ -90,21 +119,99 @@ func (c EntitlementConf) Validate(m map[string]build.Options) (EntitlementConf,
|
|||||||
|
|
||||||
func (c EntitlementConf) check(bo build.Options, expected *EntitlementConf) error {
|
func (c EntitlementConf) check(bo build.Options, expected *EntitlementConf) error {
|
||||||
for _, e := range bo.Allow {
|
for _, e := range bo.Allow {
|
||||||
|
k, rest, _ := strings.Cut(e, "=")
|
||||||
|
switch k {
|
||||||
|
case entitlements.EntitlementDevice.String():
|
||||||
|
if rest == "" {
|
||||||
|
if c.Devices == nil || !c.Devices.All {
|
||||||
|
expected.Devices = &EntitlementsDevicesConf{All: true}
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fields, err := csvvalue.Fields(rest, nil)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to parse device entitlement %q", rest)
|
||||||
|
}
|
||||||
|
if expected.Devices == nil {
|
||||||
|
expected.Devices = &EntitlementsDevicesConf{}
|
||||||
|
}
|
||||||
|
if expected.Devices.Devices == nil {
|
||||||
|
expected.Devices.Devices = make(map[string]struct{}, 0)
|
||||||
|
}
|
||||||
|
expected.Devices.Devices[fields[0]] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
switch e {
|
switch e {
|
||||||
case entitlements.EntitlementNetworkHost:
|
case entitlements.EntitlementNetworkHost.String():
|
||||||
if !c.NetworkHost {
|
if !c.NetworkHost {
|
||||||
expected.NetworkHost = true
|
expected.NetworkHost = true
|
||||||
}
|
}
|
||||||
case entitlements.EntitlementSecurityInsecure:
|
case entitlements.EntitlementSecurityInsecure.String():
|
||||||
if !c.SecurityInsecure {
|
if !c.SecurityInsecure {
|
||||||
expected.SecurityInsecure = true
|
expected.SecurityInsecure = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
rwPaths := map[string]struct{}{}
|
||||||
|
roPaths := map[string]struct{}{}
|
||||||
|
|
||||||
|
for _, p := range collectLocalPaths(bo.Inputs) {
|
||||||
|
roPaths[p] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, p := range bo.ExportsLocalPathsTemporary {
|
||||||
|
rwPaths[p] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, ce := range bo.CacheTo {
|
||||||
|
if ce.Type == "local" {
|
||||||
|
if dest, ok := ce.Attrs["dest"]; ok {
|
||||||
|
rwPaths[dest] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, ci := range bo.CacheFrom {
|
||||||
|
if ci.Type == "local" {
|
||||||
|
if src, ok := ci.Attrs["src"]; ok {
|
||||||
|
roPaths[src] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, secret := range bo.SecretSpecs {
|
||||||
|
if secret.FilePath != "" {
|
||||||
|
roPaths[secret.FilePath] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, ssh := range bo.SSHSpecs {
|
||||||
|
for _, p := range ssh.Paths {
|
||||||
|
roPaths[p] = struct{}{}
|
||||||
|
}
|
||||||
|
if len(ssh.Paths) == 0 {
|
||||||
|
if !c.SSH {
|
||||||
|
expected.SSH = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
expected.FSRead, err = findMissingPaths(c.FSRead, roPaths)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
expected.FSWrite, err = findMissingPaths(c.FSWrite, rwPaths)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c EntitlementConf) Prompt(ctx context.Context, out io.Writer) error {
|
func (c EntitlementConf) Prompt(ctx context.Context, isRemote bool, out io.Writer) error {
|
||||||
var term bool
|
var term bool
|
||||||
if _, err := console.ConsoleFromFile(os.Stdin); err == nil {
|
if _, err := console.ConsoleFromFile(os.Stdin); err == nil {
|
||||||
term = true
|
term = true
|
||||||
@@ -113,35 +220,93 @@ func (c EntitlementConf) Prompt(ctx context.Context, out io.Writer) error {
|
|||||||
var msgs []string
|
var msgs []string
|
||||||
var flags []string
|
var flags []string
|
||||||
|
|
||||||
|
// these warnings are currently disabled to give users time to update
|
||||||
|
var msgsFS []string
|
||||||
|
var flagsFS []string
|
||||||
|
|
||||||
if c.NetworkHost {
|
if c.NetworkHost {
|
||||||
msgs = append(msgs, " - Running build containers that can access host network")
|
msgs = append(msgs, " - Running build containers that can access host network")
|
||||||
flags = append(flags, "network.host")
|
flags = append(flags, string(EntitlementKeyNetworkHost))
|
||||||
}
|
}
|
||||||
if c.SecurityInsecure {
|
if c.SecurityInsecure {
|
||||||
msgs = append(msgs, " - Running privileged containers that can make system changes")
|
msgs = append(msgs, " - Running privileged containers that can make system changes")
|
||||||
flags = append(flags, "security.insecure")
|
flags = append(flags, string(EntitlementKeySecurityInsecure))
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(msgs) == 0 {
|
if c.Devices != nil {
|
||||||
|
if c.Devices.All {
|
||||||
|
msgs = append(msgs, " - Access to CDI devices")
|
||||||
|
flags = append(flags, string(EntitlementKeyDevice))
|
||||||
|
} else {
|
||||||
|
for d := range c.Devices.Devices {
|
||||||
|
msgs = append(msgs, fmt.Sprintf(" - Access to device %s", d))
|
||||||
|
flags = append(flags, string(EntitlementKeyDevice)+"="+d)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.SSH {
|
||||||
|
msgsFS = append(msgsFS, " - Forwarding default SSH agent socket")
|
||||||
|
flagsFS = append(flagsFS, string(EntitlementKeySSH))
|
||||||
|
}
|
||||||
|
|
||||||
|
roPaths, rwPaths, commonPaths := groupSamePaths(c.FSRead, c.FSWrite)
|
||||||
|
wd, err := os.Getwd()
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "failed to get current working directory")
|
||||||
|
}
|
||||||
|
wd, err = filepath.EvalSymlinks(wd)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "failed to evaluate working directory")
|
||||||
|
}
|
||||||
|
roPaths = toRelativePaths(roPaths, wd)
|
||||||
|
rwPaths = toRelativePaths(rwPaths, wd)
|
||||||
|
commonPaths = toRelativePaths(commonPaths, wd)
|
||||||
|
|
||||||
|
if len(commonPaths) > 0 {
|
||||||
|
for _, p := range commonPaths {
|
||||||
|
msgsFS = append(msgsFS, fmt.Sprintf(" - Read and write access to path %s", p))
|
||||||
|
flagsFS = append(flagsFS, string(EntitlementKeyFS)+"="+p)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(roPaths) > 0 {
|
||||||
|
for _, p := range roPaths {
|
||||||
|
msgsFS = append(msgsFS, fmt.Sprintf(" - Read access to path %s", p))
|
||||||
|
flagsFS = append(flagsFS, string(EntitlementKeyFSRead)+"="+p)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(rwPaths) > 0 {
|
||||||
|
for _, p := range rwPaths {
|
||||||
|
msgsFS = append(msgsFS, fmt.Sprintf(" - Write access to path %s", p))
|
||||||
|
flagsFS = append(flagsFS, string(EntitlementKeyFSWrite)+"="+p)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(msgs) == 0 && len(msgsFS) == 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Fprintf(out, "Your build is requesting privileges for following possibly insecure capabilities:\n\n")
|
fmt.Fprintf(out, "Your build is requesting privileges for following possibly insecure capabilities:\n\n")
|
||||||
for _, m := range msgs {
|
for _, m := range slices.Concat(msgs, msgsFS) {
|
||||||
fmt.Fprintf(out, "%s\n", m)
|
fmt.Fprintf(out, "%s\n", m)
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, f := range flags {
|
for i, f := range flags {
|
||||||
flags[i] = "--allow=" + f
|
flags[i] = "--allow=" + f
|
||||||
}
|
}
|
||||||
|
for i, f := range flagsFS {
|
||||||
if term {
|
flagsFS[i] = "--allow=" + f
|
||||||
fmt.Fprintf(out, "\nIn order to not see this message in the future pass %q to grant requested privileges.\n", strings.Join(flags, " "))
|
|
||||||
} else {
|
|
||||||
fmt.Fprintf(out, "\nPass %q to grant requested privileges.\n", strings.Join(flags, " "))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
args := append([]string(nil), os.Args...)
|
if term {
|
||||||
|
fmt.Fprintf(out, "\nIn order to not see this message in the future pass %q to grant requested privileges.\n", strings.Join(slices.Concat(flags, flagsFS), " "))
|
||||||
|
} else {
|
||||||
|
fmt.Fprintf(out, "\nPass %q to grant requested privileges.\n", strings.Join(slices.Concat(flags, flagsFS), " "))
|
||||||
|
}
|
||||||
|
|
||||||
|
args := slices.Clone(os.Args)
|
||||||
if v, ok := os.LookupEnv("DOCKER_CLI_PLUGIN_ORIGINAL_CLI_COMMAND"); ok && v != "" {
|
if v, ok := os.LookupEnv("DOCKER_CLI_PLUGIN_ORIGINAL_CLI_COMMAND"); ok && v != "" {
|
||||||
args[0] = v
|
args[0] = v
|
||||||
}
|
}
|
||||||
@@ -149,7 +314,33 @@ func (c EntitlementConf) Prompt(ctx context.Context, out io.Writer) error {
|
|||||||
|
|
||||||
if idx != -1 {
|
if idx != -1 {
|
||||||
fmt.Fprintf(out, "\nYour full command with requested privileges:\n\n")
|
fmt.Fprintf(out, "\nYour full command with requested privileges:\n\n")
|
||||||
fmt.Fprintf(out, "%s %s %s\n\n", strings.Join(args[:idx+1], " "), strings.Join(flags, " "), strings.Join(args[idx+1:], " "))
|
fmt.Fprintf(out, "%s %s %s\n\n", strings.Join(args[:idx+1], " "), strings.Join(slices.Concat(flags, flagsFS), " "), strings.Join(args[idx+1:], " "))
|
||||||
|
}
|
||||||
|
|
||||||
|
fsEntitlementsEnabled := true
|
||||||
|
if isRemote {
|
||||||
|
if v, ok := os.LookupEnv("BAKE_ALLOW_REMOTE_FS_ACCESS"); ok {
|
||||||
|
vv, err := strconv.ParseBool(v)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to parse BAKE_ALLOW_REMOTE_FS_ACCESS value %q", v)
|
||||||
|
}
|
||||||
|
fsEntitlementsEnabled = !vv
|
||||||
|
}
|
||||||
|
}
|
||||||
|
v, fsEntitlementsSet := os.LookupEnv("BUILDX_BAKE_ENTITLEMENTS_FS")
|
||||||
|
if fsEntitlementsSet {
|
||||||
|
vv, err := strconv.ParseBool(v)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to parse BUILDX_BAKE_ENTITLEMENTS_FS value %q", v)
|
||||||
|
}
|
||||||
|
fsEntitlementsEnabled = vv
|
||||||
|
}
|
||||||
|
|
||||||
|
if !fsEntitlementsEnabled && len(msgs) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if fsEntitlementsEnabled && !fsEntitlementsSet && len(msgsFS) != 0 {
|
||||||
|
fmt.Fprintf(out, "To disable filesystem entitlements checks, you can set BUILDX_BAKE_ENTITLEMENTS_FS=0 .\n\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
if term {
|
if term {
|
||||||
@@ -173,3 +364,296 @@ func (c EntitlementConf) Prompt(ctx context.Context, out io.Writer) error {
|
|||||||
|
|
||||||
return errors.Errorf("additional privileges requested")
|
return errors.Errorf("additional privileges requested")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// isParentOrEqualPath reports whether p equals parent or lies below parent
// in the filesystem hierarchy. A parent of "/" matches every path. Both
// arguments are expected to be absolute; parent is cleaned before comparing.
func isParentOrEqualPath(p, parent string) bool {
	if parent == "/" {
		return true
	}
	// Clean the parent BEFORE appending the separator. Cleaning the joined
	// string (filepath.Clean(parent + sep)) strips the trailing separator
	// again, so the prefix check would also match sibling paths, e.g.
	// p="/a/bc" with parent="/a/b".
	parent = filepath.Clean(parent)
	if p == parent {
		return true
	}
	return strings.HasPrefix(p, parent+string(filepath.Separator))
}
|
||||||
|
|
||||||
|
// findMissingPaths returns the entries of paths that are not covered by any
// of the allowed paths in set. The allowed set is normalized first; if it
// contains the wildcard "*", every path is permitted and nil is returned.
// The requested paths are resolved to their existing prefixes and
// deduplicated so that only the topmost missing directories are reported.
// The result is sorted, or nil when nothing is missing.
func findMissingPaths(set []string, paths map[string]struct{}) ([]string, error) {
	set, allowAny, err := evaluatePaths(set)
	if err != nil {
		return nil, err
	} else if allowAny {
		// "*" in the allowed set: nothing can be missing.
		return nil, nil
	}

	// Resolve each requested path to its longest existing prefix so that
	// comparisons happen on real (symlink-resolved) filesystem locations.
	paths, err = evaluateToExistingPaths(paths)
	if err != nil {
		return nil, err
	}
	// Drop paths already covered by another requested parent path.
	paths, err = dedupPaths(paths)
	if err != nil {
		return nil, err
	}

	out := make([]string, 0, len(paths))
loop0:
	for p := range paths {
		for _, c := range set {
			if isParentOrEqualPath(p, c) {
				// p is allowed by c; skip it.
				continue loop0
			}
		}
		out = append(out, p)
	}
	if len(out) == 0 {
		return nil, nil
	}

	// Sort for deterministic output (map iteration order is random).
	slices.Sort(out)

	return out, nil
}
|
||||||
|
|
||||||
|
// dedupPaths cleans every path in the input set and removes entries that are
// already covered by another entry higher up in the hierarchy, keeping only
// the topmost paths.
func dedupPaths(in map[string]struct{}) (map[string]struct{}, error) {
	cleaned := make([]string, 0, len(in))
	for path := range in {
		cleaned = append(cleaned, filepath.Clean(path))
	}

	// Sort shortest-first so parent directories are inserted before any of
	// their children.
	slices.SortFunc(cleaned, func(a, b string) int {
		return cmp.Compare(len(a), len(b))
	})

	out := make(map[string]struct{}, len(cleaned))
	for _, path := range cleaned {
		covered := false
		for parent := range out {
			if strings.HasPrefix(path, parent+string(filepath.Separator)) {
				covered = true
				break
			}
		}
		if !covered {
			out[path] = struct{}{}
		}
	}
	return out, nil
}
|
||||||
|
|
||||||
|
func toRelativePaths(in []string, wd string) []string {
|
||||||
|
out := make([]string, 0, len(in))
|
||||||
|
for _, p := range in {
|
||||||
|
rel, err := filepath.Rel(wd, p)
|
||||||
|
if err == nil {
|
||||||
|
// allow up to one level of ".." in the path
|
||||||
|
if !strings.HasPrefix(rel, ".."+string(filepath.Separator)+"..") {
|
||||||
|
out = append(out, rel)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
out = append(out, p)
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// groupSamePaths sorts both input lists in place and splits them into the
// entries unique to each list plus the sorted list of entries present in
// both. If either input is nil, both are returned unchanged with no common
// group.
func groupSamePaths(in1, in2 []string) ([]string, []string, []string) {
	if in1 == nil || in2 == nil {
		return in1, in2, nil
	}

	slices.Sort(in1)
	slices.Sort(in2)

	// Merge-style scan over the two sorted lists collecting shared entries.
	common := []string{}
	for i, j := 0, 0; i < len(in1) && j < len(in2); {
		if in1[i] == in2[j] {
			common = append(common, in1[i])
			i++
			j++
		} else if in1[i] < in2[j] {
			i++
		} else {
			j++
		}
	}

	// Filter the shared entries out of a sorted list; common is sorted in
	// the same order, so a single advancing index suffices.
	strip := func(paths []string) []string {
		kept := make([]string, 0, len(paths))
		next := 0
		for _, p := range paths {
			if next < len(common) && p == common[next] {
				next++
				continue
			}
			kept = append(kept, p)
		}
		return kept
	}

	return strip(in1), strip(in2), common
}
|
||||||
|
|
||||||
|
// removeCommonPaths returns a copy of in without the entries listed in
// common. Both slices must be sorted in the same order; a single advancing
// cursor over common is used instead of a per-element search.
func removeCommonPaths(in, common []string) []string {
	out := make([]string, 0, len(in))
	next := 0
	for _, p := range in {
		isCommon := next < len(common) && p == common[next]
		if isCommon {
			next++
		} else {
			out = append(out, p)
		}
	}
	return out
}
|
||||||
|
|
||||||
|
// evaluatePaths normalizes a list of user-provided entitlement paths. Each
// entry is made absolute, resolved to its longest existing prefix (following
// symlinks via evaluateToExistingPath), and passed through
// osutil.GetLongPathName (presumably the Windows long-path normalization —
// a no-op elsewhere; confirm against osutil). A literal "*" entry produces
// no path but sets the returned allowAny flag, meaning every path is
// permitted. Entries whose absolute form cannot be computed are skipped
// with a warning rather than failing the whole evaluation.
func evaluatePaths(in []string) ([]string, bool, error) {
	out := make([]string, 0, len(in))
	allowAny := false
	for _, p := range in {
		if p == "*" {
			allowAny = true
			continue
		}
		v, err := filepath.Abs(p)
		if err != nil {
			logrus.Warnf("failed to evaluate entitlement path %q: %v", p, err)
			continue
		}
		// rest holds the non-existing remainder of the path; it is
		// re-attached after the existing prefix has been resolved.
		v, rest, err := evaluateToExistingPath(v)
		if err != nil {
			return nil, false, errors.Wrapf(err, "failed to evaluate path %q", p)
		}
		v, err = osutil.GetLongPathName(v)
		if err != nil {
			return nil, false, errors.Wrapf(err, "failed to evaluate path %q", p)
		}
		if rest != "" {
			v = filepath.Join(v, rest)
		}
		out = append(out, v)
	}
	return out, allowAny, nil
}
|
||||||
|
|
||||||
|
// evaluateToExistingPaths maps each path in the set to its longest existing,
// symlink-resolved prefix (see evaluateToExistingPath), additionally
// normalized through osutil.GetLongPathName. The non-existing remainder of
// each path is intentionally dropped: only the existing prefix matters for
// comparing filesystem access. The result may contain fewer entries than
// the input when several paths share an existing prefix.
func evaluateToExistingPaths(in map[string]struct{}) (map[string]struct{}, error) {
	m := make(map[string]struct{}, len(in))
	for p := range in {
		v, _, err := evaluateToExistingPath(p)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to evaluate path %q", p)
		}
		v, err = osutil.GetLongPathName(v)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to evaluate path %q", p)
		}
		m[v] = struct{}{}
	}
	return m, nil
}
|
||||||
|
|
||||||
|
// evaluateToExistingPath resolves the absolute form of in component by
// component, following symlinks, and returns the longest prefix that exists
// on disk together with the unresolved remainder. Unlike
// filepath.EvalSymlinks (whose walk this function is structured like) it
// does not fail when the path only partially exists. On full resolution the
// remainder is "". The walk is capped at 255 symlinks to guard against
// loops.
func evaluateToExistingPath(in string) (string, string, error) {
	in, err := filepath.Abs(in)
	if err != nil {
		return "", "", err
	}

	volLen := volumeNameLen(in)
	pathSeparator := string(os.PathSeparator)

	// Include the separator right after the volume name (e.g. `C:\` or `/`)
	// in the fixed prefix.
	if volLen < len(in) && os.IsPathSeparator(in[volLen]) {
		volLen++
	}
	vol := in[:volLen]
	dest := vol // resolved path built up so far
	linksWalked := 0
	var end int
	for start := volLen; start < len(in); start = end {
		// Skip separator run, then scan one path component [start:end).
		for start < len(in) && os.IsPathSeparator(in[start]) {
			start++
		}
		end = start
		for end < len(in) && !os.IsPathSeparator(in[end]) {
			end++
		}

		if end == start {
			// No more components.
			break
		} else if in[start:end] == "." {
			continue
		} else if in[start:end] == ".." {
			// Pop the last component of dest, or accumulate ".." when
			// dest is already at the volume root or ends in "..".
			var r int
			for r = len(dest) - 1; r >= volLen; r-- {
				if os.IsPathSeparator(dest[r]) {
					break
				}
			}
			if r < volLen || dest[r+1:] == ".." {
				if len(dest) > volLen {
					dest += pathSeparator
				}
				dest += ".."
			} else {
				dest = dest[:r]
			}
			continue
		}

		// Append the component to the resolved prefix.
		if len(dest) > volumeNameLen(dest) && !os.IsPathSeparator(dest[len(dest)-1]) {
			dest += pathSeparator
		}
		dest += in[start:end]

		fi, err := os.Lstat(dest)
		if err != nil {
			// If the component doesn't exist, return the last valid path
			if os.IsNotExist(err) {
				for r := len(dest) - 1; r >= volLen; r-- {
					if os.IsPathSeparator(dest[r]) {
						return dest[:r], in[start:], nil
					}
				}
				return vol, in[start:], nil
			}
			return "", "", err
		}

		if fi.Mode()&fs.ModeSymlink == 0 {
			// Regular component: a non-directory may only appear as the
			// final component of the path.
			if !fi.Mode().IsDir() && end < len(in) {
				return "", "", syscall.ENOTDIR
			}
			continue
		}

		linksWalked++
		if linksWalked > 255 {
			return "", "", errors.New("too many symlinks")
		}

		link, err := os.Readlink(dest)
		if err != nil {
			return "", "", err
		}

		// Substitute the link target for the component just consumed and
		// keep walking the rewritten path.
		in = link + in[end:]

		v := volumeNameLen(link)
		if v > 0 {
			// Link target has its own volume (Windows): restart from it.
			if v < len(link) && os.IsPathSeparator(link[v]) {
				v++
			}
			vol = link[:v]
			dest = vol
			end = len(vol)
		} else if len(link) > 0 && os.IsPathSeparator(link[0]) {
			// Absolute link target: restart from the root.
			dest = link[:1]
			end = 1
			vol = link[:1]
			volLen = 1
		} else {
			// Relative link target: drop the link component from dest and
			// resolve the target against the remaining prefix.
			var r int
			for r = len(dest) - 1; r >= volLen; r-- {
				if os.IsPathSeparator(dest[r]) {
					break
				}
			}
			if r < volLen {
				dest = vol
			} else {
				dest = dest[:r]
			}
			end = 0
		}
	}
	return filepath.Clean(dest), "", nil
}
|
||||||
|
|
||||||
|
// volumeNameLen returns the length of the leading volume name in s.
// filepath.VolumeName yields an empty string on non-Windows platforms, so
// the result is non-zero only for Windows-style paths.
func volumeNameLen(s string) int {
	vol := filepath.VolumeName(s)
	return len(vol)
}
|
||||||
|
|||||||
486
bake/entitlements_test.go
Normal file
486
bake/entitlements_test.go
Normal file
@@ -0,0 +1,486 @@
|
|||||||
|
package bake
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"slices"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/docker/buildx/build"
|
||||||
|
"github.com/docker/buildx/controller/pb"
|
||||||
|
"github.com/docker/buildx/util/osutil"
|
||||||
|
"github.com/moby/buildkit/client/llb"
|
||||||
|
"github.com/moby/buildkit/util/entitlements"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestEvaluateToExistingPath verifies that evaluateToExistingPath resolves
// files, directories, and symlinks to their real locations, and that a
// non-existing suffix falls back to the deepest existing ancestor instead
// of producing an error.
func TestEvaluateToExistingPath(t *testing.T) {
	// GetLongPathName so the expectations match the resolved form returned
	// by evaluateToExistingPath on Windows.
	tempDir, err := osutil.GetLongPathName(t.TempDir())
	require.NoError(t, err)

	// Setup temporary directory structure for testing
	existingFile := filepath.Join(tempDir, "existing_file")
	require.NoError(t, os.WriteFile(existingFile, []byte("test"), 0644))

	existingDir := filepath.Join(tempDir, "existing_dir")
	require.NoError(t, os.Mkdir(existingDir, 0755))

	symlinkToFile := filepath.Join(tempDir, "symlink_to_file")
	require.NoError(t, os.Symlink(existingFile, symlinkToFile))

	symlinkToDir := filepath.Join(tempDir, "symlink_to_dir")
	require.NoError(t, os.Symlink(existingDir, symlinkToDir))

	nonexistentPath := filepath.Join(tempDir, "nonexistent", "path", "file.txt")

	tests := []struct {
		name      string
		input     string
		expected  string // resolved existing prefix
		expectErr bool
	}{
		{
			name:      "Existing file",
			input:     existingFile,
			expected:  existingFile,
			expectErr: false,
		},
		{
			name:      "Existing directory",
			input:     existingDir,
			expected:  existingDir,
			expectErr: false,
		},
		{
			// Symlinks resolve to their targets.
			name:      "Symlink to file",
			input:     symlinkToFile,
			expected:  existingFile,
			expectErr: false,
		},
		{
			name:      "Symlink to directory",
			input:     symlinkToDir,
			expected:  existingDir,
			expectErr: false,
		},
		{
			// Missing multi-level suffix: deepest existing ancestor wins.
			name:      "Non-existent path",
			input:     nonexistentPath,
			expected:  tempDir,
			expectErr: false,
		},
		{
			name:      "Non-existent intermediate path",
			input:     filepath.Join(tempDir, "nonexistent", "file.txt"),
			expected:  tempDir,
			expectErr: false,
		},
		{
			name:  "Root path",
			input: "/",
			// filepath.Abs normalizes "/" per platform (e.g. drive root
			// on Windows).
			expected: func() string {
				root, _ := filepath.Abs("/")
				return root
			}(),
			expectErr: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// The unresolved remainder is not asserted here; only the
			// existing prefix is checked.
			result, _, err := evaluateToExistingPath(tt.input)

			if tt.expectErr {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
				require.Equal(t, tt.expected, result)
			}
		})
	}
}
|
||||||
|
|
||||||
|
// TestDedupePaths verifies that dedupPaths removes child paths already
// covered by a requested parent and collapses unclean duplicates, comparing
// the results in their user-facing relative form.
func TestDedupePaths(t *testing.T) {
	wd := osutil.GetWd()
	tcases := []struct {
		in  map[string]struct{}
		out map[string]struct{}
	}{
		{
			// Disjoint siblings: nothing to dedupe.
			in: map[string]struct{}{
				"/a/b/c": {},
				"/a/b/d": {},
				"/a/b/e": {},
			},
			out: map[string]struct{}{
				"/a/b/c": {},
				"/a/b/d": {},
				"/a/b/e": {},
			},
		},
		{
			// Children and an unclean alias all collapse into the parent.
			in: map[string]struct{}{
				"/a/b/c":      {},
				"/a/b/c/d":    {},
				"/a/b/c/d/e":  {},
				"/a/b/../b/c": {},
			},
			out: map[string]struct{}{
				"/a/b/c": {},
			},
		},
		{
			// Paths around the working directory; expectations are relative
			// except the one escaping more than one level (see
			// toRelativePaths).
			in: map[string]struct{}{
				filepath.Join(wd, "a/b/c"):    {},
				filepath.Join(wd, "../aa"):    {},
				filepath.Join(wd, "a/b"):      {},
				filepath.Join(wd, "a/b/d"):    {},
				filepath.Join(wd, "../aa/b"):  {},
				filepath.Join(wd, "../../bb"): {},
			},
			out: map[string]struct{}{
				"a/b":                          {},
				"../aa":                        {},
				filepath.Join(wd, "../../bb"): {},
			},
		},
	}

	for i, tc := range tcases {
		t.Run(fmt.Sprintf("case%d", i), func(t *testing.T) {
			out, err := dedupPaths(tc.in)
			// NOTE(review): this conditional NoError is redundant — the
			// unconditional require.NoError below covers it.
			if err != nil {
				require.NoError(t, err)
			}
			// convert to relative paths as that is shown to user
			arr := make([]string, 0, len(out))
			for k := range out {
				arr = append(arr, k)
			}
			require.NoError(t, err)
			arr = toRelativePaths(arr, wd)
			m := make(map[string]struct{})
			for _, v := range arr {
				m[filepath.ToSlash(v)] = struct{}{}
			}
			// Normalize expectations to slash form for Windows.
			o := make(map[string]struct{}, len(tc.out))
			for k := range tc.out {
				o[filepath.ToSlash(k)] = struct{}{}
			}
			require.Equal(t, o, m)
		})
	}
}
|
||||||
|
|
||||||
|
// TestValidateEntitlements checks EntitlementConf.Validate: for each build
// configuration it asserts which additional entitlements (network, security,
// SSH, filesystem read/write) are reported as missing relative to the
// already-granted conf.
func TestValidateEntitlements(t *testing.T) {
	dir1 := t.TempDir()
	dir2 := t.TempDir()

	// the paths returned by entitlements validation will have symlinks resolved
	expDir1, err := filepath.EvalSymlinks(dir1)
	require.NoError(t, err)
	expDir2, err := filepath.EvalSymlinks(dir2)
	require.NoError(t, err)

	// Symlink pointing two levels above dir1 to test hierarchy escapes.
	escapeLink := filepath.Join(dir1, "escape_link")
	require.NoError(t, os.Symlink("../../aa", escapeLink))

	wd, err := os.Getwd()
	require.NoError(t, err)
	expWd, err := filepath.EvalSymlinks(wd)
	require.NoError(t, err)

	tcases := []struct {
		name     string
		conf     EntitlementConf // entitlements already granted
		opt      build.Options   // requested build
		expected EntitlementConf // entitlements Validate should report missing
	}{
		{
			name: "No entitlements",
			opt: build.Options{
				Inputs: build.Inputs{
					// Remote context state: no local FS read is requested.
					ContextState: &llb.State{},
				},
			},
		},
		{
			name: "NetworkHostMissing",
			opt: build.Options{
				Allow: []string{
					entitlements.EntitlementNetworkHost.String(),
				},
			},
			expected: EntitlementConf{
				NetworkHost: true,
				// Local context: reading the working directory is implied.
				FSRead: []string{expWd},
			},
		},
		{
			name: "NetworkHostSet",
			conf: EntitlementConf{
				NetworkHost: true,
			},
			opt: build.Options{
				Allow: []string{
					entitlements.EntitlementNetworkHost.String(),
				},
			},
			expected: EntitlementConf{
				FSRead: []string{expWd},
			},
		},
		{
			name: "SecurityAndNetworkHostMissing",
			opt: build.Options{
				Allow: []string{
					entitlements.EntitlementNetworkHost.String(),
					entitlements.EntitlementSecurityInsecure.String(),
				},
			},
			expected: EntitlementConf{
				NetworkHost:      true,
				SecurityInsecure: true,
				FSRead:           []string{expWd},
			},
		},
		{
			name: "SecurityMissingAndNetworkHostSet",
			conf: EntitlementConf{
				NetworkHost: true,
			},
			opt: build.Options{
				Allow: []string{
					entitlements.EntitlementNetworkHost.String(),
					entitlements.EntitlementSecurityInsecure.String(),
				},
			},
			expected: EntitlementConf{
				SecurityInsecure: true,
				FSRead:           []string{expWd},
			},
		},
		{
			name: "SSHMissing",
			opt: build.Options{
				SSHSpecs: []*pb.SSH{
					{
						ID: "test",
					},
				},
			},
			expected: EntitlementConf{
				SSH:    true,
				FSRead: []string{expWd},
			},
		},
		{
			name: "ExportLocal",
			opt: build.Options{
				ExportsLocalPathsTemporary: []string{
					dir1,
					filepath.Join(dir1, "subdir"),
					dir2,
				},
			},
			expected: EntitlementConf{
				// subdir collapses into dir1; result is sorted.
				FSWrite: func() []string {
					exp := []string{expDir1, expDir2}
					slices.Sort(exp)
					return exp
				}(),
				FSRead: []string{expWd},
			},
		},
		{
			name: "SecretFromSubFile",
			opt: build.Options{
				SecretSpecs: []*pb.Secret{
					{
						FilePath: filepath.Join(dir1, "subfile"),
					},
				},
			},
			conf: EntitlementConf{
				// dir1 already allowed: nothing missing.
				FSRead: []string{wd, dir1},
			},
		},
		{
			name: "SecretFromEscapeLink",
			opt: build.Options{
				SecretSpecs: []*pb.Secret{
					{
						FilePath: escapeLink,
					},
				},
			},
			conf: EntitlementConf{
				FSRead: []string{wd, dir1},
			},
			expected: EntitlementConf{
				// The symlink escapes dir1, so its resolved target is
				// reported as a missing read path.
				FSRead: []string{filepath.Join(expDir1, "../..")},
			},
		},
		{
			name: "SecretFromEscapeLinkAllowRoot",
			opt: build.Options{
				SecretSpecs: []*pb.Secret{
					{
						FilePath: escapeLink,
					},
				},
			},
			conf: EntitlementConf{
				FSRead: []string{"/"},
			},
			expected: EntitlementConf{
				FSRead: func() []string {
					// on windows root (/) is only allowed if it is the same volume as wd
					if filepath.VolumeName(wd) == filepath.VolumeName(escapeLink) {
						return nil
					}
					// if not, then escapeLink is not allowed
					exp, _, err := evaluateToExistingPath(escapeLink)
					require.NoError(t, err)
					exp, err = filepath.EvalSymlinks(exp)
					require.NoError(t, err)
					return []string{exp}
				}(),
			},
		},
		{
			name: "SecretFromEscapeLinkAllowAny",
			opt: build.Options{
				SecretSpecs: []*pb.Secret{
					{
						FilePath: escapeLink,
					},
				},
			},
			conf: EntitlementConf{
				// "*" allows every read path.
				FSRead: []string{"*"},
			},
			expected: EntitlementConf{},
		},
		{
			name: "NonExistingAllowedPathSubpath",
			opt: build.Options{
				ExportsLocalPathsTemporary: []string{
					dir1,
				},
			},
			conf: EntitlementConf{
				FSRead:  []string{wd},
				FSWrite: []string{filepath.Join(dir1, "not/exists")},
			},
			expected: EntitlementConf{
				FSWrite: []string{expDir1}, // dir1 is still needed as only subpath was allowed
			},
		},
		{
			name: "NonExistingAllowedPathMatches",
			opt: build.Options{
				ExportsLocalPathsTemporary: []string{
					filepath.Join(dir1, "not/exists"),
				},
			},
			conf: EntitlementConf{
				FSRead:  []string{wd},
				FSWrite: []string{filepath.Join(dir1, "not/exists")},
			},
			expected: EntitlementConf{
				FSWrite: []string{expDir1}, // dir1 is still needed as build also needs to write not/exists directory
			},
		},
		{
			name: "NonExistingBuildPath",
			opt: build.Options{
				ExportsLocalPathsTemporary: []string{
					filepath.Join(dir1, "not/exists"),
				},
			},
			conf: EntitlementConf{
				FSRead:  []string{wd},
				FSWrite: []string{dir1},
			},
		},
	}

	for _, tc := range tcases {
		t.Run(tc.name, func(t *testing.T) {
			expected, err := tc.conf.Validate(map[string]build.Options{"test": tc.opt})
			require.NoError(t, err)
			require.Equal(t, tc.expected, expected)
		})
	}
}
|
||||||
|
|
||||||
|
// TestGroupSamePaths checks that groupSamePaths splits two path lists into
// the entries unique to each list plus the sorted set of entries common to
// both.
func TestGroupSamePaths(t *testing.T) {
	tests := []struct {
		name      string
		in1       []string
		in2       []string
		expected1 []string // entries only in in1
		expected2 []string // entries only in in2
		expectedC []string // entries present in both
	}{
		{
			name:      "All common paths",
			in1:       []string{"/path/a", "/path/b", "/path/c"},
			in2:       []string{"/path/a", "/path/b", "/path/c"},
			expected1: []string{},
			expected2: []string{},
			expectedC: []string{"/path/a", "/path/b", "/path/c"},
		},
		{
			name:      "No common paths",
			in1:       []string{"/path/a", "/path/b"},
			in2:       []string{"/path/c", "/path/d"},
			expected1: []string{"/path/a", "/path/b"},
			expected2: []string{"/path/c", "/path/d"},
			expectedC: []string{},
		},
		{
			name:      "Some common paths",
			in1:       []string{"/path/a", "/path/b", "/path/c"},
			in2:       []string{"/path/b", "/path/c", "/path/d"},
			expected1: []string{"/path/a"},
			expected2: []string{"/path/d"},
			expectedC: []string{"/path/b", "/path/c"},
		},
		{
			name:      "Empty inputs",
			in1:       []string{},
			in2:       []string{},
			expected1: []string{},
			expected2: []string{},
			expectedC: []string{},
		},
		{
			name:      "One empty input",
			in1:       []string{"/path/a", "/path/b"},
			in2:       []string{},
			expected1: []string{"/path/a", "/path/b"},
			expected2: []string{},
			expectedC: []string{},
		},
		{
			// Inputs are sorted by groupSamePaths before comparison.
			name:      "Unsorted inputs with common paths",
			in1:       []string{"/path/c", "/path/a", "/path/b"},
			in2:       []string{"/path/b", "/path/c", "/path/a"},
			expected1: []string{},
			expected2: []string{},
			expectedC: []string{"/path/a", "/path/b", "/path/c"},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			out1, out2, common := groupSamePaths(tt.in1, tt.in2)
			require.Equal(t, tt.expected1, out1, "in1 should match expected1")
			require.Equal(t, tt.expected2, out2, "in2 should match expected2")
			require.Equal(t, tt.expectedC, common, "common should match expectedC")
		})
	}
}
|
||||||
267
bake/hcl_test.go
267
bake/hcl_test.go
@@ -2,8 +2,10 @@ package bake
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"reflect"
|
"reflect"
|
||||||
|
"regexp"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
hcl "github.com/hashicorp/hcl/v2"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -17,6 +19,7 @@ func TestHCLBasic(t *testing.T) {
|
|||||||
target "db" {
|
target "db" {
|
||||||
context = "./db"
|
context = "./db"
|
||||||
tags = ["docker.io/tonistiigi/db"]
|
tags = ["docker.io/tonistiigi/db"]
|
||||||
|
output = ["type=image"]
|
||||||
}
|
}
|
||||||
|
|
||||||
target "webapp" {
|
target "webapp" {
|
||||||
@@ -25,6 +28,9 @@ func TestHCLBasic(t *testing.T) {
|
|||||||
args = {
|
args = {
|
||||||
buildno = "123"
|
buildno = "123"
|
||||||
}
|
}
|
||||||
|
output = [
|
||||||
|
{ type = "image" }
|
||||||
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
target "cross" {
|
target "cross" {
|
||||||
@@ -49,18 +55,18 @@ func TestHCLBasic(t *testing.T) {
|
|||||||
require.Equal(t, []string{"db", "webapp"}, c.Groups[0].Targets)
|
require.Equal(t, []string{"db", "webapp"}, c.Groups[0].Targets)
|
||||||
|
|
||||||
require.Equal(t, 4, len(c.Targets))
|
require.Equal(t, 4, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "db")
|
require.Equal(t, "db", c.Targets[0].Name)
|
||||||
require.Equal(t, "./db", *c.Targets[0].Context)
|
require.Equal(t, "./db", *c.Targets[0].Context)
|
||||||
|
|
||||||
require.Equal(t, c.Targets[1].Name, "webapp")
|
require.Equal(t, "webapp", c.Targets[1].Name)
|
||||||
require.Equal(t, 1, len(c.Targets[1].Args))
|
require.Equal(t, 1, len(c.Targets[1].Args))
|
||||||
require.Equal(t, ptrstr("123"), c.Targets[1].Args["buildno"])
|
require.Equal(t, ptrstr("123"), c.Targets[1].Args["buildno"])
|
||||||
|
|
||||||
require.Equal(t, c.Targets[2].Name, "cross")
|
require.Equal(t, "cross", c.Targets[2].Name)
|
||||||
require.Equal(t, 2, len(c.Targets[2].Platforms))
|
require.Equal(t, 2, len(c.Targets[2].Platforms))
|
||||||
require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[2].Platforms)
|
require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[2].Platforms)
|
||||||
|
|
||||||
require.Equal(t, c.Targets[3].Name, "webapp-plus")
|
require.Equal(t, "webapp-plus", c.Targets[3].Name)
|
||||||
require.Equal(t, 1, len(c.Targets[3].Args))
|
require.Equal(t, 1, len(c.Targets[3].Args))
|
||||||
require.Equal(t, map[string]*string{"IAMCROSS": ptrstr("true")}, c.Targets[3].Args)
|
require.Equal(t, map[string]*string{"IAMCROSS": ptrstr("true")}, c.Targets[3].Args)
|
||||||
}
|
}
|
||||||
@@ -109,18 +115,18 @@ func TestHCLBasicInJSON(t *testing.T) {
|
|||||||
require.Equal(t, []string{"db", "webapp"}, c.Groups[0].Targets)
|
require.Equal(t, []string{"db", "webapp"}, c.Groups[0].Targets)
|
||||||
|
|
||||||
require.Equal(t, 4, len(c.Targets))
|
require.Equal(t, 4, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "db")
|
require.Equal(t, "db", c.Targets[0].Name)
|
||||||
require.Equal(t, "./db", *c.Targets[0].Context)
|
require.Equal(t, "./db", *c.Targets[0].Context)
|
||||||
|
|
||||||
require.Equal(t, c.Targets[1].Name, "webapp")
|
require.Equal(t, "webapp", c.Targets[1].Name)
|
||||||
require.Equal(t, 1, len(c.Targets[1].Args))
|
require.Equal(t, 1, len(c.Targets[1].Args))
|
||||||
require.Equal(t, ptrstr("123"), c.Targets[1].Args["buildno"])
|
require.Equal(t, ptrstr("123"), c.Targets[1].Args["buildno"])
|
||||||
|
|
||||||
require.Equal(t, c.Targets[2].Name, "cross")
|
require.Equal(t, "cross", c.Targets[2].Name)
|
||||||
require.Equal(t, 2, len(c.Targets[2].Platforms))
|
require.Equal(t, 2, len(c.Targets[2].Platforms))
|
||||||
require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[2].Platforms)
|
require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[2].Platforms)
|
||||||
|
|
||||||
require.Equal(t, c.Targets[3].Name, "webapp-plus")
|
require.Equal(t, "webapp-plus", c.Targets[3].Name)
|
||||||
require.Equal(t, 1, len(c.Targets[3].Args))
|
require.Equal(t, 1, len(c.Targets[3].Args))
|
||||||
require.Equal(t, map[string]*string{"IAMCROSS": ptrstr("true")}, c.Targets[3].Args)
|
require.Equal(t, map[string]*string{"IAMCROSS": ptrstr("true")}, c.Targets[3].Args)
|
||||||
}
|
}
|
||||||
@@ -146,7 +152,7 @@ func TestHCLWithFunctions(t *testing.T) {
|
|||||||
require.Equal(t, []string{"webapp"}, c.Groups[0].Targets)
|
require.Equal(t, []string{"webapp"}, c.Groups[0].Targets)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "webapp")
|
require.Equal(t, "webapp", c.Targets[0].Name)
|
||||||
require.Equal(t, ptrstr("124"), c.Targets[0].Args["buildno"])
|
require.Equal(t, ptrstr("124"), c.Targets[0].Args["buildno"])
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -176,7 +182,7 @@ func TestHCLWithUserDefinedFunctions(t *testing.T) {
|
|||||||
require.Equal(t, []string{"webapp"}, c.Groups[0].Targets)
|
require.Equal(t, []string{"webapp"}, c.Groups[0].Targets)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "webapp")
|
require.Equal(t, "webapp", c.Targets[0].Name)
|
||||||
require.Equal(t, ptrstr("124"), c.Targets[0].Args["buildno"])
|
require.Equal(t, ptrstr("124"), c.Targets[0].Args["buildno"])
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -205,7 +211,7 @@ func TestHCLWithVariables(t *testing.T) {
|
|||||||
require.Equal(t, []string{"webapp"}, c.Groups[0].Targets)
|
require.Equal(t, []string{"webapp"}, c.Groups[0].Targets)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "webapp")
|
require.Equal(t, "webapp", c.Targets[0].Name)
|
||||||
require.Equal(t, ptrstr("123"), c.Targets[0].Args["buildno"])
|
require.Equal(t, ptrstr("123"), c.Targets[0].Args["buildno"])
|
||||||
|
|
||||||
t.Setenv("BUILD_NUMBER", "456")
|
t.Setenv("BUILD_NUMBER", "456")
|
||||||
@@ -218,7 +224,7 @@ func TestHCLWithVariables(t *testing.T) {
|
|||||||
require.Equal(t, []string{"webapp"}, c.Groups[0].Targets)
|
require.Equal(t, []string{"webapp"}, c.Groups[0].Targets)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "webapp")
|
require.Equal(t, "webapp", c.Targets[0].Name)
|
||||||
require.Equal(t, ptrstr("456"), c.Targets[0].Args["buildno"])
|
require.Equal(t, ptrstr("456"), c.Targets[0].Args["buildno"])
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -241,7 +247,7 @@ func TestHCLWithVariablesInFunctions(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "webapp")
|
require.Equal(t, "webapp", c.Targets[0].Name)
|
||||||
require.Equal(t, []string{"user/repo:v1"}, c.Targets[0].Tags)
|
require.Equal(t, []string{"user/repo:v1"}, c.Targets[0].Tags)
|
||||||
|
|
||||||
t.Setenv("REPO", "docker/buildx")
|
t.Setenv("REPO", "docker/buildx")
|
||||||
@@ -250,7 +256,7 @@ func TestHCLWithVariablesInFunctions(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "webapp")
|
require.Equal(t, "webapp", c.Targets[0].Name)
|
||||||
require.Equal(t, []string{"docker/buildx:v1"}, c.Targets[0].Tags)
|
require.Equal(t, []string{"docker/buildx:v1"}, c.Targets[0].Tags)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -279,7 +285,7 @@ func TestHCLMultiFileSharedVariables(t *testing.T) {
|
|||||||
}, nil)
|
}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "app")
|
require.Equal(t, "app", c.Targets[0].Name)
|
||||||
require.Equal(t, ptrstr("pre-abc"), c.Targets[0].Args["v1"])
|
require.Equal(t, ptrstr("pre-abc"), c.Targets[0].Args["v1"])
|
||||||
require.Equal(t, ptrstr("abc-post"), c.Targets[0].Args["v2"])
|
require.Equal(t, ptrstr("abc-post"), c.Targets[0].Args["v2"])
|
||||||
|
|
||||||
@@ -292,7 +298,7 @@ func TestHCLMultiFileSharedVariables(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "app")
|
require.Equal(t, "app", c.Targets[0].Name)
|
||||||
require.Equal(t, ptrstr("pre-def"), c.Targets[0].Args["v1"])
|
require.Equal(t, ptrstr("pre-def"), c.Targets[0].Args["v1"])
|
||||||
require.Equal(t, ptrstr("def-post"), c.Targets[0].Args["v2"])
|
require.Equal(t, ptrstr("def-post"), c.Targets[0].Args["v2"])
|
||||||
}
|
}
|
||||||
@@ -328,7 +334,7 @@ func TestHCLVarsWithVars(t *testing.T) {
|
|||||||
}, nil)
|
}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "app")
|
require.Equal(t, "app", c.Targets[0].Name)
|
||||||
require.Equal(t, ptrstr("pre--ABCDEF-"), c.Targets[0].Args["v1"])
|
require.Equal(t, ptrstr("pre--ABCDEF-"), c.Targets[0].Args["v1"])
|
||||||
require.Equal(t, ptrstr("ABCDEF-post"), c.Targets[0].Args["v2"])
|
require.Equal(t, ptrstr("ABCDEF-post"), c.Targets[0].Args["v2"])
|
||||||
|
|
||||||
@@ -341,7 +347,7 @@ func TestHCLVarsWithVars(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "app")
|
require.Equal(t, "app", c.Targets[0].Name)
|
||||||
require.Equal(t, ptrstr("pre--NEWDEF-"), c.Targets[0].Args["v1"])
|
require.Equal(t, ptrstr("pre--NEWDEF-"), c.Targets[0].Args["v1"])
|
||||||
require.Equal(t, ptrstr("NEWDEF-post"), c.Targets[0].Args["v2"])
|
require.Equal(t, ptrstr("NEWDEF-post"), c.Targets[0].Args["v2"])
|
||||||
}
|
}
|
||||||
@@ -366,7 +372,7 @@ func TestHCLTypedVariables(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "app")
|
require.Equal(t, "app", c.Targets[0].Name)
|
||||||
require.Equal(t, ptrstr("lower"), c.Targets[0].Args["v1"])
|
require.Equal(t, ptrstr("lower"), c.Targets[0].Args["v1"])
|
||||||
require.Equal(t, ptrstr("yes"), c.Targets[0].Args["v2"])
|
require.Equal(t, ptrstr("yes"), c.Targets[0].Args["v2"])
|
||||||
|
|
||||||
@@ -377,7 +383,7 @@ func TestHCLTypedVariables(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "app")
|
require.Equal(t, "app", c.Targets[0].Name)
|
||||||
require.Equal(t, ptrstr("higher"), c.Targets[0].Args["v1"])
|
require.Equal(t, ptrstr("higher"), c.Targets[0].Args["v1"])
|
||||||
require.Equal(t, ptrstr("no"), c.Targets[0].Args["v2"])
|
require.Equal(t, ptrstr("no"), c.Targets[0].Args["v2"])
|
||||||
|
|
||||||
@@ -475,7 +481,7 @@ func TestHCLAttrs(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "app")
|
require.Equal(t, "app", c.Targets[0].Name)
|
||||||
require.Equal(t, ptrstr("attr-abcdef"), c.Targets[0].Args["v1"])
|
require.Equal(t, ptrstr("attr-abcdef"), c.Targets[0].Args["v1"])
|
||||||
|
|
||||||
// env does not apply if no variable
|
// env does not apply if no variable
|
||||||
@@ -484,7 +490,7 @@ func TestHCLAttrs(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "app")
|
require.Equal(t, "app", c.Targets[0].Name)
|
||||||
require.Equal(t, ptrstr("attr-abcdef"), c.Targets[0].Args["v1"])
|
require.Equal(t, ptrstr("attr-abcdef"), c.Targets[0].Args["v1"])
|
||||||
// attr-multifile
|
// attr-multifile
|
||||||
}
|
}
|
||||||
@@ -592,11 +598,172 @@ func TestHCLAttrsCustomType(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "app")
|
require.Equal(t, "app", c.Targets[0].Name)
|
||||||
require.Equal(t, []string{"linux/arm64", "linux/amd64"}, c.Targets[0].Platforms)
|
require.Equal(t, []string{"linux/arm64", "linux/amd64"}, c.Targets[0].Platforms)
|
||||||
require.Equal(t, ptrstr("linux/arm64"), c.Targets[0].Args["v1"])
|
require.Equal(t, ptrstr("linux/arm64"), c.Targets[0].Args["v1"])
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestHCLAttrsCapsuleType(t *testing.T) {
|
||||||
|
dt := []byte(`
|
||||||
|
target "app" {
|
||||||
|
attest = [
|
||||||
|
{ type = "provenance", mode = "max" },
|
||||||
|
"type=sbom,disabled=true,generator=foo,\"ENV1=bar,baz\",ENV2=hello",
|
||||||
|
]
|
||||||
|
|
||||||
|
cache-from = [
|
||||||
|
{ type = "registry", ref = "user/app:cache" },
|
||||||
|
"type=local,src=path/to/cache",
|
||||||
|
]
|
||||||
|
|
||||||
|
cache-to = [
|
||||||
|
{ type = "local", dest = "path/to/cache" },
|
||||||
|
]
|
||||||
|
|
||||||
|
output = [
|
||||||
|
{ type = "oci", dest = "../out.tar" },
|
||||||
|
"type=local,dest=../out",
|
||||||
|
]
|
||||||
|
|
||||||
|
secret = [
|
||||||
|
{ id = "mysecret", src = "/local/secret" },
|
||||||
|
{ id = "mysecret2", env = "TOKEN" },
|
||||||
|
]
|
||||||
|
|
||||||
|
ssh = [
|
||||||
|
{ id = "default" },
|
||||||
|
{ id = "key", paths = ["path/to/key"] },
|
||||||
|
]
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
|
||||||
|
c, err := ParseFile(dt, "docker-bake.hcl")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(c.Targets))
|
||||||
|
require.Equal(t, []string{"type=provenance,mode=max", "type=sbom,disabled=true,\"ENV1=bar,baz\",ENV2=hello,generator=foo"}, stringify(c.Targets[0].Attest))
|
||||||
|
require.Equal(t, []string{"type=local,dest=../out", "type=oci,dest=../out.tar"}, stringify(c.Targets[0].Outputs))
|
||||||
|
require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheFrom))
|
||||||
|
require.Equal(t, []string{"type=local,dest=path/to/cache"}, stringify(c.Targets[0].CacheTo))
|
||||||
|
require.Equal(t, []string{"id=mysecret,src=/local/secret", "id=mysecret2,env=TOKEN"}, stringify(c.Targets[0].Secrets))
|
||||||
|
require.Equal(t, []string{"default", "key=path/to/key"}, stringify(c.Targets[0].SSH))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHCLAttrsCapsuleType_ObjectVars(t *testing.T) {
|
||||||
|
dt := []byte(`
|
||||||
|
variable "foo" {
|
||||||
|
default = "bar"
|
||||||
|
}
|
||||||
|
|
||||||
|
target "app" {
|
||||||
|
cache-from = [
|
||||||
|
{ type = "registry", ref = "user/app:cache" },
|
||||||
|
"type=local,src=path/to/cache",
|
||||||
|
]
|
||||||
|
|
||||||
|
cache-to = [ target.app.cache-from[0] ]
|
||||||
|
|
||||||
|
output = [
|
||||||
|
{ type = "oci", dest = "../out.tar" },
|
||||||
|
"type=local,dest=../out",
|
||||||
|
]
|
||||||
|
|
||||||
|
secret = [
|
||||||
|
{ id = "mysecret", src = "/local/secret" },
|
||||||
|
]
|
||||||
|
|
||||||
|
ssh = [
|
||||||
|
{ id = "default" },
|
||||||
|
{ id = "key", paths = ["path/to/${target.app.output[0].type}"] },
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "web" {
|
||||||
|
cache-from = target.app.cache-from
|
||||||
|
|
||||||
|
output = [ "type=oci,dest=../${foo}.tar" ]
|
||||||
|
|
||||||
|
secret = [
|
||||||
|
{ id = target.app.output[0].type, src = "/${target.app.cache-from[1].type}/secret" },
|
||||||
|
]
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
|
||||||
|
c, err := ParseFile(dt, "docker-bake.hcl")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 2, len(c.Targets))
|
||||||
|
|
||||||
|
findTarget := func(t *testing.T, name string) *Target {
|
||||||
|
t.Helper()
|
||||||
|
for _, tgt := range c.Targets {
|
||||||
|
if tgt.Name == name {
|
||||||
|
return tgt
|
||||||
|
}
|
||||||
|
}
|
||||||
|
t.Fatalf("could not find target %q", name)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
app := findTarget(t, "app")
|
||||||
|
require.Equal(t, []string{"type=local,dest=../out", "type=oci,dest=../out.tar"}, stringify(app.Outputs))
|
||||||
|
require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(app.CacheFrom))
|
||||||
|
require.Equal(t, []string{"user/app:cache"}, stringify(app.CacheTo))
|
||||||
|
require.Equal(t, []string{"id=mysecret,src=/local/secret"}, stringify(app.Secrets))
|
||||||
|
require.Equal(t, []string{"default", "key=path/to/oci"}, stringify(app.SSH))
|
||||||
|
|
||||||
|
web := findTarget(t, "web")
|
||||||
|
require.Equal(t, []string{"type=oci,dest=../bar.tar"}, stringify(web.Outputs))
|
||||||
|
require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(web.CacheFrom))
|
||||||
|
require.Equal(t, []string{"id=oci,src=/local/secret"}, stringify(web.Secrets))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHCLAttrsCapsuleType_MissingVars(t *testing.T) {
|
||||||
|
dt := []byte(`
|
||||||
|
target "app" {
|
||||||
|
attest = [
|
||||||
|
"type=sbom,disabled=${SBOM}",
|
||||||
|
]
|
||||||
|
|
||||||
|
cache-from = [
|
||||||
|
{ type = "registry", ref = "user/app:${FOO1}" },
|
||||||
|
"type=local,src=path/to/cache:${FOO2}",
|
||||||
|
]
|
||||||
|
|
||||||
|
cache-to = [
|
||||||
|
{ type = "local", dest = "path/to/${BAR}" },
|
||||||
|
]
|
||||||
|
|
||||||
|
output = [
|
||||||
|
{ type = "oci", dest = "../${OUTPUT}.tar" },
|
||||||
|
]
|
||||||
|
|
||||||
|
secret = [
|
||||||
|
{ id = "mysecret", src = "/local/${SECRET}" },
|
||||||
|
]
|
||||||
|
|
||||||
|
ssh = [
|
||||||
|
{ id = "key", paths = ["path/to/${SSH_KEY}"] },
|
||||||
|
]
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
|
||||||
|
var diags hcl.Diagnostics
|
||||||
|
_, err := ParseFile(dt, "docker-bake.hcl")
|
||||||
|
require.ErrorAs(t, err, &diags)
|
||||||
|
|
||||||
|
re := regexp.MustCompile(`There is no variable named "([\w\d_]+)"`)
|
||||||
|
var actual []string
|
||||||
|
for _, diag := range diags {
|
||||||
|
if m := re.FindStringSubmatch(diag.Error()); m != nil {
|
||||||
|
actual = append(actual, m[1])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
require.ElementsMatch(t,
|
||||||
|
[]string{"SBOM", "FOO1", "FOO2", "BAR", "OUTPUT", "SECRET", "SSH_KEY"},
|
||||||
|
actual)
|
||||||
|
}
|
||||||
|
|
||||||
func TestHCLMultiFileAttrs(t *testing.T) {
|
func TestHCLMultiFileAttrs(t *testing.T) {
|
||||||
dt := []byte(`
|
dt := []byte(`
|
||||||
variable "FOO" {
|
variable "FOO" {
|
||||||
@@ -618,7 +785,7 @@ func TestHCLMultiFileAttrs(t *testing.T) {
|
|||||||
}, nil)
|
}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "app")
|
require.Equal(t, "app", c.Targets[0].Name)
|
||||||
require.Equal(t, ptrstr("pre-def"), c.Targets[0].Args["v1"])
|
require.Equal(t, ptrstr("pre-def"), c.Targets[0].Args["v1"])
|
||||||
|
|
||||||
t.Setenv("FOO", "ghi")
|
t.Setenv("FOO", "ghi")
|
||||||
@@ -630,7 +797,7 @@ func TestHCLMultiFileAttrs(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "app")
|
require.Equal(t, "app", c.Targets[0].Name)
|
||||||
require.Equal(t, ptrstr("pre-ghi"), c.Targets[0].Args["v1"])
|
require.Equal(t, ptrstr("pre-ghi"), c.Targets[0].Args["v1"])
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -653,7 +820,7 @@ func TestHCLMultiFileGlobalAttrs(t *testing.T) {
|
|||||||
}, nil)
|
}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "app")
|
require.Equal(t, "app", c.Targets[0].Name)
|
||||||
require.Equal(t, "pre-def", *c.Targets[0].Args["v1"])
|
require.Equal(t, "pre-def", *c.Targets[0].Args["v1"])
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -839,12 +1006,12 @@ func TestHCLRenameMultiFile(t *testing.T) {
|
|||||||
|
|
||||||
require.Equal(t, 2, len(c.Targets))
|
require.Equal(t, 2, len(c.Targets))
|
||||||
|
|
||||||
require.Equal(t, c.Targets[0].Name, "bar")
|
require.Equal(t, "bar", c.Targets[0].Name)
|
||||||
require.Equal(t, *c.Targets[0].Dockerfile, "x")
|
require.Equal(t, "x", *c.Targets[0].Dockerfile)
|
||||||
require.Equal(t, *c.Targets[0].Target, "z")
|
require.Equal(t, "z", *c.Targets[0].Target)
|
||||||
|
|
||||||
require.Equal(t, c.Targets[1].Name, "foo")
|
require.Equal(t, "foo", c.Targets[1].Name)
|
||||||
require.Equal(t, *c.Targets[1].Context, "y")
|
require.Equal(t, "y", *c.Targets[1].Context)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestHCLMatrixBasic(t *testing.T) {
|
func TestHCLMatrixBasic(t *testing.T) {
|
||||||
@@ -862,10 +1029,10 @@ func TestHCLMatrixBasic(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 2, len(c.Targets))
|
require.Equal(t, 2, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "x")
|
require.Equal(t, "x", c.Targets[0].Name)
|
||||||
require.Equal(t, c.Targets[1].Name, "y")
|
require.Equal(t, "y", c.Targets[1].Name)
|
||||||
require.Equal(t, *c.Targets[0].Dockerfile, "x.Dockerfile")
|
require.Equal(t, "x.Dockerfile", *c.Targets[0].Dockerfile)
|
||||||
require.Equal(t, *c.Targets[1].Dockerfile, "y.Dockerfile")
|
require.Equal(t, "y.Dockerfile", *c.Targets[1].Dockerfile)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Groups))
|
require.Equal(t, 1, len(c.Groups))
|
||||||
require.Equal(t, "default", c.Groups[0].Name)
|
require.Equal(t, "default", c.Groups[0].Name)
|
||||||
@@ -948,9 +1115,9 @@ func TestHCLMatrixMaps(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 2, len(c.Targets))
|
require.Equal(t, 2, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "aa")
|
require.Equal(t, "aa", c.Targets[0].Name)
|
||||||
require.Equal(t, c.Targets[0].Args["target"], ptrstr("valbb"))
|
require.Equal(t, c.Targets[0].Args["target"], ptrstr("valbb"))
|
||||||
require.Equal(t, c.Targets[1].Name, "cc")
|
require.Equal(t, "cc", c.Targets[1].Name)
|
||||||
require.Equal(t, c.Targets[1].Args["target"], ptrstr("valdd"))
|
require.Equal(t, c.Targets[1].Args["target"], ptrstr("valdd"))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1141,7 +1308,7 @@ func TestJSONAttributes(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "app")
|
require.Equal(t, "app", c.Targets[0].Name)
|
||||||
require.Equal(t, ptrstr("pre-abc-def"), c.Targets[0].Args["v1"])
|
require.Equal(t, ptrstr("pre-abc-def"), c.Targets[0].Args["v1"])
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1166,7 +1333,7 @@ func TestJSONFunctions(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "app")
|
require.Equal(t, "app", c.Targets[0].Name)
|
||||||
require.Equal(t, ptrstr("pre-<FOO-abc>"), c.Targets[0].Args["v1"])
|
require.Equal(t, ptrstr("pre-<FOO-abc>"), c.Targets[0].Args["v1"])
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1184,7 +1351,7 @@ func TestJSONInvalidFunctions(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "app")
|
require.Equal(t, "app", c.Targets[0].Name)
|
||||||
require.Equal(t, ptrstr(`myfunc("foo")`), c.Targets[0].Args["v1"])
|
require.Equal(t, ptrstr(`myfunc("foo")`), c.Targets[0].Args["v1"])
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1212,7 +1379,7 @@ func TestHCLFunctionInAttr(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "app")
|
require.Equal(t, "app", c.Targets[0].Name)
|
||||||
require.Equal(t, ptrstr("FOO <> [baz]"), c.Targets[0].Args["v1"])
|
require.Equal(t, ptrstr("FOO <> [baz]"), c.Targets[0].Args["v1"])
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1243,7 +1410,7 @@ services:
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "app")
|
require.Equal(t, "app", c.Targets[0].Name)
|
||||||
require.Equal(t, ptrstr("foo"), c.Targets[0].Args["v1"])
|
require.Equal(t, ptrstr("foo"), c.Targets[0].Args["v1"])
|
||||||
require.Equal(t, ptrstr("bar"), c.Targets[0].Args["v2"])
|
require.Equal(t, ptrstr("bar"), c.Targets[0].Args["v2"])
|
||||||
require.Equal(t, "dir", *c.Targets[0].Context)
|
require.Equal(t, "dir", *c.Targets[0].Context)
|
||||||
@@ -1266,7 +1433,7 @@ func TestHCLBuiltinVars(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "app")
|
require.Equal(t, "app", c.Targets[0].Name)
|
||||||
require.Equal(t, "foo", *c.Targets[0].Context)
|
require.Equal(t, "foo", *c.Targets[0].Context)
|
||||||
require.Equal(t, "test", *c.Targets[0].Dockerfile)
|
require.Equal(t, "test", *c.Targets[0].Dockerfile)
|
||||||
}
|
}
|
||||||
@@ -1332,17 +1499,17 @@ target "b" {
|
|||||||
|
|
||||||
require.Equal(t, 4, len(c.Targets))
|
require.Equal(t, 4, len(c.Targets))
|
||||||
|
|
||||||
require.Equal(t, c.Targets[0].Name, "metadata-a")
|
require.Equal(t, "metadata-a", c.Targets[0].Name)
|
||||||
require.Equal(t, []string{"app/a:1.0.0", "app/a:latest"}, c.Targets[0].Tags)
|
require.Equal(t, []string{"app/a:1.0.0", "app/a:latest"}, c.Targets[0].Tags)
|
||||||
|
|
||||||
require.Equal(t, c.Targets[1].Name, "metadata-b")
|
require.Equal(t, "metadata-b", c.Targets[1].Name)
|
||||||
require.Equal(t, []string{"app/b:1.0.0", "app/b:latest"}, c.Targets[1].Tags)
|
require.Equal(t, []string{"app/b:1.0.0", "app/b:latest"}, c.Targets[1].Tags)
|
||||||
|
|
||||||
require.Equal(t, c.Targets[2].Name, "a")
|
require.Equal(t, "a", c.Targets[2].Name)
|
||||||
require.Equal(t, ".", *c.Targets[2].Context)
|
require.Equal(t, ".", *c.Targets[2].Context)
|
||||||
require.Equal(t, "a", *c.Targets[2].Target)
|
require.Equal(t, "a", *c.Targets[2].Target)
|
||||||
|
|
||||||
require.Equal(t, c.Targets[3].Name, "b")
|
require.Equal(t, "b", c.Targets[3].Name)
|
||||||
require.Equal(t, ".", *c.Targets[3].Context)
|
require.Equal(t, ".", *c.Targets[3].Context)
|
||||||
require.Equal(t, "b", *c.Targets[3].Target)
|
require.Equal(t, "b", *c.Targets[3].Target)
|
||||||
}
|
}
|
||||||
@@ -1389,10 +1556,10 @@ target "two" {
|
|||||||
|
|
||||||
require.Equal(t, 2, len(c.Targets))
|
require.Equal(t, 2, len(c.Targets))
|
||||||
|
|
||||||
require.Equal(t, c.Targets[0].Name, "one")
|
require.Equal(t, "one", c.Targets[0].Name)
|
||||||
require.Equal(t, map[string]*string{"a": ptrstr("pre-ghi-jkl")}, c.Targets[0].Args)
|
require.Equal(t, map[string]*string{"a": ptrstr("pre-ghi-jkl")}, c.Targets[0].Args)
|
||||||
|
|
||||||
require.Equal(t, c.Targets[1].Name, "two")
|
require.Equal(t, "two", c.Targets[1].Name)
|
||||||
require.Equal(t, map[string]*string{"b": ptrstr("pre-jkl")}, c.Targets[1].Args)
|
require.Equal(t, map[string]*string{"b": ptrstr("pre-jkl")}, c.Targets[1].Args)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1478,7 +1645,7 @@ func TestHCLIndexOfFunc(t *testing.T) {
|
|||||||
require.Empty(t, c.Targets[1].Tags[1])
|
require.Empty(t, c.Targets[1].Tags[1])
|
||||||
}
|
}
|
||||||
|
|
||||||
func ptrstr(s interface{}) *string {
|
func ptrstr(s any) *string {
|
||||||
var n *string
|
var n *string
|
||||||
if reflect.ValueOf(s).Kind() == reflect.String {
|
if reflect.ValueOf(s).Kind() == reflect.String {
|
||||||
ss := s.(string)
|
ss := s.(string)
|
||||||
|
|||||||
355
bake/hclparser/LICENSE
Normal file
355
bake/hclparser/LICENSE
Normal file
@@ -0,0 +1,355 @@
|
|||||||
|
Copyright (c) 2014 HashiCorp, Inc.
|
||||||
|
|
||||||
|
Mozilla Public License, version 2.0
|
||||||
|
|
||||||
|
1. Definitions
|
||||||
|
|
||||||
|
1.1. “Contributor”
|
||||||
|
|
||||||
|
means each individual or legal entity that creates, contributes to the
|
||||||
|
creation of, or owns Covered Software.
|
||||||
|
|
||||||
|
1.2. “Contributor Version”
|
||||||
|
|
||||||
|
means the combination of the Contributions of others (if any) used by a
|
||||||
|
Contributor and that particular Contributor’s Contribution.
|
||||||
|
|
||||||
|
1.3. “Contribution”
|
||||||
|
|
||||||
|
means Covered Software of a particular Contributor.
|
||||||
|
|
||||||
|
1.4. “Covered Software”
|
||||||
|
|
||||||
|
means Source Code Form to which the initial Contributor has attached the
|
||||||
|
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||||
|
Modifications of such Source Code Form, in each case including portions
|
||||||
|
thereof.
|
||||||
|
|
||||||
|
1.5. “Incompatible With Secondary Licenses”
|
||||||
|
means
|
||||||
|
|
||||||
|
a. that the initial Contributor has attached the notice described in
|
||||||
|
Exhibit B to the Covered Software; or
|
||||||
|
|
||||||
|
b. that the Covered Software was made available under the terms of version
|
||||||
|
1.1 or earlier of the License, but not also under the terms of a
|
||||||
|
Secondary License.
|
||||||
|
|
||||||
|
1.6. “Executable Form”
|
||||||
|
|
||||||
|
means any form of the work other than Source Code Form.
|
||||||
|
|
||||||
|
1.7. “Larger Work”
|
||||||
|
|
||||||
|
means a work that combines Covered Software with other material, in a separate
|
||||||
|
file or files, that is not Covered Software.
|
||||||
|
|
||||||
|
1.8. “License”
|
||||||
|
|
||||||
|
means this document.
|
||||||
|
|
||||||
|
1.9. “Licensable”
|
||||||
|
|
||||||
|
means having the right to grant, to the maximum extent possible, whether at the
|
||||||
|
time of the initial grant or subsequently, any and all of the rights conveyed by
|
||||||
|
this License.
|
||||||
|
|
||||||
|
1.10. “Modifications”
|
||||||
|
|
||||||
|
means any of the following:
|
||||||
|
|
||||||
|
a. any file in Source Code Form that results from an addition to, deletion
|
||||||
|
from, or modification of the contents of Covered Software; or
|
||||||
|
|
||||||
|
b. any new file in Source Code Form that contains any Covered Software.
|
||||||
|
|
||||||
|
1.11. “Patent Claims” of a Contributor
|
||||||
|
|
||||||
|
means any patent claim(s), including without limitation, method, process,
|
||||||
|
and apparatus claims, in any patent Licensable by such Contributor that
|
||||||
|
would be infringed, but for the grant of the License, by the making,
|
||||||
|
using, selling, offering for sale, having made, import, or transfer of
|
||||||
|
either its Contributions or its Contributor Version.
|
||||||
|
|
||||||
|
1.12. “Secondary License”
|
||||||
|
|
||||||
|
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||||
|
General Public License, Version 2.1, the GNU Affero General Public
|
||||||
|
License, Version 3.0, or any later versions of those licenses.
|
||||||
|
|
||||||
|
1.13. “Source Code Form”
|
||||||
|
|
||||||
|
means the form of the work preferred for making modifications.
|
||||||
|
|
||||||
|
1.14. “You” (or “Your”)
|
||||||
|
|
||||||
|
means an individual or a legal entity exercising rights under this
|
||||||
|
License. For legal entities, “You” includes any entity that controls, is
|
||||||
|
controlled by, or is under common control with You. For purposes of this
|
||||||
|
definition, “control” means (a) the power, direct or indirect, to cause
|
||||||
|
the direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||||
|
outstanding shares or beneficial ownership of such entity.
|
||||||
|
|
||||||
|
|
||||||
|
2. License Grants and Conditions
|
||||||
|
|
||||||
|
2.1. Grants
|
||||||
|
|
||||||
|
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||||
|
non-exclusive license:
|
||||||
|
|
||||||
|
a. under intellectual property rights (other than patent or trademark)
|
||||||
|
Licensable by such Contributor to use, reproduce, make available,
|
||||||
|
modify, display, perform, distribute, and otherwise exploit its
|
||||||
|
Contributions, either on an unmodified basis, with Modifications, or as
|
||||||
|
part of a Larger Work; and
|
||||||
|
|
||||||
|
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||||
|
sale, have made, import, and otherwise transfer either its Contributions
|
||||||
|
or its Contributor Version.
|
||||||
|
|
||||||
|
2.2. Effective Date
|
||||||
|
|
||||||
|
The licenses granted in Section 2.1 with respect to any Contribution become
|
||||||
|
effective for each Contribution on the date the Contributor first distributes
|
||||||
|
such Contribution.
|
||||||
|
|
||||||
|
2.3. Limitations on Grant Scope
|
||||||
|
|
||||||
|
The licenses granted in this Section 2 are the only rights granted under this
|
||||||
|
License. No additional rights or licenses will be implied from the distribution
|
||||||
|
or licensing of Covered Software under this License. Notwithstanding Section
|
||||||
|
2.1(b) above, no patent license is granted by a Contributor:
|
||||||
|
|
||||||
|
a. for any code that a Contributor has removed from Covered Software; or
|
||||||
|
|
||||||
|
b. for infringements caused by: (i) Your and any other third party’s
|
||||||
|
modifications of Covered Software, or (ii) the combination of its
|
||||||
|
Contributions with other software (except as part of its Contributor
|
||||||
|
Version); or
|
||||||
|
|
||||||
|
c. under Patent Claims infringed by Covered Software in the absence of its
|
||||||
|
Contributions.
|
||||||
|
|
||||||
|
This License does not grant any rights in the trademarks, service marks, or
|
||||||
|
logos of any Contributor (except as may be necessary to comply with the
|
||||||
|
notice requirements in Section 3.4).
|
||||||
|
|
||||||
|
2.4. Subsequent Licenses
|
||||||
|
|
||||||
|
No Contributor makes additional grants as a result of Your choice to
|
||||||
|
distribute the Covered Software under a subsequent version of this License
|
||||||
|
(see Section 10.2) or under the terms of a Secondary License (if permitted
|
||||||
|
under the terms of Section 3.3).
|
||||||
|
|
||||||
|
2.5. Representation
|
||||||
|
|
||||||
|
Each Contributor represents that the Contributor believes its Contributions
|
||||||
|
are its original creation(s) or it has sufficient rights to grant the
|
||||||
|
rights to its Contributions conveyed by this License.
|
||||||
|
|
||||||
|
2.6. Fair Use
|
||||||
|
|
||||||
|
This License is not intended to limit any rights You have under applicable
|
||||||
|
copyright doctrines of fair use, fair dealing, or other equivalents.
|
||||||
|
|
||||||
|
2.7. Conditions
|
||||||
|
|
||||||
|
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||||
|
Section 2.1.
|
||||||
|
|
||||||
|
|
||||||
|
3. Responsibilities
|
||||||
|
|
||||||
|
3.1. Distribution of Source Form
|
||||||
|
|
||||||
|
All distribution of Covered Software in Source Code Form, including any
|
||||||
|
Modifications that You create or to which You contribute, must be under the
|
||||||
|
terms of this License. You must inform recipients that the Source Code Form
|
||||||
|
of the Covered Software is governed by the terms of this License, and how
|
||||||
|
they can obtain a copy of this License. You may not attempt to alter or
|
||||||
|
restrict the recipients’ rights in the Source Code Form.
|
||||||
|
|
||||||
|
3.2. Distribution of Executable Form
|
||||||
|
|
||||||
|
If You distribute Covered Software in Executable Form then:
|
||||||
|
|
||||||
|
a. such Covered Software must also be made available in Source Code Form,
|
||||||
|
as described in Section 3.1, and You must inform recipients of the
|
||||||
|
Executable Form how they can obtain a copy of such Source Code Form by
|
||||||
|
reasonable means in a timely manner, at a charge no more than the cost
|
||||||
|
of distribution to the recipient; and
|
||||||
|
|
||||||
|
b. You may distribute such Executable Form under the terms of this License,
|
||||||
|
or sublicense it under different terms, provided that the license for
|
||||||
|
the Executable Form does not attempt to limit or alter the recipients’
|
||||||
|
rights in the Source Code Form under this License.
|
||||||
|
|
||||||
|
3.3. Distribution of a Larger Work
|
||||||
|
|
||||||
|
You may create and distribute a Larger Work under terms of Your choice,
|
||||||
|
provided that You also comply with the requirements of this License for the
|
||||||
|
Covered Software. If the Larger Work is a combination of Covered Software
|
||||||
|
with a work governed by one or more Secondary Licenses, and the Covered
|
||||||
|
Software is not Incompatible With Secondary Licenses, this License permits
|
||||||
|
You to additionally distribute such Covered Software under the terms of
|
||||||
|
such Secondary License(s), so that the recipient of the Larger Work may, at
|
||||||
|
their option, further distribute the Covered Software under the terms of
|
||||||
|
either this License or such Secondary License(s).
|
||||||
|
|
||||||
|
3.4. Notices
|
||||||
|
|
||||||
|
You may not remove or alter the substance of any license notices (including
|
||||||
|
copyright notices, patent notices, disclaimers of warranty, or limitations
|
||||||
|
of liability) contained within the Source Code Form of the Covered
|
||||||
|
Software, except that You may alter any license notices to the extent
|
||||||
|
required to remedy known factual inaccuracies.
|
||||||
|
|
||||||
|
3.5. Application of Additional Terms
|
||||||
|
|
||||||
|
You may choose to offer, and to charge a fee for, warranty, support,
|
||||||
|
indemnity or liability obligations to one or more recipients of Covered
|
||||||
|
Software. However, You may do so only on Your own behalf, and not on behalf
|
||||||
|
of any Contributor. You must make it absolutely clear that any such
|
||||||
|
warranty, support, indemnity, or liability obligation is offered by You
|
||||||
|
alone, and You hereby agree to indemnify every Contributor for any
|
||||||
|
liability incurred by such Contributor as a result of warranty, support,
|
||||||
|
indemnity or liability terms You offer. You may include additional
|
||||||
|
disclaimers of warranty and limitations of liability specific to any
|
||||||
|
jurisdiction.
|
||||||
|
|
||||||
|
4. Inability to Comply Due to Statute or Regulation
|
||||||
|
|
||||||
|
If it is impossible for You to comply with any of the terms of this License
|
||||||
|
with respect to some or all of the Covered Software due to statute, judicial
|
||||||
|
order, or regulation then You must: (a) comply with the terms of this License
|
||||||
|
to the maximum extent possible; and (b) describe the limitations and the code
|
||||||
|
they affect. Such description must be placed in a text file included with all
|
||||||
|
distributions of the Covered Software under this License. Except to the
|
||||||
|
extent prohibited by statute or regulation, such description must be
|
||||||
|
sufficiently detailed for a recipient of ordinary skill to be able to
|
||||||
|
understand it.
|
||||||
|
|
||||||
|
5. Termination
|
||||||
|
|
||||||
|
5.1. The rights granted under this License will terminate automatically if You
|
||||||
|
fail to comply with any of its terms. However, if You become compliant,
|
||||||
|
then the rights granted under this License from a particular Contributor
|
||||||
|
are reinstated (a) provisionally, unless and until such Contributor
|
||||||
|
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
|
||||||
|
if such Contributor fails to notify You of the non-compliance by some
|
||||||
|
reasonable means prior to 60 days after You have come back into compliance.
|
||||||
|
Moreover, Your grants from a particular Contributor are reinstated on an
|
||||||
|
ongoing basis if such Contributor notifies You of the non-compliance by
|
||||||
|
some reasonable means, this is the first time You have received notice of
|
||||||
|
non-compliance with this License from such Contributor, and You become
|
||||||
|
compliant prior to 30 days after Your receipt of the notice.
|
||||||
|
|
||||||
|
5.2. If You initiate litigation against any entity by asserting a patent
|
||||||
|
infringement claim (excluding declaratory judgment actions, counter-claims,
|
||||||
|
and cross-claims) alleging that a Contributor Version directly or
|
||||||
|
indirectly infringes any patent, then the rights granted to You by any and
|
||||||
|
all Contributors for the Covered Software under Section 2.1 of this License
|
||||||
|
shall terminate.
|
||||||
|
|
||||||
|
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||||
|
license agreements (excluding distributors and resellers) which have been
|
||||||
|
validly granted by You or Your distributors under this License prior to
|
||||||
|
termination shall survive termination.
|
||||||
|
|
||||||
|
6. Disclaimer of Warranty
|
||||||
|
|
||||||
|
Covered Software is provided under this License on an “as is” basis, without
|
||||||
|
warranty of any kind, either expressed, implied, or statutory, including,
|
||||||
|
without limitation, warranties that the Covered Software is free of defects,
|
||||||
|
merchantable, fit for a particular purpose or non-infringing. The entire
|
||||||
|
risk as to the quality and performance of the Covered Software is with You.
|
||||||
|
Should any Covered Software prove defective in any respect, You (not any
|
||||||
|
Contributor) assume the cost of any necessary servicing, repair, or
|
||||||
|
correction. This disclaimer of warranty constitutes an essential part of this
|
||||||
|
License. No use of any Covered Software is authorized under this License
|
||||||
|
except under this disclaimer.
|
||||||
|
|
||||||
|
7. Limitation of Liability
|
||||||
|
|
||||||
|
Under no circumstances and under no legal theory, whether tort (including
|
||||||
|
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||||
|
distributes Covered Software as permitted above, be liable to You for any
|
||||||
|
direct, indirect, special, incidental, or consequential damages of any
|
||||||
|
character including, without limitation, damages for lost profits, loss of
|
||||||
|
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses, even if such party shall have been
|
||||||
|
informed of the possibility of such damages. This limitation of liability
|
||||||
|
shall not apply to liability for death or personal injury resulting from such
|
||||||
|
party’s negligence to the extent applicable law prohibits such limitation.
|
||||||
|
Some jurisdictions do not allow the exclusion or limitation of incidental or
|
||||||
|
consequential damages, so this exclusion and limitation may not apply to You.
|
||||||
|
|
||||||
|
8. Litigation
|
||||||
|
|
||||||
|
Any litigation relating to this License may be brought only in the courts of
|
||||||
|
a jurisdiction where the defendant maintains its principal place of business
|
||||||
|
and such litigation shall be governed by laws of that jurisdiction, without
|
||||||
|
reference to its conflict-of-law provisions. Nothing in this Section shall
|
||||||
|
prevent a party’s ability to bring cross-claims or counter-claims.
|
||||||
|
|
||||||
|
9. Miscellaneous
|
||||||
|
|
||||||
|
This License represents the complete agreement concerning the subject matter
|
||||||
|
hereof. If any provision of this License is held to be unenforceable, such
|
||||||
|
provision shall be reformed only to the extent necessary to make it
|
||||||
|
enforceable. Any law or regulation which provides that the language of a
|
||||||
|
contract shall be construed against the drafter shall not be used to construe
|
||||||
|
this License against a Contributor.
|
||||||
|
|
||||||
|
|
||||||
|
10. Versions of the License
|
||||||
|
|
||||||
|
10.1. New Versions
|
||||||
|
|
||||||
|
Mozilla Foundation is the license steward. Except as provided in Section
|
||||||
|
10.3, no one other than the license steward has the right to modify or
|
||||||
|
publish new versions of this License. Each version will be given a
|
||||||
|
distinguishing version number.
|
||||||
|
|
||||||
|
10.2. Effect of New Versions
|
||||||
|
|
||||||
|
You may distribute the Covered Software under the terms of the version of
|
||||||
|
the License under which You originally received the Covered Software, or
|
||||||
|
under the terms of any subsequent version published by the license
|
||||||
|
steward.
|
||||||
|
|
||||||
|
10.3. Modified Versions
|
||||||
|
|
||||||
|
If you create software not governed by this License, and you want to
|
||||||
|
create a new license for such software, you may create and use a modified
|
||||||
|
version of this License if you rename the license and remove any
|
||||||
|
references to the name of the license steward (except to note that such
|
||||||
|
modified license differs from this License).
|
||||||
|
|
||||||
|
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
|
||||||
|
If You choose to distribute Source Code Form that is Incompatible With
|
||||||
|
Secondary Licenses under the terms of this version of the License, the
|
||||||
|
notice described in Exhibit B of this License must be attached.
|
||||||
|
|
||||||
|
Exhibit A - Source Code Form License Notice
|
||||||
|
|
||||||
|
This Source Code Form is subject to the
|
||||||
|
terms of the Mozilla Public License, v.
|
||||||
|
2.0. If a copy of the MPL was not
|
||||||
|
distributed with this file, You can
|
||||||
|
obtain one at
|
||||||
|
http://mozilla.org/MPL/2.0/.
|
||||||
|
|
||||||
|
If it is not possible or desirable to put the notice in a particular file, then
|
||||||
|
You may include the notice in a location (such as a LICENSE file in a relevant
|
||||||
|
directory) where a recipient would be likely to look for such a notice.
|
||||||
|
|
||||||
|
You may add additional accurate notices of copyright ownership.
|
||||||
|
|
||||||
|
Exhibit B - “Incompatible With Secondary Licenses” Notice
|
||||||
|
|
||||||
|
This Source Code Form is “Incompatible
|
||||||
|
With Secondary Licenses”, as defined by
|
||||||
|
the Mozilla Public License, v. 2.0.
|
||||||
348
bake/hclparser/gohcl/decode.go
Normal file
348
bake/hclparser/gohcl/decode.go
Normal file
@@ -0,0 +1,348 @@
|
|||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
package gohcl
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
|
||||||
|
"github.com/hashicorp/hcl/v2"
|
||||||
|
"github.com/zclconf/go-cty/cty"
|
||||||
|
"github.com/zclconf/go-cty/cty/convert"
|
||||||
|
"github.com/zclconf/go-cty/cty/gocty"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DecodeOptions allows customizing sections of the decoding process.
type DecodeOptions struct {
	// ImpliedType maps a Go value to the cty.Type that attribute
	// expressions should be converted to before decoding into it.
	// When nil, gocty.ImpliedType is used (see withDefaults).
	ImpliedType func(gv any) (cty.Type, error)

	// Convert converts an evaluated cty.Value to the wanted type prior
	// to decoding. When nil, convert.Convert is used (see withDefaults).
	Convert func(in cty.Value, want cty.Type) (cty.Value, error)
}
|
||||||
|
|
||||||
|
func (o DecodeOptions) DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val any) hcl.Diagnostics {
|
||||||
|
o = o.withDefaults()
|
||||||
|
|
||||||
|
rv := reflect.ValueOf(val)
|
||||||
|
if rv.Kind() != reflect.Ptr {
|
||||||
|
panic(fmt.Sprintf("target value must be a pointer, not %s", rv.Type().String()))
|
||||||
|
}
|
||||||
|
|
||||||
|
return o.decodeBodyToValue(body, ctx, rv.Elem())
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeBody extracts the configuration within the given body into the given
|
||||||
|
// value. This value must be a non-nil pointer to either a struct or
|
||||||
|
// a map, where in the former case the configuration will be decoded using
|
||||||
|
// struct tags and in the latter case only attributes are allowed and their
|
||||||
|
// values are decoded into the map.
|
||||||
|
//
|
||||||
|
// The given EvalContext is used to resolve any variables or functions in
|
||||||
|
// expressions encountered while decoding. This may be nil to require only
|
||||||
|
// constant values, for simple applications that do not support variables or
|
||||||
|
// functions.
|
||||||
|
//
|
||||||
|
// The returned diagnostics should be inspected with its HasErrors method to
|
||||||
|
// determine if the populated value is valid and complete. If error diagnostics
|
||||||
|
// are returned then the given value may have been partially-populated but
|
||||||
|
// may still be accessed by a careful caller for static analysis and editor
|
||||||
|
// integration use-cases.
|
||||||
|
func DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val any) hcl.Diagnostics {
|
||||||
|
return DecodeOptions{}.DecodeBody(body, ctx, val)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o DecodeOptions) decodeBodyToValue(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
|
||||||
|
et := val.Type()
|
||||||
|
switch et.Kind() {
|
||||||
|
case reflect.Struct:
|
||||||
|
return o.decodeBodyToStruct(body, ctx, val)
|
||||||
|
case reflect.Map:
|
||||||
|
return o.decodeBodyToMap(body, ctx, val)
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("target value must be pointer to struct or map, not %s", et.String()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// decodeBodyToStruct decodes body into the struct value val, driven by the
// `hcl:` field tags of val's type. It handles, in order: the optional
// ",body" and ",remain" fields, then named attributes, then nested blocks
// (including slice and pointer block fields). It returns all diagnostics
// accumulated along the way.
func (o DecodeOptions) decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
	schema, partial := ImpliedBodySchema(val.Interface())

	var content *hcl.BodyContent
	var leftovers hcl.Body
	var diags hcl.Diagnostics
	// A ",remain" field makes the schema partial: undeclared content is
	// kept in leftovers instead of producing "unsupported" diagnostics.
	if partial {
		content, leftovers, diags = body.PartialContent(schema)
	} else {
		content, diags = body.Content(schema)
	}
	if content == nil {
		return diags
	}

	tags := getFieldTags(val.Type())

	// ",body" field: capture the whole body, either as an hcl.Body or by
	// decoding it recursively into the field's type.
	if tags.Body != nil {
		fieldIdx := *tags.Body
		field := val.Type().Field(fieldIdx)
		fieldV := val.Field(fieldIdx)
		switch {
		case bodyType.AssignableTo(field.Type):
			fieldV.Set(reflect.ValueOf(body))

		default:
			diags = append(diags, o.decodeBodyToValue(body, ctx, fieldV)...)
		}
	}

	// ",remain" field: capture leftover content as a body, as raw
	// attributes, or by decoding recursively into the field's type.
	if tags.Remain != nil {
		fieldIdx := *tags.Remain
		field := val.Type().Field(fieldIdx)
		fieldV := val.Field(fieldIdx)
		switch {
		case bodyType.AssignableTo(field.Type):
			fieldV.Set(reflect.ValueOf(leftovers))
		case attrsType.AssignableTo(field.Type):
			attrs, attrsDiags := leftovers.JustAttributes()
			if len(attrsDiags) > 0 {
				diags = append(diags, attrsDiags...)
			}
			fieldV.Set(reflect.ValueOf(attrs))
		default:
			diags = append(diags, o.decodeBodyToValue(leftovers, ctx, fieldV)...)
		}
	}

	// Named attributes: absent optional attributes are skipped, except
	// that hcl.Expression fields get a synthetic null expression.
	for name, fieldIdx := range tags.Attributes {
		attr := content.Attributes[name]
		field := val.Type().Field(fieldIdx)
		fieldV := val.Field(fieldIdx)

		if attr == nil {
			if !exprType.AssignableTo(field.Type) {
				continue
			}

			// As a special case, if the target is of type hcl.Expression then
			// we'll assign an actual expression that evaluates to a cty null,
			// so the caller can deal with it within the cty realm rather
			// than within the Go realm.
			synthExpr := hcl.StaticExpr(cty.NullVal(cty.DynamicPseudoType), body.MissingItemRange())
			fieldV.Set(reflect.ValueOf(synthExpr))
			continue
		}

		switch {
		case attrType.AssignableTo(field.Type):
			fieldV.Set(reflect.ValueOf(attr))
		case exprType.AssignableTo(field.Type):
			fieldV.Set(reflect.ValueOf(attr.Expr))
		default:
			diags = append(diags, o.DecodeExpression(
				attr.Expr, ctx, fieldV.Addr().Interface(),
			)...)
		}
	}

	blocksByType := content.Blocks.ByType()

	// Block fields: the field type decides multiplicity. A slice accepts
	// any count; a plain or pointer field requires exactly one block
	// (pointer fields additionally tolerate zero, staying nil).
	for typeName, fieldIdx := range tags.Blocks {
		blocks := blocksByType[typeName]
		field := val.Type().Field(fieldIdx)

		ty := field.Type
		isSlice := false
		isPtr := false
		if ty.Kind() == reflect.Slice {
			isSlice = true
			ty = ty.Elem()
		}
		if ty.Kind() == reflect.Ptr {
			isPtr = true
			ty = ty.Elem()
		}

		if len(blocks) > 1 && !isSlice {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  fmt.Sprintf("Duplicate %s block", typeName),
				Detail: fmt.Sprintf(
					"Only one %s block is allowed. Another was defined at %s.",
					typeName, blocks[0].DefRange.String(),
				),
				Subject: &blocks[1].DefRange,
			})
			continue
		}

		if len(blocks) == 0 {
			if isSlice || isPtr {
				// Optional block absent: normalize the field to nil
				// unless the caller pre-populated it.
				if val.Field(fieldIdx).IsNil() {
					val.Field(fieldIdx).Set(reflect.Zero(field.Type))
				}
			} else {
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  fmt.Sprintf("Missing %s block", typeName),
					Detail:   fmt.Sprintf("A %s block is required.", typeName),
					Subject:  body.MissingItemRange().Ptr(),
				})
			}
			continue
		}

		switch {
		case isSlice:
			elemType := ty
			if isPtr {
				elemType = reflect.PointerTo(ty)
			}
			// Reuse a caller-provided slice where possible, growing it
			// with appends and truncating any excess below.
			sli := val.Field(fieldIdx)
			if sli.IsNil() {
				sli = reflect.MakeSlice(reflect.SliceOf(elemType), len(blocks), len(blocks))
			}

			for i, block := range blocks {
				if isPtr {
					if i >= sli.Len() {
						sli = reflect.Append(sli, reflect.New(ty))
					}
					v := sli.Index(i)
					if v.IsNil() {
						v = reflect.New(ty)
					}
					diags = append(diags, o.decodeBlockToValue(block, ctx, v.Elem())...)
					sli.Index(i).Set(v)
				} else {
					if i >= sli.Len() {
						sli = reflect.Append(sli, reflect.Indirect(reflect.New(ty)))
					}
					diags = append(diags, o.decodeBlockToValue(block, ctx, sli.Index(i))...)
				}
			}

			if sli.Len() > len(blocks) {
				sli.SetLen(len(blocks))
			}

			val.Field(fieldIdx).Set(sli)

		default:
			block := blocks[0]
			if isPtr {
				v := val.Field(fieldIdx)
				if v.IsNil() {
					v = reflect.New(ty)
				}
				diags = append(diags, o.decodeBlockToValue(block, ctx, v.Elem())...)
				val.Field(fieldIdx).Set(v)
			} else {
				diags = append(diags, o.decodeBlockToValue(block, ctx, val.Field(fieldIdx))...)
			}
		}
	}

	return diags
}
|
||||||
|
|
||||||
|
func (o DecodeOptions) decodeBodyToMap(body hcl.Body, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics {
|
||||||
|
attrs, diags := body.JustAttributes()
|
||||||
|
if attrs == nil {
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
|
||||||
|
mv := reflect.MakeMap(v.Type())
|
||||||
|
|
||||||
|
for k, attr := range attrs {
|
||||||
|
switch {
|
||||||
|
case attrType.AssignableTo(v.Type().Elem()):
|
||||||
|
mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr))
|
||||||
|
case exprType.AssignableTo(v.Type().Elem()):
|
||||||
|
mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr.Expr))
|
||||||
|
default:
|
||||||
|
ev := reflect.New(v.Type().Elem())
|
||||||
|
diags = append(diags, o.DecodeExpression(attr.Expr, ctx, ev.Interface())...)
|
||||||
|
mv.SetMapIndex(reflect.ValueOf(k), ev.Elem())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
v.Set(mv)
|
||||||
|
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o DecodeOptions) decodeBlockToValue(block *hcl.Block, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics {
|
||||||
|
diags := o.decodeBodyToValue(block.Body, ctx, v)
|
||||||
|
|
||||||
|
if len(block.Labels) > 0 {
|
||||||
|
blockTags := getFieldTags(v.Type())
|
||||||
|
for li, lv := range block.Labels {
|
||||||
|
lfieldIdx := blockTags.Labels[li].FieldIndex
|
||||||
|
v.Field(lfieldIdx).Set(reflect.ValueOf(lv))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o DecodeOptions) DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val any) hcl.Diagnostics {
|
||||||
|
o = o.withDefaults()
|
||||||
|
|
||||||
|
srcVal, diags := expr.Value(ctx)
|
||||||
|
|
||||||
|
convTy, err := o.ImpliedType(val)
|
||||||
|
if err != nil {
|
||||||
|
panic(fmt.Sprintf("unsuitable DecodeExpression target: %s", err))
|
||||||
|
}
|
||||||
|
|
||||||
|
srcVal, err = o.Convert(srcVal, convTy)
|
||||||
|
if err != nil {
|
||||||
|
diags = append(diags, &hcl.Diagnostic{
|
||||||
|
Severity: hcl.DiagError,
|
||||||
|
Summary: "Unsuitable value type",
|
||||||
|
Detail: fmt.Sprintf("Unsuitable value: %s", err.Error()),
|
||||||
|
Subject: expr.StartRange().Ptr(),
|
||||||
|
Context: expr.Range().Ptr(),
|
||||||
|
})
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
|
||||||
|
err = gocty.FromCtyValue(srcVal, val)
|
||||||
|
if err != nil {
|
||||||
|
diags = append(diags, &hcl.Diagnostic{
|
||||||
|
Severity: hcl.DiagError,
|
||||||
|
Summary: "Unsuitable value type",
|
||||||
|
Detail: fmt.Sprintf("Unsuitable value: %s", err.Error()),
|
||||||
|
Subject: expr.StartRange().Ptr(),
|
||||||
|
Context: expr.Range().Ptr(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeExpression extracts the value of the given expression into the given
|
||||||
|
// value. This value must be something that gocty is able to decode into,
|
||||||
|
// since the final decoding is delegated to that package.
|
||||||
|
//
|
||||||
|
// The given EvalContext is used to resolve any variables or functions in
|
||||||
|
// expressions encountered while decoding. This may be nil to require only
|
||||||
|
// constant values, for simple applications that do not support variables or
|
||||||
|
// functions.
|
||||||
|
//
|
||||||
|
// The returned diagnostics should be inspected with its HasErrors method to
|
||||||
|
// determine if the populated value is valid and complete. If error diagnostics
|
||||||
|
// are returned then the given value may have been partially-populated but
|
||||||
|
// may still be accessed by a careful caller for static analysis and editor
|
||||||
|
// integration use-cases.
|
||||||
|
func DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val any) hcl.Diagnostics {
|
||||||
|
return DecodeOptions{}.DecodeExpression(expr, ctx, val)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o DecodeOptions) withDefaults() DecodeOptions {
|
||||||
|
if o.ImpliedType == nil {
|
||||||
|
o.ImpliedType = gocty.ImpliedType
|
||||||
|
}
|
||||||
|
|
||||||
|
if o.Convert == nil {
|
||||||
|
o.Convert = convert.Convert
|
||||||
|
}
|
||||||
|
return o
|
||||||
|
}
|
||||||
806
bake/hclparser/gohcl/decode_test.go
Normal file
806
bake/hclparser/gohcl/decode_test.go
Normal file
@@ -0,0 +1,806 @@
|
|||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
package gohcl
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/davecgh/go-spew/spew"
|
||||||
|
"github.com/hashicorp/hcl/v2"
|
||||||
|
hclJSON "github.com/hashicorp/hcl/v2/json"
|
||||||
|
"github.com/zclconf/go-cty/cty"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestDecodeBody(t *testing.T) {
|
||||||
|
deepEquals := func(other any) func(v any) bool {
|
||||||
|
return func(v any) bool {
|
||||||
|
return reflect.DeepEqual(v, other)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type withNameExpression struct {
|
||||||
|
Name hcl.Expression `hcl:"name"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type withTwoAttributes struct {
|
||||||
|
A string `hcl:"a,optional"`
|
||||||
|
B string `hcl:"b,optional"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type withNestedBlock struct {
|
||||||
|
Plain string `hcl:"plain,optional"`
|
||||||
|
Nested *withTwoAttributes `hcl:"nested,block"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type withListofNestedBlocks struct {
|
||||||
|
Nested []*withTwoAttributes `hcl:"nested,block"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type withListofNestedBlocksNoPointers struct {
|
||||||
|
Nested []withTwoAttributes `hcl:"nested,block"`
|
||||||
|
}
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
Body map[string]any
|
||||||
|
Target func() any
|
||||||
|
Check func(v any) bool
|
||||||
|
DiagCount int
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
map[string]any{},
|
||||||
|
makeInstantiateType(struct{}{}),
|
||||||
|
deepEquals(struct{}{}),
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
}{}),
|
||||||
|
deepEquals(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
}{}),
|
||||||
|
1, // name is required
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Name *string `hcl:"name"`
|
||||||
|
}{}),
|
||||||
|
deepEquals(struct {
|
||||||
|
Name *string `hcl:"name"`
|
||||||
|
}{}),
|
||||||
|
0,
|
||||||
|
}, // name nil
|
||||||
|
{
|
||||||
|
map[string]any{},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Name string `hcl:"name,optional"`
|
||||||
|
}{}),
|
||||||
|
deepEquals(struct {
|
||||||
|
Name string `hcl:"name,optional"`
|
||||||
|
}{}),
|
||||||
|
0,
|
||||||
|
}, // name optional
|
||||||
|
{
|
||||||
|
map[string]any{},
|
||||||
|
makeInstantiateType(withNameExpression{}),
|
||||||
|
func(v any) bool {
|
||||||
|
if v == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
wne, valid := v.(withNameExpression)
|
||||||
|
if !valid {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if wne.Name == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
nameVal, _ := wne.Name.Value(nil)
|
||||||
|
return nameVal.IsNull()
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
},
|
||||||
|
makeInstantiateType(withNameExpression{}),
|
||||||
|
func(v any) bool {
|
||||||
|
if v == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
wne, valid := v.(withNameExpression)
|
||||||
|
if !valid {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if wne.Name == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
nameVal, _ := wne.Name.Value(nil)
|
||||||
|
return nameVal.Equals(cty.StringVal("Ermintrude")).True()
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
}{}),
|
||||||
|
deepEquals(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
}{"Ermintrude"}),
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
"age": 23,
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
}{}),
|
||||||
|
deepEquals(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
}{"Ermintrude"}),
|
||||||
|
1, // Extraneous "age" property
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
"age": 50,
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
Attrs hcl.Attributes `hcl:",remain"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
got := gotI.(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
Attrs hcl.Attributes `hcl:",remain"`
|
||||||
|
})
|
||||||
|
return got.Name == "Ermintrude" && len(got.Attrs) == 1 && got.Attrs["age"] != nil
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
"age": 50,
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
Remain hcl.Body `hcl:",remain"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
got := gotI.(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
Remain hcl.Body `hcl:",remain"`
|
||||||
|
})
|
||||||
|
|
||||||
|
attrs, _ := got.Remain.JustAttributes()
|
||||||
|
|
||||||
|
return got.Name == "Ermintrude" && len(attrs) == 1 && attrs["age"] != nil
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
"living": true,
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
Remain map[string]cty.Value `hcl:",remain"`
|
||||||
|
}{}),
|
||||||
|
deepEquals(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
Remain map[string]cty.Value `hcl:",remain"`
|
||||||
|
}{
|
||||||
|
Name: "Ermintrude",
|
||||||
|
Remain: map[string]cty.Value{
|
||||||
|
"living": cty.True,
|
||||||
|
},
|
||||||
|
}),
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
"age": 50,
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
Body hcl.Body `hcl:",body"`
|
||||||
|
Remain hcl.Body `hcl:",remain"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
got := gotI.(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
Body hcl.Body `hcl:",body"`
|
||||||
|
Remain hcl.Body `hcl:",remain"`
|
||||||
|
})
|
||||||
|
|
||||||
|
attrs, _ := got.Body.JustAttributes()
|
||||||
|
|
||||||
|
return got.Name == "Ermintrude" && len(attrs) == 2 &&
|
||||||
|
attrs["name"] != nil && attrs["age"] != nil
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": map[string]any{},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
// Generating no diagnostics is good enough for this one.
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": []map[string]any{{}},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
// Generating no diagnostics is good enough for this one.
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": []map[string]any{{}, {}},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
// Generating one diagnostic is good enough for this one.
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
// Generating one diagnostic is good enough for this one.
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": []map[string]any{},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
// Generating one diagnostic is good enough for this one.
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": map[string]any{},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle *struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
return gotI.(struct {
|
||||||
|
Noodle *struct{} `hcl:"noodle,block"`
|
||||||
|
}).Noodle != nil
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": []map[string]any{{}},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle *struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
return gotI.(struct {
|
||||||
|
Noodle *struct{} `hcl:"noodle,block"`
|
||||||
|
}).Noodle != nil
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": []map[string]any{},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle *struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
return gotI.(struct {
|
||||||
|
Noodle *struct{} `hcl:"noodle,block"`
|
||||||
|
}).Noodle == nil
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": []map[string]any{{}, {}},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle *struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
// Generating one diagnostic is good enough for this one.
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": []map[string]any{},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle []struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
noodle := gotI.(struct {
|
||||||
|
Noodle []struct{} `hcl:"noodle,block"`
|
||||||
|
}).Noodle
|
||||||
|
return len(noodle) == 0
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": []map[string]any{{}},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle []struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
noodle := gotI.(struct {
|
||||||
|
Noodle []struct{} `hcl:"noodle,block"`
|
||||||
|
}).Noodle
|
||||||
|
return len(noodle) == 1
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": []map[string]any{{}, {}},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle []struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
noodle := gotI.(struct {
|
||||||
|
Noodle []struct{} `hcl:"noodle,block"`
|
||||||
|
}).Noodle
|
||||||
|
return len(noodle) == 2
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": map[string]any{},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle struct {
|
||||||
|
Name string `hcl:"name,label"`
|
||||||
|
} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
//nolint:misspell
|
||||||
|
// Generating two diagnostics is good enough for this one.
|
||||||
|
// (one for the missing noodle block and the other for
|
||||||
|
// the JSON serialization detecting the missing level of
|
||||||
|
// heirarchy for the label.)
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
2,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": map[string]any{
|
||||||
|
"foo_foo": map[string]any{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle struct {
|
||||||
|
Name string `hcl:"name,label"`
|
||||||
|
} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
noodle := gotI.(struct {
|
||||||
|
Noodle struct {
|
||||||
|
Name string `hcl:"name,label"`
|
||||||
|
} `hcl:"noodle,block"`
|
||||||
|
}).Noodle
|
||||||
|
return noodle.Name == "foo_foo"
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": map[string]any{
|
||||||
|
"foo_foo": map[string]any{},
|
||||||
|
"bar_baz": map[string]any{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle struct {
|
||||||
|
Name string `hcl:"name,label"`
|
||||||
|
} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
// One diagnostic is enough for this one.
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": map[string]any{
|
||||||
|
"foo_foo": map[string]any{},
|
||||||
|
"bar_baz": map[string]any{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodles []struct {
|
||||||
|
Name string `hcl:"name,label"`
|
||||||
|
} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
noodles := gotI.(struct {
|
||||||
|
Noodles []struct {
|
||||||
|
Name string `hcl:"name,label"`
|
||||||
|
} `hcl:"noodle,block"`
|
||||||
|
}).Noodles
|
||||||
|
return len(noodles) == 2 && (noodles[0].Name == "foo_foo" || noodles[0].Name == "bar_baz") && (noodles[1].Name == "foo_foo" || noodles[1].Name == "bar_baz") && noodles[0].Name != noodles[1].Name
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": map[string]any{
|
||||||
|
"foo_foo": map[string]any{
|
||||||
|
"type": "rice",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle struct {
|
||||||
|
Name string `hcl:"name,label"`
|
||||||
|
Type string `hcl:"type"`
|
||||||
|
} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
noodle := gotI.(struct {
|
||||||
|
Noodle struct {
|
||||||
|
Name string `hcl:"name,label"`
|
||||||
|
Type string `hcl:"type"`
|
||||||
|
} `hcl:"noodle,block"`
|
||||||
|
}).Noodle
|
||||||
|
return noodle.Name == "foo_foo" && noodle.Type == "rice"
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
"age": 34,
|
||||||
|
},
|
||||||
|
makeInstantiateType(map[string]string(nil)),
|
||||||
|
deepEquals(map[string]string{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
"age": "34",
|
||||||
|
}),
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
"age": 89,
|
||||||
|
},
|
||||||
|
makeInstantiateType(map[string]*hcl.Attribute(nil)),
|
||||||
|
func(gotI any) bool {
|
||||||
|
got := gotI.(map[string]*hcl.Attribute)
|
||||||
|
return len(got) == 2 && got["name"] != nil && got["age"] != nil
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
"age": 13,
|
||||||
|
},
|
||||||
|
makeInstantiateType(map[string]hcl.Expression(nil)),
|
||||||
|
func(gotI any) bool {
|
||||||
|
got := gotI.(map[string]hcl.Expression)
|
||||||
|
return len(got) == 2 && got["name"] != nil && got["age"] != nil
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
"living": true,
|
||||||
|
},
|
||||||
|
makeInstantiateType(map[string]cty.Value(nil)),
|
||||||
|
deepEquals(map[string]cty.Value{
|
||||||
|
"name": cty.StringVal("Ermintrude"),
|
||||||
|
"living": cty.True,
|
||||||
|
}),
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Retain "nested" block while decoding
|
||||||
|
map[string]any{
|
||||||
|
"plain": "foo",
|
||||||
|
},
|
||||||
|
func() any {
|
||||||
|
return &withNestedBlock{
|
||||||
|
Plain: "bar",
|
||||||
|
Nested: &withTwoAttributes{
|
||||||
|
A: "bar",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
},
|
||||||
|
func(gotI any) bool {
|
||||||
|
foo := gotI.(withNestedBlock)
|
||||||
|
return foo.Plain == "foo" && foo.Nested != nil && foo.Nested.A == "bar"
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Retain values in "nested" block while decoding
|
||||||
|
map[string]any{
|
||||||
|
"nested": map[string]any{
|
||||||
|
"a": "foo",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
func() any {
|
||||||
|
return &withNestedBlock{
|
||||||
|
Nested: &withTwoAttributes{
|
||||||
|
B: "bar",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
},
|
||||||
|
func(gotI any) bool {
|
||||||
|
foo := gotI.(withNestedBlock)
|
||||||
|
return foo.Nested.A == "foo" && foo.Nested.B == "bar"
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Retain values in "nested" block list while decoding
|
||||||
|
map[string]any{
|
||||||
|
"nested": []map[string]any{
|
||||||
|
{
|
||||||
|
"a": "foo",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
func() any {
|
||||||
|
return &withListofNestedBlocks{
|
||||||
|
Nested: []*withTwoAttributes{
|
||||||
|
{
|
||||||
|
B: "bar",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
},
|
||||||
|
func(gotI any) bool {
|
||||||
|
n := gotI.(withListofNestedBlocks)
|
||||||
|
return n.Nested[0].A == "foo" && n.Nested[0].B == "bar"
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Remove additional elements from the list while decoding nested blocks
|
||||||
|
map[string]any{
|
||||||
|
"nested": []map[string]any{
|
||||||
|
{
|
||||||
|
"a": "foo",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
func() any {
|
||||||
|
return &withListofNestedBlocks{
|
||||||
|
Nested: []*withTwoAttributes{
|
||||||
|
{
|
||||||
|
B: "bar",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
B: "bar",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
},
|
||||||
|
func(gotI any) bool {
|
||||||
|
n := gotI.(withListofNestedBlocks)
|
||||||
|
return len(n.Nested) == 1
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Make sure decoding value slices works the same as pointer slices.
|
||||||
|
map[string]any{
|
||||||
|
"nested": []map[string]any{
|
||||||
|
{
|
||||||
|
"b": "bar",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"b": "baz",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
func() any {
|
||||||
|
return &withListofNestedBlocksNoPointers{
|
||||||
|
Nested: []withTwoAttributes{
|
||||||
|
{
|
||||||
|
B: "foo",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
},
|
||||||
|
func(gotI any) bool {
|
||||||
|
n := gotI.(withListofNestedBlocksNoPointers)
|
||||||
|
return n.Nested[0].B == "bar" && len(n.Nested) == 2
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, test := range tests {
|
||||||
|
// For convenience here we're going to use the JSON parser
|
||||||
|
// to process the given body.
|
||||||
|
buf, err := json.Marshal(test.Body)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("error JSON-encoding body for test %d: %s", i, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run(string(buf), func(t *testing.T) {
|
||||||
|
file, diags := hclJSON.Parse(buf, "test.json")
|
||||||
|
if len(diags) != 0 {
|
||||||
|
t.Fatalf("diagnostics while parsing: %s", diags.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
targetVal := reflect.ValueOf(test.Target())
|
||||||
|
|
||||||
|
diags = DecodeBody(file.Body, nil, targetVal.Interface())
|
||||||
|
if len(diags) != test.DiagCount {
|
||||||
|
t.Errorf("wrong number of diagnostics %d; want %d", len(diags), test.DiagCount)
|
||||||
|
for _, diag := range diags {
|
||||||
|
t.Logf(" - %s", diag.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
got := targetVal.Elem().Interface()
|
||||||
|
if !test.Check(got) {
|
||||||
|
t.Errorf("wrong result\ngot: %s", spew.Sdump(got))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestDecodeExpression exercises DecodeExpression against a table of
// (value, target, want, diagCount) cases, covering successful conversions
// (string, number-to-string, string-to-bool, raw cty.Value) as well as the
// null, unknown, and type-mismatch error paths.
func TestDecodeExpression(t *testing.T) {
	tests := []struct {
		Value     cty.Value // value the fixed expression evaluates to
		Target    any       // zero value whose type is the decode target
		Want      any       // expected decoded result
		DiagCount int       // expected number of diagnostics
	}{
		{
			cty.StringVal("hello"),
			"",
			"hello",
			0,
		},
		{
			// Decoding into cty.Value passes the value through unchanged.
			cty.StringVal("hello"),
			cty.NilVal,
			cty.StringVal("hello"),
			0,
		},
		{
			// Number-to-string conversion is permitted.
			cty.NumberIntVal(2),
			"",
			"2",
			0,
		},
		{
			// String-to-bool conversion is permitted.
			cty.StringVal("true"),
			false,
			true,
			0,
		},
		{
			cty.NullVal(cty.String),
			"",
			"",
			1, // null value is not allowed
		},
		{
			cty.UnknownVal(cty.String),
			"",
			"",
			1, // value must be known
		},
		{
			cty.ListVal([]cty.Value{cty.True}),
			false,
			false,
			1, // bool required
		},
	}

	for i, test := range tests {
		t.Run(fmt.Sprintf("%02d", i), func(t *testing.T) {
			expr := &fixedExpression{test.Value}

			// Decode into a fresh pointer whose element type matches
			// the target's type.
			targetVal := reflect.New(reflect.TypeOf(test.Target))

			diags := DecodeExpression(expr, nil, targetVal.Interface())
			if len(diags) != test.DiagCount {
				t.Errorf("wrong number of diagnostics %d; want %d", len(diags), test.DiagCount)
				for _, diag := range diags {
					t.Logf(" - %s", diag.Error())
				}
			}
			got := targetVal.Elem().Interface()
			if !reflect.DeepEqual(got, test.Want) {
				t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want)
			}
		})
	}
}
|
||||||
|
|
||||||
|
// fixedExpression is a test double for hcl.Expression that always
// evaluates to a predetermined value, regardless of evaluation context.
type fixedExpression struct {
	val cty.Value // the value returned by every call to Value
}
|
||||||
|
|
||||||
|
// Value implements hcl.Expression by returning the fixed value with no
// diagnostics; the evaluation context is ignored.
func (e *fixedExpression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
	return e.val, nil
}
|
||||||
|
|
||||||
|
// Range implements hcl.Expression by returning a zero-value source range,
// since a fixedExpression has no backing source code.
func (e *fixedExpression) Range() (r hcl.Range) {
	return
}
|
||||||
|
|
||||||
|
// StartRange implements hcl.Expression by returning a zero-value source
// range, for the same reason as Range.
func (e *fixedExpression) StartRange() (r hcl.Range) {
	return
}
|
||||||
|
|
||||||
|
// Variables implements hcl.Expression; a fixedExpression references no
// variables, so it always returns nil.
func (e *fixedExpression) Variables() []hcl.Traversal {
	return nil
}
|
||||||
|
|
||||||
|
// makeInstantiateType returns a factory that allocates a fresh zero value
// of target's dynamic type on every call, yielding a pointer to it (as an
// interface value).
func makeInstantiateType(target any) func() any {
	// The type is resolved once; each invocation of the factory performs
	// a new allocation of that type.
	ty := reflect.TypeOf(target)
	return func() any {
		ptr := reflect.New(ty)
		return ptr.Interface()
	}
}
|
||||||
65
bake/hclparser/gohcl/doc.go
Normal file
65
bake/hclparser/gohcl/doc.go
Normal file
@@ -0,0 +1,65 @@
|
|||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
// Package gohcl allows decoding HCL configurations into Go data structures.
|
||||||
|
//
|
||||||
|
// It provides a convenient and concise way of describing the schema for
|
||||||
|
// configuration and then accessing the resulting data via native Go
|
||||||
|
// types.
|
||||||
|
//
|
||||||
|
// A struct field tag scheme is used, similar to other decoding and
|
||||||
|
// unmarshalling libraries. The tags are formatted as in the following example:
|
||||||
|
//
|
||||||
|
// ThingType string `hcl:"thing_type,attr"`
|
||||||
|
//
|
||||||
|
// Within each tag there are two comma-separated tokens. The first is the
|
||||||
|
// name of the corresponding construct in configuration, while the second
|
||||||
|
// is a keyword giving the kind of construct expected. The following
|
||||||
|
// kind keywords are supported:
|
||||||
|
//
|
||||||
|
// attr (the default) indicates that the value is to be populated from an attribute
|
||||||
|
// block indicates that the value is to be populated from a block
|
||||||
|
// label indicates that the value is to be populated from a block label
|
||||||
|
// optional is the same as attr, but the field is optional
|
||||||
|
// remain indicates that the value is to be populated from the remaining body after populating other fields
|
||||||
|
//
|
||||||
|
// "attr" fields may either be of type *hcl.Expression, in which case the raw
|
||||||
|
// expression is assigned, or of any type accepted by gocty, in which case
|
||||||
|
// gocty will be used to assign the value to a native Go type.
|
||||||
|
//
|
||||||
|
// "block" fields may be a struct that recursively uses the same tags, or a
|
||||||
|
// slice of such structs, in which case multiple blocks of the corresponding
|
||||||
|
// type are decoded into the slice.
|
||||||
|
//
|
||||||
|
// "body" can be placed on a single field of type hcl.Body to capture
|
||||||
|
// the full hcl.Body that was decoded for a block. This does not allow leftover
|
||||||
|
// values like "remain", so a decoding error will still be returned if leftover
|
||||||
|
// fields are given. If you want to capture the decoding body PLUS leftover
|
||||||
|
// fields, you must specify a "remain" field as well to prevent errors. The
|
||||||
|
// body field and the remain field will both contain the leftover fields.
|
||||||
|
//
|
||||||
|
// "label" fields are considered only in a struct used as the type of a field
|
||||||
|
// marked as "block", and are used sequentially to capture the labels of
|
||||||
|
// the blocks being decoded. In this case, the name token is used only as
|
||||||
|
// an identifier for the label in diagnostic messages.
|
||||||
|
//
|
||||||
|
// "optional" fields behave like "attr" fields, but they are optional
|
||||||
|
// and will not give parsing errors if they are missing.
|
||||||
|
//
|
||||||
|
// "remain" can be placed on a single field that may be either of type
|
||||||
|
// hcl.Body or hcl.Attributes, in which case any remaining body content is
|
||||||
|
// placed into this field for delayed processing. If no "remain" field is
|
||||||
|
// present then any attributes or blocks not matched by another valid tag
|
||||||
|
// will cause an error diagnostic.
|
||||||
|
//
|
||||||
|
// Only a subset of this tagging/typing vocabulary is supported for the
|
||||||
|
// "Encode" family of functions. See the EncodeIntoBody docs for full details
|
||||||
|
// on the constraints there.
|
||||||
|
//
|
||||||
|
// Broadly-speaking this package deals with two types of error. The first is
|
||||||
|
// errors in the configuration itself, which are returned as diagnostics
|
||||||
|
// written with the configuration author as the target audience. The second
|
||||||
|
// is bugs in the calling program, such as invalid struct tags, which are
|
||||||
|
// surfaced via panics since there can be no useful runtime handling of such
|
||||||
|
// errors and they should certainly not be returned to the user as diagnostics.
|
||||||
|
package gohcl
|
||||||
192
bake/hclparser/gohcl/encode.go
Normal file
192
bake/hclparser/gohcl/encode.go
Normal file
@@ -0,0 +1,192 @@
|
|||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
package gohcl
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
|
||||||
|
"github.com/hashicorp/hcl/v2/hclwrite"
|
||||||
|
"github.com/zclconf/go-cty/cty/gocty"
|
||||||
|
)
|
||||||
|
|
||||||
|
// EncodeIntoBody replaces the contents of the given hclwrite Body with
|
||||||
|
// attributes and blocks derived from the given value, which must be a
|
||||||
|
// struct value or a pointer to a struct value with the struct tags defined
|
||||||
|
// in this package.
|
||||||
|
//
|
||||||
|
// This function can work only with fully-decoded data. It will ignore any
|
||||||
|
// fields tagged as "remain", any fields that decode attributes into either
|
||||||
|
// hcl.Attribute or hcl.Expression values, and any fields that decode blocks
|
||||||
|
// into hcl.Attributes values. This function does not have enough information
|
||||||
|
// to complete the decoding of these types.
|
||||||
|
//
|
||||||
|
// Any fields tagged as "label" are ignored by this function. Use EncodeAsBlock
|
||||||
|
// to produce a whole hclwrite.Block including block labels.
|
||||||
|
//
|
||||||
|
// As long as a suitable value is given to encode and the destination body
|
||||||
|
// is non-nil, this function will always complete. It will panic in case of
|
||||||
|
// any errors in the calling program, such as passing an inappropriate type
|
||||||
|
// or a nil body.
|
||||||
|
//
|
||||||
|
// The layout of the resulting HCL source is derived from the ordering of
|
||||||
|
// the struct fields, with blank lines around nested blocks of different types.
|
||||||
|
// Fields representing attributes should usually precede those representing
|
||||||
|
// blocks so that the attributes can group together in the result. For more
|
||||||
|
// control, use the hclwrite API directly.
|
||||||
|
func EncodeIntoBody(val any, dst *hclwrite.Body) {
	rv := reflect.ValueOf(val)
	ty := rv.Type()
	if ty.Kind() == reflect.Ptr {
		// Follow a pointer so callers may pass either a struct or *struct.
		rv = rv.Elem()
		ty = rv.Type()
	}
	if ty.Kind() != reflect.Struct {
		// Non-struct input is a bug in the calling program, per the
		// contract documented above.
		panic(fmt.Sprintf("value is %s, not struct", ty.Kind()))
	}

	// Classify the struct's fields by their hcl tags, then emit them
	// into the destination body.
	tags := getFieldTags(ty)
	populateBody(rv, ty, tags, dst)
}
|
||||||
|
|
||||||
|
// EncodeAsBlock creates a new hclwrite.Block populated with the data from
|
||||||
|
// the given value, which must be a struct or pointer to struct with the
|
||||||
|
// struct tags defined in this package.
|
||||||
|
//
|
||||||
|
// If the given struct type has fields tagged with "label" tags then they
|
||||||
|
// will be used in order to annotate the created block with labels.
|
||||||
|
//
|
||||||
|
// This function has the same constraints as EncodeIntoBody and will panic
|
||||||
|
// if they are violated.
|
||||||
|
func EncodeAsBlock(val any, blockType string) *hclwrite.Block {
	rv := reflect.ValueOf(val)
	ty := rv.Type()
	if ty.Kind() == reflect.Ptr {
		// Follow a pointer so callers may pass either a struct or *struct.
		rv = rv.Elem()
		ty = rv.Type()
	}
	if ty.Kind() != reflect.Struct {
		// Non-struct input is a bug in the calling program, per the
		// contract documented above.
		panic(fmt.Sprintf("value is %s, not struct", ty.Kind()))
	}

	tags := getFieldTags(ty)
	// Fields tagged "label" supply the block's labels, in declaration order.
	labels := make([]string, len(tags.Labels))
	for i, lf := range tags.Labels {
		lv := rv.Field(lf.FieldIndex)
		// We just stringify whatever we find. It should always be a string
		// but if not then we'll still do something reasonable.
		labels[i] = fmt.Sprintf("%s", lv.Interface())
	}

	block := hclwrite.NewBlock(blockType, labels)
	populateBody(rv, ty, tags, block.Body())
	return block
}
|
||||||
|
|
||||||
|
// populateBody clears dst and then fills it with attributes and nested
// blocks derived from the struct value rv, using the field classification
// in tags. Entries are emitted in struct-field declaration order, with a
// blank line separating runs of blocks from adjacent attributes/blocks.
func populateBody(rv reflect.Value, ty reflect.Type, tags *fieldTags, dst *hclwrite.Body) {
	// Merge attribute and block names into a single ordering keyed by
	// their struct field index, so output follows declaration order.
	nameIdxs := make(map[string]int, len(tags.Attributes)+len(tags.Blocks))
	namesOrder := make([]string, 0, len(tags.Attributes)+len(tags.Blocks))
	for n, i := range tags.Attributes {
		nameIdxs[n] = i
		namesOrder = append(namesOrder, n)
	}
	for n, i := range tags.Blocks {
		nameIdxs[n] = i
		namesOrder = append(namesOrder, n)
	}
	sort.SliceStable(namesOrder, func(i, j int) bool {
		ni, nj := namesOrder[i], namesOrder[j]
		return nameIdxs[ni] < nameIdxs[nj]
	})

	// Replace any existing content of the destination body.
	dst.Clear()

	// Tracks whether the previous emitted item was a block, to decide
	// where blank-line separators go.
	prevWasBlock := false
	for _, name := range namesOrder {
		fieldIdx := nameIdxs[name]
		field := ty.Field(fieldIdx)
		fieldTy := field.Type
		fieldVal := rv.Field(fieldIdx)

		if fieldTy.Kind() == reflect.Ptr {
			// Dereference pointer fields; a nil pointer yields an
			// invalid fieldVal, which is skipped below.
			fieldTy = fieldTy.Elem()
			fieldVal = fieldVal.Elem()
		}

		if _, isAttr := tags.Attributes[name]; isAttr {
			if exprType.AssignableTo(fieldTy) || attrType.AssignableTo(fieldTy) {
				continue // ignore undecoded fields
			}
			if !fieldVal.IsValid() {
				continue // ignore (field value is nil pointer)
			}
			if fieldTy.Kind() == reflect.Ptr && fieldVal.IsNil() {
				continue // ignore
			}
			if prevWasBlock {
				// Separate an attribute from a preceding block run.
				dst.AppendNewline()
				prevWasBlock = false
			}

			valTy, err := gocty.ImpliedType(fieldVal.Interface())
			if err != nil {
				panic(fmt.Sprintf("cannot encode %T as HCL expression: %s", fieldVal.Interface(), err))
			}

			val, err := gocty.ToCtyValue(fieldVal.Interface(), valTy)
			if err != nil {
				// This should never happen, since we should always be able
				// to decode into the implied type.
				panic(fmt.Sprintf("failed to encode %T as %#v: %s", fieldVal.Interface(), valTy, err))
			}

			dst.SetAttributeValue(name, val)
		} else { // must be a block, then
			elemTy := fieldTy
			isSeq := false
			if elemTy.Kind() == reflect.Slice || elemTy.Kind() == reflect.Array {
				// A slice/array field produces one block per element.
				isSeq = true
				elemTy = elemTy.Elem()
			}

			if bodyType.AssignableTo(elemTy) || attrsType.AssignableTo(elemTy) {
				continue // ignore undecoded fields
			}
			prevWasBlock = false

			if isSeq {
				l := fieldVal.Len()
				for i := range l {
					elemVal := fieldVal.Index(i)
					if !elemVal.IsValid() {
						continue // ignore (elem value is nil pointer)
					}
					if elemTy.Kind() == reflect.Ptr && elemVal.IsNil() {
						continue // ignore
					}
					block := EncodeAsBlock(elemVal.Interface(), name)
					if !prevWasBlock {
						// Blank line before the first block in a run.
						dst.AppendNewline()
						prevWasBlock = true
					}
					dst.AppendBlock(block)
				}
			} else {
				if !fieldVal.IsValid() {
					continue // ignore (field value is nil pointer)
				}
				if elemTy.Kind() == reflect.Ptr && fieldVal.IsNil() {
					continue // ignore
				}
				block := EncodeAsBlock(fieldVal.Interface(), name)
				if !prevWasBlock {
					// Blank line before the first block in a run.
					dst.AppendNewline()
					prevWasBlock = true
				}
				dst.AppendBlock(block)
			}
		}
	}
}
|
||||||
67
bake/hclparser/gohcl/encode_test.go
Normal file
67
bake/hclparser/gohcl/encode_test.go
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
package gohcl_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/hashicorp/hcl/v2/gohcl"
|
||||||
|
"github.com/hashicorp/hcl/v2/hclwrite"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ExampleEncodeIntoBody demonstrates encoding a tagged struct — including a
// single nested block ("constraints") and a repeated labeled block
// ("service") — into an hclwrite body and printing the rendered HCL.
func ExampleEncodeIntoBody() {
	type Service struct {
		Name string   `hcl:"name,label"`
		Exe  []string `hcl:"executable"`
	}
	type Constraints struct {
		OS   string `hcl:"os"`
		Arch string `hcl:"arch"`
	}
	type App struct {
		Name        string       `hcl:"name"`
		Desc        string       `hcl:"description"`
		Constraints *Constraints `hcl:"constraints,block"`
		Services    []Service    `hcl:"service,block"`
	}

	app := App{
		Name: "awesome-app",
		Desc: "Such an awesome application",
		Constraints: &Constraints{
			OS:   "linux",
			Arch: "amd64",
		},
		Services: []Service{
			{
				Name: "web",
				Exe:  []string{"./web", "--listen=:8080"},
			},
			{
				Name: "worker",
				Exe:  []string{"./worker"},
			},
		},
	}

	f := hclwrite.NewEmptyFile()
	gohcl.EncodeIntoBody(&app, f.Body())
	fmt.Printf("%s", f.Bytes())

	// Output:
	// name        = "awesome-app"
	// description = "Such an awesome application"
	//
	// constraints {
	//   os   = "linux"
	//   arch = "amd64"
	// }
	//
	// service "web" {
	//   executable = ["./web", "--listen=:8080"]
	// }
	// service "worker" {
	//   executable = ["./worker"]
	// }
}
|
||||||
185
bake/hclparser/gohcl/schema.go
Normal file
185
bake/hclparser/gohcl/schema.go
Normal file
@@ -0,0 +1,185 @@
|
|||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
package gohcl
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/hashicorp/hcl/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ImpliedBodySchema produces a hcl.BodySchema derived from the type of the
|
||||||
|
// given value, which must be a struct value or a pointer to one. If an
|
||||||
|
// inappropriate value is passed, this function will panic.
|
||||||
|
//
|
||||||
|
// The second return argument indicates whether the given struct includes
|
||||||
|
// a "remain" field, and thus the returned schema is non-exhaustive.
|
||||||
|
//
|
||||||
|
// This uses the tags on the fields of the struct to discover how each
|
||||||
|
// field's value should be expressed within configuration. If an invalid
|
||||||
|
// mapping is attempted, this function will panic.
|
||||||
|
func ImpliedBodySchema(val any) (schema *hcl.BodySchema, partial bool) {
	ty := reflect.TypeOf(val)

	if ty.Kind() == reflect.Ptr {
		// Accept either a struct or a pointer to one.
		ty = ty.Elem()
	}

	if ty.Kind() != reflect.Struct {
		panic(fmt.Sprintf("given value must be struct, not %T", val))
	}

	var attrSchemas []hcl.AttributeSchema
	var blockSchemas []hcl.BlockHeaderSchema

	tags := getFieldTags(ty)

	// Emit attribute schemas in sorted-name order for determinism.
	attrNames := make([]string, 0, len(tags.Attributes))
	for n := range tags.Attributes {
		attrNames = append(attrNames, n)
	}
	sort.Strings(attrNames)
	for _, n := range attrNames {
		idx := tags.Attributes[n]
		optional := tags.Optional[n]
		field := ty.Field(idx)

		var required bool

		switch {
		case field.Type.AssignableTo(exprType):
			// If we're decoding to hcl.Expression then absence can be
			// indicated via a null value, so we don't specify that
			// the field is required during decoding.
			required = false
		case field.Type.Kind() != reflect.Ptr && !optional:
			// Non-pointer fields without the "optional" tag must be set.
			required = true
		default:
			required = false
		}

		attrSchemas = append(attrSchemas, hcl.AttributeSchema{
			Name:     n,
			Required: required,
		})
	}

	// Emit block schemas in sorted-name order for determinism.
	blockNames := make([]string, 0, len(tags.Blocks))
	for n := range tags.Blocks {
		blockNames = append(blockNames, n)
	}
	sort.Strings(blockNames)
	for _, n := range blockNames {
		idx := tags.Blocks[n]
		field := ty.Field(idx)
		fty := field.Type
		// Unwrap slice-of-struct and pointer-to-struct block fields to
		// reach the element struct type.
		if fty.Kind() == reflect.Slice {
			fty = fty.Elem()
		}
		if fty.Kind() == reflect.Ptr {
			fty = fty.Elem()
		}
		if fty.Kind() != reflect.Struct {
			panic(fmt.Sprintf(
				"hcl 'block' tag kind cannot be applied to %s field %s: struct required", field.Type.String(), field.Name,
			))
		}
		// The block struct's own "label" tags determine the label names.
		ftags := getFieldTags(fty)
		var labelNames []string
		if len(ftags.Labels) > 0 {
			labelNames = make([]string, len(ftags.Labels))
			for i, l := range ftags.Labels {
				labelNames[i] = l.Name
			}
		}

		blockSchemas = append(blockSchemas, hcl.BlockHeaderSchema{
			Type:       n,
			LabelNames: labelNames,
		})
	}

	// A "remain" field means the schema does not cover the whole body.
	partial = tags.Remain != nil
	schema = &hcl.BodySchema{
		Attributes: attrSchemas,
		Blocks:     blockSchemas,
	}
	return schema, partial
}
|
||||||
|
|
||||||
|
// fieldTags holds the result of scanning a struct type's `hcl:` field tags,
// mapping configuration names to struct field indices.
type fieldTags struct {
	Attributes map[string]int  // attribute name -> struct field index
	Blocks     map[string]int  // block type name -> struct field index
	Labels     []labelField    // label-capturing fields, in declaration order
	Remain     *int            // index of the "remain" field, if any
	Body       *int            // index of the "body" field, if any
	Optional   map[string]bool // attribute names tagged "optional"
}
|
||||||
|
|
||||||
|
// labelField records a struct field that captures a block label, together
// with the label name used in diagnostic messages.
type labelField struct {
	FieldIndex int    // index of the field within the enclosing struct
	Name       string // label name from the tag
}
|
||||||
|
|
||||||
|
func getFieldTags(ty reflect.Type) *fieldTags {
|
||||||
|
ret := &fieldTags{
|
||||||
|
Attributes: map[string]int{},
|
||||||
|
Blocks: map[string]int{},
|
||||||
|
Optional: map[string]bool{},
|
||||||
|
}
|
||||||
|
|
||||||
|
ct := ty.NumField()
|
||||||
|
for i := range ct {
|
||||||
|
field := ty.Field(i)
|
||||||
|
tag := field.Tag.Get("hcl")
|
||||||
|
if tag == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
comma := strings.Index(tag, ",")
|
||||||
|
var name, kind string
|
||||||
|
if comma != -1 {
|
||||||
|
name = tag[:comma]
|
||||||
|
kind = tag[comma+1:]
|
||||||
|
} else {
|
||||||
|
name = tag
|
||||||
|
kind = "attr"
|
||||||
|
}
|
||||||
|
|
||||||
|
switch kind {
|
||||||
|
case "attr":
|
||||||
|
ret.Attributes[name] = i
|
||||||
|
case "block":
|
||||||
|
ret.Blocks[name] = i
|
||||||
|
case "label":
|
||||||
|
ret.Labels = append(ret.Labels, labelField{
|
||||||
|
FieldIndex: i,
|
||||||
|
Name: name,
|
||||||
|
})
|
||||||
|
case "remain":
|
||||||
|
if ret.Remain != nil {
|
||||||
|
panic("only one 'remain' tag is permitted")
|
||||||
|
}
|
||||||
|
idx := i // copy, because this loop will continue assigning to i
|
||||||
|
ret.Remain = &idx
|
||||||
|
case "body":
|
||||||
|
if ret.Body != nil {
|
||||||
|
panic("only one 'body' tag is permitted")
|
||||||
|
}
|
||||||
|
idx := i // copy, because this loop will continue assigning to i
|
||||||
|
ret.Body = &idx
|
||||||
|
case "optional":
|
||||||
|
ret.Attributes[name] = i
|
||||||
|
ret.Optional[name] = true
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("invalid hcl field tag kind %q on %s %q", kind, field.Type.String(), field.Name))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret
|
||||||
|
}
|
||||||
233
bake/hclparser/gohcl/schema_test.go
Normal file
233
bake/hclparser/gohcl/schema_test.go
Normal file
@@ -0,0 +1,233 @@
|
|||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
package gohcl
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/davecgh/go-spew/spew"
|
||||||
|
"github.com/hashicorp/hcl/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestImpliedBodySchema(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
val any
|
||||||
|
wantSchema *hcl.BodySchema
|
||||||
|
wantPartial bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
struct{}{},
|
||||||
|
&hcl.BodySchema{},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
struct {
|
||||||
|
Ignored bool
|
||||||
|
}{},
|
||||||
|
&hcl.BodySchema{},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
struct {
|
||||||
|
Attr1 bool `hcl:"attr1"`
|
||||||
|
Attr2 bool `hcl:"attr2"`
|
||||||
|
}{},
|
||||||
|
&hcl.BodySchema{
|
||||||
|
Attributes: []hcl.AttributeSchema{
|
||||||
|
{
|
||||||
|
Name: "attr1",
|
||||||
|
Required: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "attr2",
|
||||||
|
Required: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
struct {
|
||||||
|
Attr *bool `hcl:"attr,attr"`
|
||||||
|
}{},
|
||||||
|
&hcl.BodySchema{
|
||||||
|
Attributes: []hcl.AttributeSchema{
|
||||||
|
{
|
||||||
|
Name: "attr",
|
||||||
|
Required: false,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
struct {
|
||||||
|
Thing struct{} `hcl:"thing,block"`
|
||||||
|
}{},
|
||||||
|
&hcl.BodySchema{
|
||||||
|
Blocks: []hcl.BlockHeaderSchema{
|
||||||
|
{
|
||||||
|
Type: "thing",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
struct {
|
||||||
|
Thing struct {
|
||||||
|
Type string `hcl:"type,label"`
|
||||||
|
Name string `hcl:"name,label"`
|
||||||
|
} `hcl:"thing,block"`
|
||||||
|
}{},
|
||||||
|
&hcl.BodySchema{
|
||||||
|
Blocks: []hcl.BlockHeaderSchema{
|
||||||
|
{
|
||||||
|
Type: "thing",
|
||||||
|
LabelNames: []string{"type", "name"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
struct {
|
||||||
|
Thing []struct {
|
||||||
|
Type string `hcl:"type,label"`
|
||||||
|
Name string `hcl:"name,label"`
|
||||||
|
} `hcl:"thing,block"`
|
||||||
|
}{},
|
||||||
|
&hcl.BodySchema{
|
||||||
|
Blocks: []hcl.BlockHeaderSchema{
|
||||||
|
{
|
||||||
|
Type: "thing",
|
||||||
|
LabelNames: []string{"type", "name"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
struct {
|
||||||
|
Thing *struct {
|
||||||
|
Type string `hcl:"type,label"`
|
||||||
|
Name string `hcl:"name,label"`
|
||||||
|
} `hcl:"thing,block"`
|
||||||
|
}{},
|
||||||
|
&hcl.BodySchema{
|
||||||
|
Blocks: []hcl.BlockHeaderSchema{
|
||||||
|
{
|
||||||
|
Type: "thing",
|
||||||
|
LabelNames: []string{"type", "name"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
struct {
|
||||||
|
Thing struct {
|
||||||
|
Name string `hcl:"name,label"`
|
||||||
|
Something string `hcl:"something"`
|
||||||
|
} `hcl:"thing,block"`
|
||||||
|
}{},
|
||||||
|
&hcl.BodySchema{
|
||||||
|
Blocks: []hcl.BlockHeaderSchema{
|
||||||
|
{
|
||||||
|
Type: "thing",
|
||||||
|
LabelNames: []string{"name"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
struct {
|
||||||
|
Doodad string `hcl:"doodad"`
|
||||||
|
Thing struct {
|
||||||
|
Name string `hcl:"name,label"`
|
||||||
|
} `hcl:"thing,block"`
|
||||||
|
}{},
|
||||||
|
&hcl.BodySchema{
|
||||||
|
Attributes: []hcl.AttributeSchema{
|
||||||
|
{
|
||||||
|
Name: "doodad",
|
||||||
|
Required: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Blocks: []hcl.BlockHeaderSchema{
|
||||||
|
{
|
||||||
|
Type: "thing",
|
||||||
|
LabelNames: []string{"name"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
struct {
|
||||||
|
Doodad string `hcl:"doodad"`
|
||||||
|
Config string `hcl:",remain"`
|
||||||
|
}{},
|
||||||
|
&hcl.BodySchema{
|
||||||
|
Attributes: []hcl.AttributeSchema{
|
||||||
|
{
|
||||||
|
Name: "doodad",
|
||||||
|
Required: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
struct {
|
||||||
|
Expr hcl.Expression `hcl:"expr"`
|
||||||
|
}{},
|
||||||
|
&hcl.BodySchema{
|
||||||
|
Attributes: []hcl.AttributeSchema{
|
||||||
|
{
|
||||||
|
Name: "expr",
|
||||||
|
Required: false,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
struct {
|
||||||
|
Meh string `hcl:"meh,optional"`
|
||||||
|
}{},
|
||||||
|
&hcl.BodySchema{
|
||||||
|
Attributes: []hcl.AttributeSchema{
|
||||||
|
{
|
||||||
|
Name: "meh",
|
||||||
|
Required: false,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(fmt.Sprintf("%#v", test.val), func(t *testing.T) {
|
||||||
|
schema, partial := ImpliedBodySchema(test.val)
|
||||||
|
if !reflect.DeepEqual(schema, test.wantSchema) {
|
||||||
|
t.Errorf(
|
||||||
|
"wrong schema\ngot: %s\nwant: %s",
|
||||||
|
spew.Sdump(schema), spew.Sdump(test.wantSchema),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
if partial != test.wantPartial {
|
||||||
|
t.Errorf(
|
||||||
|
"wrong partial flag\ngot: %#v\nwant: %#v",
|
||||||
|
partial, test.wantPartial,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
19
bake/hclparser/gohcl/types.go
Normal file
19
bake/hclparser/gohcl/types.go
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
package gohcl
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
|
||||||
|
"github.com/hashicorp/hcl/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
var victimExpr hcl.Expression
|
||||||
|
var victimBody hcl.Body
|
||||||
|
|
||||||
|
var exprType = reflect.TypeOf(&victimExpr).Elem()
|
||||||
|
var bodyType = reflect.TypeOf(&victimBody).Elem()
|
||||||
|
var blockType = reflect.TypeOf((*hcl.Block)(nil)) //nolint:unused
|
||||||
|
var attrType = reflect.TypeOf((*hcl.Attribute)(nil))
|
||||||
|
var attrsType = reflect.TypeOf(hcl.Attributes(nil))
|
||||||
@@ -7,15 +7,15 @@ import (
|
|||||||
"math"
|
"math"
|
||||||
"math/big"
|
"math/big"
|
||||||
"reflect"
|
"reflect"
|
||||||
|
"slices"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/docker/buildx/bake/hclparser/gohcl"
|
||||||
"github.com/docker/buildx/util/userfunc"
|
"github.com/docker/buildx/util/userfunc"
|
||||||
"github.com/hashicorp/hcl/v2"
|
"github.com/hashicorp/hcl/v2"
|
||||||
"github.com/hashicorp/hcl/v2/gohcl"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/zclconf/go-cty/cty"
|
"github.com/zclconf/go-cty/cty"
|
||||||
"github.com/zclconf/go-cty/cty/gocty"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type Opt struct {
|
type Opt struct {
|
||||||
@@ -25,11 +25,17 @@ type Opt struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type variable struct {
|
type variable struct {
|
||||||
Name string `json:"-" hcl:"name,label"`
|
Name string `json:"-" hcl:"name,label"`
|
||||||
Default *hcl.Attribute `json:"default,omitempty" hcl:"default,optional"`
|
Default *hcl.Attribute `json:"default,omitempty" hcl:"default,optional"`
|
||||||
Description string `json:"description,omitempty" hcl:"description,optional"`
|
Description string `json:"description,omitempty" hcl:"description,optional"`
|
||||||
Body hcl.Body `json:"-" hcl:",body"`
|
Validations []*variableValidation `json:"validation,omitempty" hcl:"validation,block"`
|
||||||
Remain hcl.Body `json:"-" hcl:",remain"`
|
Body hcl.Body `json:"-" hcl:",body"`
|
||||||
|
Remain hcl.Body `json:"-" hcl:",remain"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type variableValidation struct {
|
||||||
|
Condition hcl.Expression `json:"condition" hcl:"condition"`
|
||||||
|
ErrorMessage hcl.Expression `json:"error_message" hcl:"error_message"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type functionDef struct {
|
type functionDef struct {
|
||||||
@@ -448,7 +454,7 @@ func (p *parser) resolveBlock(block *hcl.Block, target *hcl.BodySchema) (err err
|
|||||||
}
|
}
|
||||||
|
|
||||||
// decode!
|
// decode!
|
||||||
diag = gohcl.DecodeBody(body(), ectx, output.Interface())
|
diag = decodeBody(body(), ectx, output.Interface())
|
||||||
if diag.HasErrors() {
|
if diag.HasErrors() {
|
||||||
return diag
|
return diag
|
||||||
}
|
}
|
||||||
@@ -470,11 +476,11 @@ func (p *parser) resolveBlock(block *hcl.Block, target *hcl.BodySchema) (err err
|
|||||||
}
|
}
|
||||||
|
|
||||||
// store the result into the evaluation context (so it can be referenced)
|
// store the result into the evaluation context (so it can be referenced)
|
||||||
outputType, err := gocty.ImpliedType(output.Interface())
|
outputType, err := ImpliedType(output.Interface())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
outputValue, err := gocty.ToCtyValue(output.Interface(), outputType)
|
outputValue, err := ToCtyValue(output.Interface(), outputType)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -486,7 +492,12 @@ func (p *parser) resolveBlock(block *hcl.Block, target *hcl.BodySchema) (err err
|
|||||||
m = map[string]cty.Value{}
|
m = map[string]cty.Value{}
|
||||||
}
|
}
|
||||||
m[name] = outputValue
|
m[name] = outputValue
|
||||||
p.ectx.Variables[block.Type] = cty.MapVal(m)
|
|
||||||
|
// The logical contents of this structure is similar to a map,
|
||||||
|
// but it's possible for some attributes to be different in a way that's
|
||||||
|
// illegal for a map so we use an object here instead which is structurally
|
||||||
|
// equivalent but allows disparate types for different keys.
|
||||||
|
p.ectx.Variables[block.Type] = cty.ObjectVal(m)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@@ -541,10 +552,37 @@ func (p *parser) resolveBlockNames(block *hcl.Block) ([]string, error) {
|
|||||||
return names, nil
|
return names, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (p *parser) validateVariables(vars map[string]*variable, ectx *hcl.EvalContext) hcl.Diagnostics {
|
||||||
|
var diags hcl.Diagnostics
|
||||||
|
for _, v := range vars {
|
||||||
|
for _, validation := range v.Validations {
|
||||||
|
condition, condDiags := validation.Condition.Value(ectx)
|
||||||
|
if condDiags.HasErrors() {
|
||||||
|
diags = append(diags, condDiags...)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if !condition.True() {
|
||||||
|
message, msgDiags := validation.ErrorMessage.Value(ectx)
|
||||||
|
if msgDiags.HasErrors() {
|
||||||
|
diags = append(diags, msgDiags...)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
diags = append(diags, &hcl.Diagnostic{
|
||||||
|
Severity: hcl.DiagError,
|
||||||
|
Summary: "Validation failed",
|
||||||
|
Detail: message.AsString(),
|
||||||
|
Subject: validation.Condition.Range().Ptr(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
|
||||||
type Variable struct {
|
type Variable struct {
|
||||||
Name string
|
Name string `json:"name"`
|
||||||
Description string
|
Description string `json:"description,omitempty"`
|
||||||
Value *string
|
Value *string `json:"value,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type ParseMeta struct {
|
type ParseMeta struct {
|
||||||
@@ -552,7 +590,7 @@ type ParseMeta struct {
|
|||||||
AllVariables []*Variable
|
AllVariables []*Variable
|
||||||
}
|
}
|
||||||
|
|
||||||
func Parse(b hcl.Body, opt Opt, val interface{}) (*ParseMeta, hcl.Diagnostics) {
|
func Parse(b hcl.Body, opt Opt, val any) (*ParseMeta, hcl.Diagnostics) {
|
||||||
reserved := map[string]struct{}{}
|
reserved := map[string]struct{}{}
|
||||||
schema, _ := gohcl.ImpliedBodySchema(val)
|
schema, _ := gohcl.ImpliedBodySchema(val)
|
||||||
|
|
||||||
@@ -686,6 +724,9 @@ func Parse(b hcl.Body, opt Opt, val interface{}) (*ParseMeta, hcl.Diagnostics) {
|
|||||||
}
|
}
|
||||||
vars = append(vars, v)
|
vars = append(vars, v)
|
||||||
}
|
}
|
||||||
|
if diags := p.validateVariables(p.vars, p.ectx); diags.HasErrors() {
|
||||||
|
return nil, diags
|
||||||
|
}
|
||||||
|
|
||||||
for k := range p.funcs {
|
for k := range p.funcs {
|
||||||
if err := p.resolveFunction(p.ectx, k); err != nil {
|
if err := p.resolveFunction(p.ectx, k); err != nil {
|
||||||
@@ -723,7 +764,7 @@ func Parse(b hcl.Body, opt Opt, val interface{}) (*ParseMeta, hcl.Diagnostics) {
|
|||||||
types := map[string]field{}
|
types := map[string]field{}
|
||||||
renamed := map[string]map[string][]string{}
|
renamed := map[string]map[string][]string{}
|
||||||
vt := reflect.ValueOf(val).Elem().Type()
|
vt := reflect.ValueOf(val).Elem().Type()
|
||||||
for i := 0; i < vt.NumField(); i++ {
|
for i := range vt.NumField() {
|
||||||
tags := strings.Split(vt.Field(i).Tag.Get("hcl"), ",")
|
tags := strings.Split(vt.Field(i).Tag.Get("hcl"), ",")
|
||||||
|
|
||||||
p.blockTypes[tags[0]] = vt.Field(i).Type.Elem().Elem()
|
p.blockTypes[tags[0]] = vt.Field(i).Type.Elem().Elem()
|
||||||
@@ -791,7 +832,7 @@ func Parse(b hcl.Body, opt Opt, val interface{}) (*ParseMeta, hcl.Diagnostics) {
|
|||||||
oldValue, exists := t.values[lblName]
|
oldValue, exists := t.values[lblName]
|
||||||
if !exists && lblExists {
|
if !exists && lblExists {
|
||||||
if v.Elem().Field(t.idx).Type().Kind() == reflect.Slice {
|
if v.Elem().Field(t.idx).Type().Kind() == reflect.Slice {
|
||||||
for i := 0; i < v.Elem().Field(t.idx).Len(); i++ {
|
for i := range v.Elem().Field(t.idx).Len() {
|
||||||
if lblName == v.Elem().Field(t.idx).Index(i).Elem().Field(lblIndex).String() {
|
if lblName == v.Elem().Field(t.idx).Index(i).Elem().Field(lblIndex).String() {
|
||||||
exists = true
|
exists = true
|
||||||
oldValue = value{Value: v.Elem().Field(t.idx).Index(i), idx: i}
|
oldValue = value{Value: v.Elem().Field(t.idx).Index(i), idx: i}
|
||||||
@@ -858,7 +899,7 @@ func wrapErrorDiagnostic(message string, err error, subject *hcl.Range, context
|
|||||||
|
|
||||||
func setName(v reflect.Value, name string) {
|
func setName(v reflect.Value, name string) {
|
||||||
numFields := v.Elem().Type().NumField()
|
numFields := v.Elem().Type().NumField()
|
||||||
for i := 0; i < numFields; i++ {
|
for i := range numFields {
|
||||||
parts := strings.Split(v.Elem().Type().Field(i).Tag.Get("hcl"), ",")
|
parts := strings.Split(v.Elem().Type().Field(i).Tag.Get("hcl"), ",")
|
||||||
for _, t := range parts[1:] {
|
for _, t := range parts[1:] {
|
||||||
if t == "label" {
|
if t == "label" {
|
||||||
@@ -870,12 +911,10 @@ func setName(v reflect.Value, name string) {
|
|||||||
|
|
||||||
func getName(v reflect.Value) (string, bool) {
|
func getName(v reflect.Value) (string, bool) {
|
||||||
numFields := v.Elem().Type().NumField()
|
numFields := v.Elem().Type().NumField()
|
||||||
for i := 0; i < numFields; i++ {
|
for i := range numFields {
|
||||||
parts := strings.Split(v.Elem().Type().Field(i).Tag.Get("hcl"), ",")
|
parts := strings.Split(v.Elem().Type().Field(i).Tag.Get("hcl"), ",")
|
||||||
for _, t := range parts[1:] {
|
if slices.Contains(parts[1:], "label") {
|
||||||
if t == "label" {
|
return v.Elem().Field(i).String(), true
|
||||||
return v.Elem().Field(i).String(), true
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return "", false
|
return "", false
|
||||||
@@ -883,12 +922,10 @@ func getName(v reflect.Value) (string, bool) {
|
|||||||
|
|
||||||
func getNameIndex(v reflect.Value) (int, bool) {
|
func getNameIndex(v reflect.Value) (int, bool) {
|
||||||
numFields := v.Elem().Type().NumField()
|
numFields := v.Elem().Type().NumField()
|
||||||
for i := 0; i < numFields; i++ {
|
for i := range numFields {
|
||||||
parts := strings.Split(v.Elem().Type().Field(i).Tag.Get("hcl"), ",")
|
parts := strings.Split(v.Elem().Type().Field(i).Tag.Get("hcl"), ",")
|
||||||
for _, t := range parts[1:] {
|
if slices.Contains(parts[1:], "label") {
|
||||||
if t == "label" {
|
return i, true
|
||||||
return i, true
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return 0, false
|
return 0, false
|
||||||
@@ -947,3 +984,8 @@ func key(ks ...any) uint64 {
|
|||||||
}
|
}
|
||||||
return hash.Sum64()
|
return hash.Sum64()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func decodeBody(body hcl.Body, ctx *hcl.EvalContext, val any) hcl.Diagnostics {
|
||||||
|
dec := gohcl.DecodeOptions{ImpliedType: ImpliedType}
|
||||||
|
return dec.DecodeBody(body, ctx, val)
|
||||||
|
}
|
||||||
|
|||||||
@@ -170,7 +170,6 @@ func indexOfFunc() function.Function {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
return cty.NilVal, errors.New("item not found")
|
return cty.NilVal, errors.New("item not found")
|
||||||
|
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
160
bake/hclparser/type_implied.go
Normal file
160
bake/hclparser/type_implied.go
Normal file
@@ -0,0 +1,160 @@
|
|||||||
|
// MIT License
|
||||||
|
//
|
||||||
|
// Copyright (c) 2017-2018 Martin Atkins
|
||||||
|
//
|
||||||
|
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
// of this software and associated documentation files (the "Software"), to deal
|
||||||
|
// in the Software without restriction, including without limitation the rights
|
||||||
|
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
// copies of the Software, and to permit persons to whom the Software is
|
||||||
|
// furnished to do so, subject to the following conditions:
|
||||||
|
//
|
||||||
|
// The above copyright notice and this permission notice shall be included in all
|
||||||
|
// copies or substantial portions of the Software.
|
||||||
|
//
|
||||||
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
// SOFTWARE.
|
||||||
|
|
||||||
|
package hclparser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
|
||||||
|
"github.com/zclconf/go-cty/cty"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ImpliedType takes an arbitrary Go value (as an interface{}) and attempts
|
||||||
|
// to find a suitable cty.Type instance that could be used for a conversion
|
||||||
|
// with ToCtyValue.
|
||||||
|
//
|
||||||
|
// This allows -- for simple situations at least -- types to be defined just
|
||||||
|
// once in Go and the cty types derived from the Go types, but in the process
|
||||||
|
// it makes some assumptions that may be undesirable so applications are
|
||||||
|
// encouraged to build their cty types directly if exacting control is
|
||||||
|
// required.
|
||||||
|
//
|
||||||
|
// Not all Go types can be represented as cty types, so an error may be
|
||||||
|
// returned which is usually considered to be a bug in the calling program.
|
||||||
|
// In particular, ImpliedType will never use capsule types in its returned
|
||||||
|
// type, because it cannot know the capsule types supported by the calling
|
||||||
|
// program.
|
||||||
|
func ImpliedType(gv any) (cty.Type, error) {
|
||||||
|
rt := reflect.TypeOf(gv)
|
||||||
|
var path cty.Path
|
||||||
|
return impliedType(rt, path)
|
||||||
|
}
|
||||||
|
|
||||||
|
func impliedType(rt reflect.Type, path cty.Path) (cty.Type, error) {
|
||||||
|
if ety, err := impliedTypeExt(rt, path); err == nil {
|
||||||
|
return ety, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
switch rt.Kind() {
|
||||||
|
case reflect.Ptr:
|
||||||
|
return impliedType(rt.Elem(), path)
|
||||||
|
|
||||||
|
// Primitive types
|
||||||
|
case reflect.Bool:
|
||||||
|
return cty.Bool, nil
|
||||||
|
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||||
|
return cty.Number, nil
|
||||||
|
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||||
|
return cty.Number, nil
|
||||||
|
case reflect.Float32, reflect.Float64:
|
||||||
|
return cty.Number, nil
|
||||||
|
case reflect.String:
|
||||||
|
return cty.String, nil
|
||||||
|
|
||||||
|
// Collection types
|
||||||
|
case reflect.Slice:
|
||||||
|
path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.Number)})
|
||||||
|
ety, err := impliedType(rt.Elem(), path)
|
||||||
|
if err != nil {
|
||||||
|
return cty.NilType, err
|
||||||
|
}
|
||||||
|
return cty.List(ety), nil
|
||||||
|
case reflect.Map:
|
||||||
|
if !stringType.AssignableTo(rt.Key()) {
|
||||||
|
return cty.NilType, path.NewErrorf("no cty.Type for %s (must have string keys)", rt)
|
||||||
|
}
|
||||||
|
path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.String)})
|
||||||
|
ety, err := impliedType(rt.Elem(), path)
|
||||||
|
if err != nil {
|
||||||
|
return cty.NilType, err
|
||||||
|
}
|
||||||
|
return cty.Map(ety), nil
|
||||||
|
|
||||||
|
// Structural types
|
||||||
|
case reflect.Struct:
|
||||||
|
return impliedStructType(rt, path)
|
||||||
|
|
||||||
|
default:
|
||||||
|
return cty.NilType, path.NewErrorf("no cty.Type for %s", rt)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func impliedStructType(rt reflect.Type, path cty.Path) (cty.Type, error) {
|
||||||
|
if valueType.AssignableTo(rt) {
|
||||||
|
// Special case: cty.Value represents cty.DynamicPseudoType, for
|
||||||
|
// type conformance checking.
|
||||||
|
return cty.DynamicPseudoType, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
fieldIdxs := structTagIndices(rt)
|
||||||
|
if len(fieldIdxs) == 0 {
|
||||||
|
return cty.NilType, path.NewErrorf("no cty.Type for %s (no cty field tags)", rt)
|
||||||
|
}
|
||||||
|
|
||||||
|
atys := make(map[string]cty.Type, len(fieldIdxs))
|
||||||
|
|
||||||
|
{
|
||||||
|
// Temporary extension of path for attributes
|
||||||
|
path := append(path, nil)
|
||||||
|
|
||||||
|
for k, fi := range fieldIdxs {
|
||||||
|
path[len(path)-1] = cty.GetAttrStep{Name: k}
|
||||||
|
|
||||||
|
ft := rt.Field(fi).Type
|
||||||
|
aty, err := impliedType(ft, path)
|
||||||
|
if err != nil {
|
||||||
|
return cty.NilType, err
|
||||||
|
}
|
||||||
|
|
||||||
|
atys[k] = aty
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return cty.Object(atys), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
valueType = reflect.TypeOf(cty.Value{})
|
||||||
|
stringType = reflect.TypeOf("")
|
||||||
|
)
|
||||||
|
|
||||||
|
// structTagIndices interrogates the fields of the given type (which must
|
||||||
|
// be a struct type, or we'll panic) and returns a map from the cty
|
||||||
|
// attribute names declared via struct tags to the indices of the
|
||||||
|
// fields holding those tags.
|
||||||
|
//
|
||||||
|
// This function will panic if two fields within the struct are tagged with
|
||||||
|
// the same cty attribute name.
|
||||||
|
func structTagIndices(st reflect.Type) map[string]int {
|
||||||
|
ct := st.NumField()
|
||||||
|
ret := make(map[string]int, ct)
|
||||||
|
|
||||||
|
for i := range ct {
|
||||||
|
field := st.Field(i)
|
||||||
|
attrName := field.Tag.Get("cty")
|
||||||
|
if attrName != "" {
|
||||||
|
ret[attrName] = i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret
|
||||||
|
}
|
||||||
166
bake/hclparser/type_implied_ext.go
Normal file
166
bake/hclparser/type_implied_ext.go
Normal file
@@ -0,0 +1,166 @@
|
|||||||
|
package hclparser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/containerd/errdefs"
|
||||||
|
"github.com/zclconf/go-cty/cty"
|
||||||
|
"github.com/zclconf/go-cty/cty/convert"
|
||||||
|
"github.com/zclconf/go-cty/cty/gocty"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ToCtyValueConverter interface {
|
||||||
|
// ToCtyValue will convert this capsule value into a native
|
||||||
|
// cty.Value. This should not return a capsule type.
|
||||||
|
ToCtyValue() cty.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
type FromCtyValueConverter interface {
|
||||||
|
// FromCtyValue will initialize this value using a cty.Value.
|
||||||
|
FromCtyValue(in cty.Value, path cty.Path) error
|
||||||
|
}
|
||||||
|
|
||||||
|
type extensionType int
|
||||||
|
|
||||||
|
const (
|
||||||
|
unwrapCapsuleValueExtension extensionType = iota
|
||||||
|
)
|
||||||
|
|
||||||
|
func impliedTypeExt(rt reflect.Type, _ cty.Path) (cty.Type, error) {
|
||||||
|
if rt.Kind() != reflect.Pointer {
|
||||||
|
rt = reflect.PointerTo(rt)
|
||||||
|
}
|
||||||
|
|
||||||
|
if isCapsuleType(rt) {
|
||||||
|
return capsuleValueCapsuleType(rt), nil
|
||||||
|
}
|
||||||
|
return cty.NilType, errdefs.ErrNotImplemented
|
||||||
|
}
|
||||||
|
|
||||||
|
func isCapsuleType(rt reflect.Type) bool {
|
||||||
|
fromCtyValueType := reflect.TypeFor[FromCtyValueConverter]()
|
||||||
|
toCtyValueType := reflect.TypeFor[ToCtyValueConverter]()
|
||||||
|
return rt.Implements(fromCtyValueType) && rt.Implements(toCtyValueType)
|
||||||
|
}
|
||||||
|
|
||||||
|
var capsuleValueTypes sync.Map
|
||||||
|
|
||||||
|
func capsuleValueCapsuleType(rt reflect.Type) cty.Type {
|
||||||
|
if rt.Kind() != reflect.Pointer {
|
||||||
|
panic("capsule value must be a pointer")
|
||||||
|
}
|
||||||
|
|
||||||
|
elem := rt.Elem()
|
||||||
|
if val, loaded := capsuleValueTypes.Load(elem); loaded {
|
||||||
|
return val.(cty.Type)
|
||||||
|
}
|
||||||
|
|
||||||
|
toCtyValueType := reflect.TypeFor[ToCtyValueConverter]()
|
||||||
|
|
||||||
|
// First time used. Initialize new capsule ops.
|
||||||
|
ops := &cty.CapsuleOps{
|
||||||
|
ConversionTo: func(_ cty.Type) func(cty.Value, cty.Path) (any, error) {
|
||||||
|
return func(in cty.Value, p cty.Path) (any, error) {
|
||||||
|
rv := reflect.New(elem).Interface()
|
||||||
|
if err := rv.(FromCtyValueConverter).FromCtyValue(in, p); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return rv, nil
|
||||||
|
}
|
||||||
|
},
|
||||||
|
ConversionFrom: func(want cty.Type) func(any, cty.Path) (cty.Value, error) {
|
||||||
|
return func(in any, _ cty.Path) (cty.Value, error) {
|
||||||
|
rv := reflect.ValueOf(in).Convert(toCtyValueType)
|
||||||
|
v := rv.Interface().(ToCtyValueConverter).ToCtyValue()
|
||||||
|
return convert.Convert(v, want)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
ExtensionData: func(key any) any {
|
||||||
|
switch key {
|
||||||
|
case unwrapCapsuleValueExtension:
|
||||||
|
zero := reflect.Zero(elem).Interface()
|
||||||
|
if conv, ok := zero.(ToCtyValueConverter); ok {
|
||||||
|
return conv.ToCtyValue().Type()
|
||||||
|
}
|
||||||
|
|
||||||
|
zero = reflect.Zero(rt).Interface()
|
||||||
|
if conv, ok := zero.(ToCtyValueConverter); ok {
|
||||||
|
return conv.ToCtyValue().Type()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Attempt to store the new type. Use whichever was loaded first in the case
|
||||||
|
// of a race condition.
|
||||||
|
ety := cty.CapsuleWithOps(elem.Name(), elem, ops)
|
||||||
|
val, _ := capsuleValueTypes.LoadOrStore(elem, ety)
|
||||||
|
return val.(cty.Type)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnwrapCtyValue will unwrap capsule type values into their native cty value
|
||||||
|
// equivalents if possible.
|
||||||
|
func UnwrapCtyValue(in cty.Value) cty.Value {
|
||||||
|
want := toCtyValueType(in.Type())
|
||||||
|
if in.Type().Equals(want) {
|
||||||
|
return in
|
||||||
|
} else if out, err := convert.Convert(in, want); err == nil {
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
return cty.NullVal(want)
|
||||||
|
}
|
||||||
|
|
||||||
|
func toCtyValueType(in cty.Type) cty.Type {
|
||||||
|
if et := in.MapElementType(); et != nil {
|
||||||
|
return cty.Map(toCtyValueType(*et))
|
||||||
|
}
|
||||||
|
|
||||||
|
if et := in.SetElementType(); et != nil {
|
||||||
|
return cty.Set(toCtyValueType(*et))
|
||||||
|
}
|
||||||
|
|
||||||
|
if et := in.ListElementType(); et != nil {
|
||||||
|
return cty.List(toCtyValueType(*et))
|
||||||
|
}
|
||||||
|
|
||||||
|
if in.IsObjectType() {
|
||||||
|
var optional []string
|
||||||
|
inAttrTypes := in.AttributeTypes()
|
||||||
|
outAttrTypes := make(map[string]cty.Type, len(inAttrTypes))
|
||||||
|
for name, typ := range inAttrTypes {
|
||||||
|
outAttrTypes[name] = toCtyValueType(typ)
|
||||||
|
if in.AttributeOptional(name) {
|
||||||
|
optional = append(optional, name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return cty.ObjectWithOptionalAttrs(outAttrTypes, optional)
|
||||||
|
}
|
||||||
|
|
||||||
|
if in.IsTupleType() {
|
||||||
|
inTypes := in.TupleElementTypes()
|
||||||
|
outTypes := make([]cty.Type, len(inTypes))
|
||||||
|
for i, typ := range inTypes {
|
||||||
|
outTypes[i] = toCtyValueType(typ)
|
||||||
|
}
|
||||||
|
return cty.Tuple(outTypes)
|
||||||
|
}
|
||||||
|
|
||||||
|
if in.IsCapsuleType() {
|
||||||
|
if out := in.CapsuleExtensionData(unwrapCapsuleValueExtension); out != nil {
|
||||||
|
return out.(cty.Type)
|
||||||
|
}
|
||||||
|
return cty.DynamicPseudoType
|
||||||
|
}
|
||||||
|
|
||||||
|
return in
|
||||||
|
}
|
||||||
|
|
||||||
|
func ToCtyValue(val any, ty cty.Type) (cty.Value, error) {
|
||||||
|
out, err := gocty.ToCtyValue(val, ty)
|
||||||
|
if err != nil {
|
||||||
|
return out, err
|
||||||
|
}
|
||||||
|
return UnwrapCtyValue(out), nil
|
||||||
|
}
|
||||||
@@ -15,9 +15,10 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/containerd/containerd/images"
|
"github.com/containerd/containerd/v2/core/images"
|
||||||
"github.com/distribution/reference"
|
"github.com/distribution/reference"
|
||||||
"github.com/docker/buildx/builder"
|
"github.com/docker/buildx/builder"
|
||||||
|
controllerapi "github.com/docker/buildx/controller/pb"
|
||||||
"github.com/docker/buildx/driver"
|
"github.com/docker/buildx/driver"
|
||||||
"github.com/docker/buildx/util/confutil"
|
"github.com/docker/buildx/util/confutil"
|
||||||
"github.com/docker/buildx/util/desktop"
|
"github.com/docker/buildx/util/desktop"
|
||||||
@@ -39,7 +40,6 @@ import (
|
|||||||
"github.com/moby/buildkit/solver/errdefs"
|
"github.com/moby/buildkit/solver/errdefs"
|
||||||
"github.com/moby/buildkit/solver/pb"
|
"github.com/moby/buildkit/solver/pb"
|
||||||
spb "github.com/moby/buildkit/sourcepolicy/pb"
|
spb "github.com/moby/buildkit/sourcepolicy/pb"
|
||||||
"github.com/moby/buildkit/util/entitlements"
|
|
||||||
"github.com/moby/buildkit/util/progress/progresswriter"
|
"github.com/moby/buildkit/util/progress/progresswriter"
|
||||||
"github.com/moby/buildkit/util/tracing"
|
"github.com/moby/buildkit/util/tracing"
|
||||||
"github.com/opencontainers/go-digest"
|
"github.com/opencontainers/go-digest"
|
||||||
@@ -61,25 +61,28 @@ const (
|
|||||||
type Options struct {
|
type Options struct {
|
||||||
Inputs Inputs
|
Inputs Inputs
|
||||||
|
|
||||||
Ref string
|
Ref string
|
||||||
Allow []entitlements.Entitlement
|
Allow []string
|
||||||
Attests map[string]*string
|
Attests map[string]*string
|
||||||
BuildArgs map[string]string
|
BuildArgs map[string]string
|
||||||
CacheFrom []client.CacheOptionsEntry
|
CacheFrom []client.CacheOptionsEntry
|
||||||
CacheTo []client.CacheOptionsEntry
|
CacheTo []client.CacheOptionsEntry
|
||||||
CgroupParent string
|
CgroupParent string
|
||||||
Exports []client.ExportEntry
|
Exports []client.ExportEntry
|
||||||
ExtraHosts []string
|
ExportsLocalPathsTemporary []string // should be removed after client.ExportEntry update in buildkit v0.19.0
|
||||||
Labels map[string]string
|
ExtraHosts []string
|
||||||
NetworkMode string
|
Labels map[string]string
|
||||||
NoCache bool
|
NetworkMode string
|
||||||
NoCacheFilter []string
|
NoCache bool
|
||||||
Platforms []specs.Platform
|
NoCacheFilter []string
|
||||||
Pull bool
|
Platforms []specs.Platform
|
||||||
ShmSize opts.MemBytes
|
Pull bool
|
||||||
Tags []string
|
SecretSpecs []*controllerapi.Secret
|
||||||
Target string
|
SSHSpecs []*controllerapi.SSH
|
||||||
Ulimits *opts.UlimitOpt
|
ShmSize opts.MemBytes
|
||||||
|
Tags []string
|
||||||
|
Target string
|
||||||
|
Ulimits *opts.UlimitOpt
|
||||||
|
|
||||||
Session []session.Attachable
|
Session []session.Attachable
|
||||||
Linked bool // Linked marks this target as exclusively linked (not requested by the user).
|
Linked bool // Linked marks this target as exclusively linked (not requested by the user).
|
||||||
@@ -536,7 +539,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[
|
|||||||
node := dp.Node().Driver
|
node := dp.Node().Driver
|
||||||
if node.IsMobyDriver() {
|
if node.IsMobyDriver() {
|
||||||
for _, e := range so.Exports {
|
for _, e := range so.Exports {
|
||||||
if e.Type == "moby" && e.Attrs["push"] != "" {
|
if e.Type == "moby" && e.Attrs["push"] != "" && !node.Features(ctx)[driver.DirectPush] {
|
||||||
if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok {
|
if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok {
|
||||||
pushNames = e.Attrs["name"]
|
pushNames = e.Attrs["name"]
|
||||||
if pushNames == "" {
|
if pushNames == "" {
|
||||||
@@ -619,7 +622,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[
|
|||||||
// This is fallback for some very old buildkit versions.
|
// This is fallback for some very old buildkit versions.
|
||||||
// Note that the mediatype isn't really correct as most of the time it is image manifest and
|
// Note that the mediatype isn't really correct as most of the time it is image manifest and
|
||||||
// not manifest list but actually both are handled because for Docker mediatypes the
|
// not manifest list but actually both are handled because for Docker mediatypes the
|
||||||
// mediatype value in the Accpet header does not seem to matter.
|
// mediatype value in the Accept header does not seem to matter.
|
||||||
s, ok = r.ExporterResponse[exptypes.ExporterImageDigestKey]
|
s, ok = r.ExporterResponse[exptypes.ExporterImageDigestKey]
|
||||||
if ok {
|
if ok {
|
||||||
descs = append(descs, specs.Descriptor{
|
descs = append(descs, specs.Descriptor{
|
||||||
@@ -831,7 +834,7 @@ func remoteDigestWithMoby(ctx context.Context, d *driver.DriverHandle, name stri
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
img, _, err := api.ImageInspectWithRaw(ctx, name)
|
img, err := api.ImageInspect(ctx, name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
stderrors "errors"
|
stderrors "errors"
|
||||||
"net"
|
"net"
|
||||||
|
"slices"
|
||||||
|
|
||||||
"github.com/containerd/platforms"
|
"github.com/containerd/platforms"
|
||||||
"github.com/docker/buildx/builder"
|
"github.com/docker/buildx/builder"
|
||||||
@@ -37,15 +38,7 @@ func Dial(ctx context.Context, nodes []builder.Node, pw progress.Writer, platfor
|
|||||||
for _, ls := range resolved {
|
for _, ls := range resolved {
|
||||||
for _, rn := range ls {
|
for _, rn := range ls {
|
||||||
if platform != nil {
|
if platform != nil {
|
||||||
p := *platform
|
if !slices.ContainsFunc(rn.platforms, platforms.Only(*platform).Match) {
|
||||||
var found bool
|
|
||||||
for _, pp := range rn.platforms {
|
|
||||||
if platforms.Only(p).Match(pp) {
|
|
||||||
found = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !found {
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,6 +3,7 @@ package build
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"slices"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/containerd/platforms"
|
"github.com/containerd/platforms"
|
||||||
@@ -221,7 +222,7 @@ func (r *nodeResolver) get(p specs.Platform, matcher matchMaker, additionalPlatf
|
|||||||
for i, node := range r.nodes {
|
for i, node := range r.nodes {
|
||||||
platforms := node.Platforms
|
platforms := node.Platforms
|
||||||
if additionalPlatforms != nil {
|
if additionalPlatforms != nil {
|
||||||
platforms = append([]specs.Platform{}, platforms...)
|
platforms = slices.Clone(platforms)
|
||||||
platforms = append(platforms, additionalPlatforms(i, node)...)
|
platforms = append(platforms, additionalPlatforms(i, node)...)
|
||||||
}
|
}
|
||||||
for _, p2 := range platforms {
|
for _, p2 := range platforms {
|
||||||
|
|||||||
@@ -23,7 +23,7 @@ func setupTest(tb testing.TB) {
|
|||||||
gitutil.GitInit(c, tb)
|
gitutil.GitInit(c, tb)
|
||||||
|
|
||||||
df := []byte("FROM alpine:latest\n")
|
df := []byte("FROM alpine:latest\n")
|
||||||
assert.NoError(tb, os.WriteFile("Dockerfile", df, 0644))
|
require.NoError(tb, os.WriteFile("Dockerfile", df, 0644))
|
||||||
|
|
||||||
gitutil.GitAdd(c, tb, "Dockerfile")
|
gitutil.GitAdd(c, tb, "Dockerfile")
|
||||||
gitutil.GitCommit(c, tb, "initial commit")
|
gitutil.GitCommit(c, tb, "initial commit")
|
||||||
@@ -32,7 +32,7 @@ func setupTest(tb testing.TB) {
|
|||||||
|
|
||||||
func TestGetGitAttributesNotGitRepo(t *testing.T) {
|
func TestGetGitAttributesNotGitRepo(t *testing.T) {
|
||||||
_, err := getGitAttributes(context.Background(), t.TempDir(), "Dockerfile")
|
_, err := getGitAttributes(context.Background(), t.TempDir(), "Dockerfile")
|
||||||
assert.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGetGitAttributesBadGitRepo(t *testing.T) {
|
func TestGetGitAttributesBadGitRepo(t *testing.T) {
|
||||||
@@ -47,7 +47,7 @@ func TestGetGitAttributesNoContext(t *testing.T) {
|
|||||||
setupTest(t)
|
setupTest(t)
|
||||||
|
|
||||||
addGitAttrs, err := getGitAttributes(context.Background(), "", "Dockerfile")
|
addGitAttrs, err := getGitAttributes(context.Background(), "", "Dockerfile")
|
||||||
assert.NoError(t, err)
|
require.NoError(t, err)
|
||||||
var so client.SolveOpt
|
var so client.SolveOpt
|
||||||
addGitAttrs(&so)
|
addGitAttrs(&so)
|
||||||
assert.Empty(t, so.FrontendAttrs)
|
assert.Empty(t, so.FrontendAttrs)
|
||||||
@@ -195,8 +195,8 @@ func TestLocalDirsSub(t *testing.T) {
|
|||||||
gitutil.GitInit(c, t)
|
gitutil.GitInit(c, t)
|
||||||
|
|
||||||
df := []byte("FROM alpine:latest\n")
|
df := []byte("FROM alpine:latest\n")
|
||||||
assert.NoError(t, os.MkdirAll("app", 0755))
|
require.NoError(t, os.MkdirAll("app", 0755))
|
||||||
assert.NoError(t, os.WriteFile("app/Dockerfile", df, 0644))
|
require.NoError(t, os.WriteFile("app/Dockerfile", df, 0644))
|
||||||
|
|
||||||
gitutil.GitAdd(c, t, "app/Dockerfile")
|
gitutil.GitAdd(c, t, "app/Dockerfile")
|
||||||
gitutil.GitCommit(c, t, "initial commit")
|
gitutil.GitCommit(c, t, "initial commit")
|
||||||
|
|||||||
@@ -16,7 +16,7 @@ import (
|
|||||||
|
|
||||||
type Container struct {
|
type Container struct {
|
||||||
cancelOnce sync.Once
|
cancelOnce sync.Once
|
||||||
containerCancel func()
|
containerCancel func(error)
|
||||||
isUnavailable atomic.Bool
|
isUnavailable atomic.Bool
|
||||||
initStarted atomic.Bool
|
initStarted atomic.Bool
|
||||||
container gateway.Container
|
container gateway.Container
|
||||||
@@ -31,18 +31,18 @@ func NewContainer(ctx context.Context, resultCtx *ResultHandle, cfg *controllera
|
|||||||
errCh := make(chan error)
|
errCh := make(chan error)
|
||||||
go func() {
|
go func() {
|
||||||
err := resultCtx.build(func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
|
err := resultCtx.build(func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
|
||||||
ctx, cancel := context.WithCancel(ctx)
|
ctx, cancel := context.WithCancelCause(ctx)
|
||||||
go func() {
|
go func() {
|
||||||
<-mainCtx.Done()
|
<-mainCtx.Done()
|
||||||
cancel()
|
cancel(errors.WithStack(context.Canceled))
|
||||||
}()
|
}()
|
||||||
|
|
||||||
containerCfg, err := resultCtx.getContainerConfig(cfg)
|
containerCfg, err := resultCtx.getContainerConfig(cfg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
containerCtx, containerCancel := context.WithCancel(ctx)
|
containerCtx, containerCancel := context.WithCancelCause(ctx)
|
||||||
defer containerCancel()
|
defer containerCancel(errors.WithStack(context.Canceled))
|
||||||
bkContainer, err := c.NewContainer(containerCtx, containerCfg)
|
bkContainer, err := c.NewContainer(containerCtx, containerCfg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -83,7 +83,7 @@ func (c *Container) Cancel() {
|
|||||||
c.markUnavailable()
|
c.markUnavailable()
|
||||||
c.cancelOnce.Do(func() {
|
c.cancelOnce.Do(func() {
|
||||||
if c.containerCancel != nil {
|
if c.containerCancel != nil {
|
||||||
c.containerCancel()
|
c.containerCancel(errors.WithStack(context.Canceled))
|
||||||
}
|
}
|
||||||
close(c.releaseCh)
|
close(c.releaseCh)
|
||||||
})
|
})
|
||||||
|
|||||||
@@ -11,8 +11,8 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"syscall"
|
"syscall"
|
||||||
|
|
||||||
"github.com/containerd/containerd/content"
|
"github.com/containerd/containerd/v2/core/content"
|
||||||
"github.com/containerd/containerd/content/local"
|
"github.com/containerd/containerd/v2/plugins/content/local"
|
||||||
"github.com/containerd/platforms"
|
"github.com/containerd/platforms"
|
||||||
"github.com/distribution/reference"
|
"github.com/distribution/reference"
|
||||||
"github.com/docker/buildx/builder"
|
"github.com/docker/buildx/builder"
|
||||||
@@ -318,7 +318,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt *O
|
|||||||
switch opt.NetworkMode {
|
switch opt.NetworkMode {
|
||||||
case "host":
|
case "host":
|
||||||
so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
|
so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
|
||||||
so.AllowedEntitlements = append(so.AllowedEntitlements, entitlements.EntitlementNetworkHost)
|
so.AllowedEntitlements = append(so.AllowedEntitlements, entitlements.EntitlementNetworkHost.String())
|
||||||
case "none":
|
case "none":
|
||||||
so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
|
so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
|
||||||
case "", "default":
|
case "", "default":
|
||||||
|
|||||||
@@ -8,8 +8,8 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/containerd/containerd/content"
|
"github.com/containerd/containerd/v2/core/content"
|
||||||
"github.com/containerd/containerd/content/proxy"
|
"github.com/containerd/containerd/v2/core/content/proxy"
|
||||||
"github.com/docker/buildx/util/confutil"
|
"github.com/docker/buildx/util/confutil"
|
||||||
"github.com/docker/buildx/util/progress"
|
"github.com/docker/buildx/util/progress"
|
||||||
controlapi "github.com/moby/buildkit/api/services/control"
|
controlapi "github.com/moby/buildkit/api/services/control"
|
||||||
|
|||||||
@@ -10,7 +10,6 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func generateRandomData(size int) []byte {
|
func generateRandomData(size int) []byte {
|
||||||
@@ -29,11 +28,11 @@ func TestSyncMultiReaderParallel(t *testing.T) {
|
|||||||
|
|
||||||
readers := make([]io.ReadCloser, numReaders)
|
readers := make([]io.ReadCloser, numReaders)
|
||||||
|
|
||||||
for i := 0; i < numReaders; i++ {
|
for i := range numReaders {
|
||||||
readers[i] = mr.NewReadCloser()
|
readers[i] = mr.NewReadCloser()
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := 0; i < numReaders; i++ {
|
for i := range numReaders {
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func(readerId int) {
|
go func(readerId int) {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
@@ -57,7 +56,7 @@ func TestSyncMultiReaderParallel(t *testing.T) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
require.NoError(t, err, "Reader %d error", readerId)
|
assert.NoError(t, err, "Reader %d error", readerId)
|
||||||
|
|
||||||
if mathrand.Intn(1000) == 0 { //nolint:gosec
|
if mathrand.Intn(1000) == 0 { //nolint:gosec
|
||||||
t.Logf("Reader %d closing", readerId)
|
t.Logf("Reader %d closing", readerId)
|
||||||
|
|||||||
@@ -82,7 +82,7 @@ func NewResultHandle(ctx context.Context, cc *client.Client, opt client.SolveOpt
|
|||||||
var respHandle *ResultHandle
|
var respHandle *ResultHandle
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
defer cancel(context.Canceled) // ensure no dangling processes
|
defer func() { cancel(errors.WithStack(context.Canceled)) }() // ensure no dangling processes
|
||||||
|
|
||||||
var res *gateway.Result
|
var res *gateway.Result
|
||||||
var err error
|
var err error
|
||||||
@@ -181,7 +181,7 @@ func NewResultHandle(ctx context.Context, cc *client.Client, opt client.SolveOpt
|
|||||||
case <-respHandle.done:
|
case <-respHandle.done:
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
}
|
}
|
||||||
return nil, ctx.Err()
|
return nil, context.Cause(ctx)
|
||||||
}, nil)
|
}, nil)
|
||||||
if respHandle != nil {
|
if respHandle != nil {
|
||||||
return
|
return
|
||||||
|
|||||||
@@ -138,7 +138,7 @@ func TestToBuildkitExtraHosts(t *testing.T) {
|
|||||||
actualOut, actualErr := toBuildkitExtraHosts(context.TODO(), tc.input, nil)
|
actualOut, actualErr := toBuildkitExtraHosts(context.TODO(), tc.input, nil)
|
||||||
if tc.expectedErr == "" {
|
if tc.expectedErr == "" {
|
||||||
require.Equal(t, tc.expectedOut, actualOut)
|
require.Equal(t, tc.expectedOut, actualOut)
|
||||||
require.Nil(t, actualErr)
|
require.NoError(t, actualErr)
|
||||||
} else {
|
} else {
|
||||||
require.Zero(t, actualOut)
|
require.Zero(t, actualOut)
|
||||||
require.Error(t, actualErr, tc.expectedErr)
|
require.Error(t, actualErr, tc.expectedErr)
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
|
"slices"
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
@@ -199,7 +200,7 @@ func (b *Builder) Boot(ctx context.Context) (bool, error) {
|
|||||||
err = err1
|
err = err1
|
||||||
}
|
}
|
||||||
|
|
||||||
if err == nil && len(errCh) == len(toBoot) {
|
if err == nil && len(errCh) > 0 {
|
||||||
return false, <-errCh
|
return false, <-errCh
|
||||||
}
|
}
|
||||||
return true, err
|
return true, err
|
||||||
@@ -288,7 +289,15 @@ func GetBuilders(dockerCli command.Cli, txn *store.Txn) ([]*Builder, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
builders := make([]*Builder, len(storeng))
|
contexts, err := dockerCli.ContextStore().List()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
sort.Slice(contexts, func(i, j int) bool {
|
||||||
|
return contexts[i].Name < contexts[j].Name
|
||||||
|
})
|
||||||
|
|
||||||
|
builders := make([]*Builder, len(storeng), len(storeng)+len(contexts))
|
||||||
seen := make(map[string]struct{})
|
seen := make(map[string]struct{})
|
||||||
for i, ng := range storeng {
|
for i, ng := range storeng {
|
||||||
b, err := New(dockerCli,
|
b, err := New(dockerCli,
|
||||||
@@ -303,14 +312,6 @@ func GetBuilders(dockerCli command.Cli, txn *store.Txn) ([]*Builder, error) {
|
|||||||
seen[b.NodeGroup.Name] = struct{}{}
|
seen[b.NodeGroup.Name] = struct{}{}
|
||||||
}
|
}
|
||||||
|
|
||||||
contexts, err := dockerCli.ContextStore().List()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
sort.Slice(contexts, func(i, j int) bool {
|
|
||||||
return contexts[i].Name < contexts[j].Name
|
|
||||||
})
|
|
||||||
|
|
||||||
for _, c := range contexts {
|
for _, c := range contexts {
|
||||||
// if a context has the same name as an instance from the store, do not
|
// if a context has the same name as an instance from the store, do not
|
||||||
// add it to the builders list. An instance from the store takes
|
// add it to the builders list. An instance from the store takes
|
||||||
@@ -522,8 +523,9 @@ func Create(ctx context.Context, txn *store.Txn, dockerCli command.Cli, opts Cre
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
|
cancelCtx, cancel := context.WithCancelCause(ctx)
|
||||||
defer cancel()
|
timeoutCtx, _ := context.WithTimeoutCause(cancelCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
|
||||||
|
defer func() { cancel(errors.WithStack(context.Canceled)) }()
|
||||||
|
|
||||||
nodes, err := b.LoadNodes(timeoutCtx, WithData())
|
nodes, err := b.LoadNodes(timeoutCtx, WithData())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -655,13 +657,7 @@ func parseBuildkitdFlags(inp string, driver string, driverOpts map[string]string
|
|||||||
flags.StringArrayVar(&allowInsecureEntitlements, "allow-insecure-entitlement", nil, "")
|
flags.StringArrayVar(&allowInsecureEntitlements, "allow-insecure-entitlement", nil, "")
|
||||||
_ = flags.Parse(res)
|
_ = flags.Parse(res)
|
||||||
|
|
||||||
var hasNetworkHostEntitlement bool
|
hasNetworkHostEntitlement := slices.Contains(allowInsecureEntitlements, "network.host")
|
||||||
for _, e := range allowInsecureEntitlements {
|
|
||||||
if e == "network.host" {
|
|
||||||
hasNetworkHostEntitlement = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var hasNetworkHostEntitlementInConf bool
|
var hasNetworkHostEntitlementInConf bool
|
||||||
if buildkitdConfigFile != "" {
|
if buildkitdConfigFile != "" {
|
||||||
@@ -670,11 +666,8 @@ func parseBuildkitdFlags(inp string, driver string, driverOpts map[string]string
|
|||||||
return nil, err
|
return nil, err
|
||||||
} else if btoml != nil {
|
} else if btoml != nil {
|
||||||
if ies := btoml.GetArray("insecure-entitlements"); ies != nil {
|
if ies := btoml.GetArray("insecure-entitlements"); ies != nil {
|
||||||
for _, e := range ies.([]string) {
|
if slices.Contains(ies.([]string), "network.host") {
|
||||||
if e == "network.host" {
|
hasNetworkHostEntitlementInConf = true
|
||||||
hasNetworkHostEntitlementInConf = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -19,17 +19,20 @@ func TestCsvToMap(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Contains(t, r, "tolerations")
|
require.Contains(t, r, "tolerations")
|
||||||
require.Equal(t, r["tolerations"], "key=foo,value=bar;key=foo2,value=bar2")
|
require.Equal(t, "key=foo,value=bar;key=foo2,value=bar2", r["tolerations"])
|
||||||
|
|
||||||
require.Contains(t, r, "replicas")
|
require.Contains(t, r, "replicas")
|
||||||
require.Equal(t, r["replicas"], "1")
|
require.Equal(t, "1", r["replicas"])
|
||||||
|
|
||||||
require.Contains(t, r, "namespace")
|
require.Contains(t, r, "namespace")
|
||||||
require.Equal(t, r["namespace"], "default")
|
require.Equal(t, "default", r["namespace"])
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestParseBuildkitdFlags(t *testing.T) {
|
func TestParseBuildkitdFlags(t *testing.T) {
|
||||||
buildkitdConf := `
|
dirConf := t.TempDir()
|
||||||
|
|
||||||
|
buildkitdConfPath := path.Join(dirConf, "buildkitd-conf.toml")
|
||||||
|
require.NoError(t, os.WriteFile(buildkitdConfPath, []byte(`
|
||||||
# debug enables additional debug logging
|
# debug enables additional debug logging
|
||||||
debug = true
|
debug = true
|
||||||
# insecure-entitlements allows insecure entitlements, disabled by default.
|
# insecure-entitlements allows insecure entitlements, disabled by default.
|
||||||
@@ -37,10 +40,18 @@ insecure-entitlements = [ "network.host", "security.insecure" ]
|
|||||||
[log]
|
[log]
|
||||||
# log formatter: json or text
|
# log formatter: json or text
|
||||||
format = "text"
|
format = "text"
|
||||||
`
|
`), 0644))
|
||||||
dirConf := t.TempDir()
|
|
||||||
buildkitdConfPath := path.Join(dirConf, "buildkitd-conf.toml")
|
buildkitdConfBrokenPath := path.Join(dirConf, "buildkitd-conf-broken.toml")
|
||||||
require.NoError(t, os.WriteFile(buildkitdConfPath, []byte(buildkitdConf), 0644))
|
require.NoError(t, os.WriteFile(buildkitdConfBrokenPath, []byte(`
|
||||||
|
[worker.oci]
|
||||||
|
gc = "maybe"
|
||||||
|
`), 0644))
|
||||||
|
|
||||||
|
buildkitdConfUnknownFieldPath := path.Join(dirConf, "buildkitd-unknown-field.toml")
|
||||||
|
require.NoError(t, os.WriteFile(buildkitdConfUnknownFieldPath, []byte(`
|
||||||
|
foo = "bar"
|
||||||
|
`), 0644))
|
||||||
|
|
||||||
testCases := []struct {
|
testCases := []struct {
|
||||||
name string
|
name string
|
||||||
@@ -157,6 +168,26 @@ insecure-entitlements = [ "network.host", "security.insecure" ]
|
|||||||
nil,
|
nil,
|
||||||
true,
|
true,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"error parsing buildkit config",
|
||||||
|
"",
|
||||||
|
"docker-container",
|
||||||
|
nil,
|
||||||
|
buildkitdConfBrokenPath,
|
||||||
|
nil,
|
||||||
|
true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"unknown field in buildkit config",
|
||||||
|
"",
|
||||||
|
"docker-container",
|
||||||
|
nil,
|
||||||
|
buildkitdConfUnknownFieldPath,
|
||||||
|
[]string{
|
||||||
|
"--allow-insecure-entitlement=network.host",
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
for _, tt := range testCases {
|
for _, tt := range testCases {
|
||||||
tt := tt
|
tt := tt
|
||||||
|
|||||||
@@ -32,10 +32,11 @@ type Node struct {
|
|||||||
Err error
|
Err error
|
||||||
|
|
||||||
// worker settings
|
// worker settings
|
||||||
IDs []string
|
IDs []string
|
||||||
Platforms []ocispecs.Platform
|
Platforms []ocispecs.Platform
|
||||||
GCPolicy []client.PruneInfo
|
GCPolicy []client.PruneInfo
|
||||||
Labels map[string]string
|
Labels map[string]string
|
||||||
|
CDIDevices []client.CDIDevice
|
||||||
}
|
}
|
||||||
|
|
||||||
// Nodes returns nodes for this builder.
|
// Nodes returns nodes for this builder.
|
||||||
@@ -168,7 +169,7 @@ func (b *Builder) LoadNodes(ctx context.Context, opts ...LoadNodesOption) (_ []N
|
|||||||
// dynamic nodes are used in Kubernetes driver.
|
// dynamic nodes are used in Kubernetes driver.
|
||||||
// Kubernetes' pods are dynamically mapped to BuildKit Nodes.
|
// Kubernetes' pods are dynamically mapped to BuildKit Nodes.
|
||||||
if di.DriverInfo != nil && len(di.DriverInfo.DynamicNodes) > 0 {
|
if di.DriverInfo != nil && len(di.DriverInfo.DynamicNodes) > 0 {
|
||||||
for i := 0; i < len(di.DriverInfo.DynamicNodes); i++ {
|
for i := range di.DriverInfo.DynamicNodes {
|
||||||
diClone := di
|
diClone := di
|
||||||
if pl := di.DriverInfo.DynamicNodes[i].Platforms; len(pl) > 0 {
|
if pl := di.DriverInfo.DynamicNodes[i].Platforms; len(pl) > 0 {
|
||||||
diClone.Platforms = pl
|
diClone.Platforms = pl
|
||||||
@@ -259,6 +260,7 @@ func (n *Node) loadData(ctx context.Context, clientOpt ...client.ClientOpt) erro
|
|||||||
n.GCPolicy = w.GCPolicy
|
n.GCPolicy = w.GCPolicy
|
||||||
n.Labels = w.Labels
|
n.Labels = w.Labels
|
||||||
}
|
}
|
||||||
|
n.CDIDevices = w.CDIDevices
|
||||||
}
|
}
|
||||||
sort.Strings(n.IDs)
|
sort.Strings(n.IDs)
|
||||||
n.Platforms = platformutil.Dedupe(n.Platforms)
|
n.Platforms = platformutil.Dedupe(n.Platforms)
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
"github.com/docker/buildx/commands"
|
"github.com/docker/buildx/commands"
|
||||||
controllererrors "github.com/docker/buildx/controller/errdefs"
|
controllererrors "github.com/docker/buildx/controller/errdefs"
|
||||||
@@ -20,9 +21,6 @@ import (
|
|||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"go.opentelemetry.io/otel"
|
"go.opentelemetry.io/otel"
|
||||||
|
|
||||||
//nolint:staticcheck // vendored dependencies may still use this
|
|
||||||
"github.com/containerd/containerd/pkg/seed"
|
|
||||||
|
|
||||||
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
|
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
|
||||||
|
|
||||||
_ "github.com/docker/buildx/driver/docker"
|
_ "github.com/docker/buildx/driver/docker"
|
||||||
@@ -35,9 +33,6 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
//nolint:staticcheck
|
|
||||||
seed.WithTimeAndRand()
|
|
||||||
|
|
||||||
stack.SetVersionInfo(version.Version, version.Revision)
|
stack.SetVersionInfo(version.Version, version.Revision)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -47,7 +42,8 @@ func runStandalone(cmd *command.DockerCli) error {
|
|||||||
}
|
}
|
||||||
defer flushMetrics(cmd)
|
defer flushMetrics(cmd)
|
||||||
|
|
||||||
rootCmd := commands.NewRootCmd(os.Args[0], false, cmd)
|
executable := os.Args[0]
|
||||||
|
rootCmd := commands.NewRootCmd(filepath.Base(executable), false, cmd)
|
||||||
return rootCmd.Execute()
|
return rootCmd.Execute()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
214
commands/bake.go
214
commands/bake.go
@@ -25,7 +25,6 @@ import (
|
|||||||
"github.com/docker/buildx/controller/pb"
|
"github.com/docker/buildx/controller/pb"
|
||||||
"github.com/docker/buildx/localstate"
|
"github.com/docker/buildx/localstate"
|
||||||
"github.com/docker/buildx/util/buildflags"
|
"github.com/docker/buildx/util/buildflags"
|
||||||
"github.com/docker/buildx/util/cobrautil"
|
|
||||||
"github.com/docker/buildx/util/cobrautil/completion"
|
"github.com/docker/buildx/util/cobrautil/completion"
|
||||||
"github.com/docker/buildx/util/confutil"
|
"github.com/docker/buildx/util/confutil"
|
||||||
"github.com/docker/buildx/util/desktop"
|
"github.com/docker/buildx/util/desktop"
|
||||||
@@ -38,30 +37,40 @@ import (
|
|||||||
"github.com/moby/buildkit/util/progress/progressui"
|
"github.com/moby/buildkit/util/progress/progressui"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
|
"github.com/tonistiigi/go-csvvalue"
|
||||||
"go.opentelemetry.io/otel/attribute"
|
"go.opentelemetry.io/otel/attribute"
|
||||||
)
|
)
|
||||||
|
|
||||||
type bakeOptions struct {
|
type bakeOptions struct {
|
||||||
files []string
|
files []string
|
||||||
overrides []string
|
overrides []string
|
||||||
printOnly bool
|
|
||||||
listTargets bool
|
sbom string
|
||||||
listVars bool
|
provenance string
|
||||||
sbom string
|
allow []string
|
||||||
provenance string
|
|
||||||
allow []string
|
|
||||||
|
|
||||||
builder string
|
builder string
|
||||||
metadataFile string
|
metadataFile string
|
||||||
exportPush bool
|
exportPush bool
|
||||||
exportLoad bool
|
exportLoad bool
|
||||||
callFunc string
|
callFunc string
|
||||||
|
|
||||||
|
print bool
|
||||||
|
list string
|
||||||
|
|
||||||
|
// TODO: remove deprecated flags
|
||||||
|
listTargets bool
|
||||||
|
listVars bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in bakeOptions, cFlags commonFlags) (err error) {
|
func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in bakeOptions, cFlags commonFlags) (err error) {
|
||||||
mp := dockerCli.MeterProvider()
|
mp := dockerCli.MeterProvider()
|
||||||
|
|
||||||
ctx, end, err := tracing.TraceCurrentCommand(ctx, "bake")
|
ctx, end, err := tracing.TraceCurrentCommand(ctx, append([]string{"bake"}, targets...),
|
||||||
|
attribute.String("builder", in.builder),
|
||||||
|
attribute.StringSlice("targets", targets),
|
||||||
|
attribute.StringSlice("files", in.files),
|
||||||
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -107,16 +116,27 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
wd, err := os.Getwd()
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to get current working directory")
|
||||||
|
}
|
||||||
|
// filesystem access under the current working directory is allowed by default
|
||||||
|
ent.FSRead = append(ent.FSRead, wd)
|
||||||
|
ent.FSWrite = append(ent.FSWrite, wd)
|
||||||
|
|
||||||
ctx2, cancel := context.WithCancel(context.TODO())
|
ctx2, cancel := context.WithCancelCause(context.TODO())
|
||||||
defer cancel()
|
defer cancel(errors.WithStack(context.Canceled))
|
||||||
|
|
||||||
var nodes []builder.Node
|
var nodes []builder.Node
|
||||||
var progressConsoleDesc, progressTextDesc string
|
var progressConsoleDesc, progressTextDesc string
|
||||||
|
|
||||||
|
if in.print && in.list != "" {
|
||||||
|
return errors.New("--print and --list are mutually exclusive")
|
||||||
|
}
|
||||||
|
|
||||||
// instance only needed for reading remote bake files or building
|
// instance only needed for reading remote bake files or building
|
||||||
var driverType string
|
var driverType string
|
||||||
if url != "" || !in.printOnly {
|
if url != "" || !(in.print || in.list != "") {
|
||||||
b, err := builder.New(dockerCli,
|
b, err := builder.New(dockerCli,
|
||||||
builder.WithName(in.builder),
|
builder.WithName(in.builder),
|
||||||
builder.WithContextPathHash(contextPathHash),
|
builder.WithContextPathHash(contextPathHash),
|
||||||
@@ -177,7 +197,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
|
|||||||
"BAKE_LOCAL_PLATFORM": platforms.Format(platforms.DefaultSpec()),
|
"BAKE_LOCAL_PLATFORM": platforms.Format(platforms.DefaultSpec()),
|
||||||
}
|
}
|
||||||
|
|
||||||
if in.listTargets || in.listVars {
|
if in.list != "" {
|
||||||
cfg, pm, err := bake.ParseFiles(files, defaults)
|
cfg, pm, err := bake.ParseFiles(files, defaults)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -185,14 +205,19 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
|
|||||||
if err = printer.Wait(); err != nil {
|
if err = printer.Wait(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if in.listTargets {
|
list, err := parseList(in.list)
|
||||||
return printTargetList(dockerCli.Out(), cfg)
|
if err != nil {
|
||||||
} else if in.listVars {
|
return err
|
||||||
return printVars(dockerCli.Out(), pm.AllVariables)
|
}
|
||||||
|
switch list.Type {
|
||||||
|
case "targets":
|
||||||
|
return printTargetList(dockerCli.Out(), list.Format, cfg)
|
||||||
|
case "variables":
|
||||||
|
return printVars(dockerCli.Out(), list.Format, pm.AllVariables)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
tgts, grps, err := bake.ReadTargets(ctx, files, targets, overrides, defaults)
|
tgts, grps, err := bake.ReadTargets(ctx, files, targets, overrides, defaults, &ent)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -224,7 +249,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
|
|||||||
Target: tgts,
|
Target: tgts,
|
||||||
}
|
}
|
||||||
|
|
||||||
if in.printOnly {
|
if in.print {
|
||||||
if err = printer.Wait(); err != nil {
|
if err = printer.Wait(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -250,8 +275,10 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := exp.Prompt(ctx, &syncWriter{w: dockerCli.Err(), wait: printer.Wait}); err != nil {
|
if progressMode != progressui.RawJSONMode {
|
||||||
return err
|
if err := exp.Prompt(ctx, url != "", &syncWriter{w: dockerCli.Err(), wait: printer.Wait}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if printer.IsDone() {
|
if printer.IsDone() {
|
||||||
// init new printer as old one was stopped to show the prompt
|
// init new printer as old one was stopped to show the prompt
|
||||||
@@ -260,7 +287,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := saveLocalStateGroup(dockerCli, in, targets, bo, overrides, def); err != nil {
|
if err := saveLocalStateGroup(dockerCli, in, targets, bo); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -282,7 +309,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
|
|||||||
desktop.PrintBuildDetails(os.Stderr, printer.BuildRefs(), term)
|
desktop.PrintBuildDetails(os.Stderr, printer.BuildRefs(), term)
|
||||||
}
|
}
|
||||||
if len(in.metadataFile) > 0 {
|
if len(in.metadataFile) > 0 {
|
||||||
dt := make(map[string]interface{})
|
dt := make(map[string]any)
|
||||||
for t, r := range resp {
|
for t, r := range resp {
|
||||||
dt[t] = decodeExporterResponse(r.ExporterResponse)
|
dt[t] = decodeExporterResponse(r.ExporterResponse)
|
||||||
}
|
}
|
||||||
@@ -420,6 +447,13 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
if !cmd.Flags().Lookup("pull").Changed {
|
if !cmd.Flags().Lookup("pull").Changed {
|
||||||
cFlags.pull = nil
|
cFlags.pull = nil
|
||||||
}
|
}
|
||||||
|
if options.list == "" {
|
||||||
|
if options.listTargets {
|
||||||
|
options.list = "targets"
|
||||||
|
} else if options.listVars {
|
||||||
|
options.list = "variables"
|
||||||
|
}
|
||||||
|
}
|
||||||
options.builder = rootOpts.builder
|
options.builder = rootOpts.builder
|
||||||
options.metadataFile = cFlags.metadataFile
|
options.metadataFile = cFlags.metadataFile
|
||||||
// Other common flags (noCache, pull and progress) are processed in runBake function.
|
// Other common flags (noCache, pull and progress) are processed in runBake function.
|
||||||
@@ -432,7 +466,6 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
|
|
||||||
flags.StringArrayVarP(&options.files, "file", "f", []string{}, "Build definition file")
|
flags.StringArrayVarP(&options.files, "file", "f", []string{}, "Build definition file")
|
||||||
flags.BoolVar(&options.exportLoad, "load", false, `Shorthand for "--set=*.output=type=docker"`)
|
flags.BoolVar(&options.exportLoad, "load", false, `Shorthand for "--set=*.output=type=docker"`)
|
||||||
flags.BoolVar(&options.printOnly, "print", false, "Print the options without building")
|
|
||||||
flags.BoolVar(&options.exportPush, "push", false, `Shorthand for "--set=*.output=type=registry"`)
|
flags.BoolVar(&options.exportPush, "push", false, `Shorthand for "--set=*.output=type=registry"`)
|
||||||
flags.StringVar(&options.sbom, "sbom", "", `Shorthand for "--set=*.attest=type=sbom"`)
|
flags.StringVar(&options.sbom, "sbom", "", `Shorthand for "--set=*.attest=type=sbom"`)
|
||||||
flags.StringVar(&options.provenance, "provenance", "", `Shorthand for "--set=*.attest=type=provenance"`)
|
flags.StringVar(&options.provenance, "provenance", "", `Shorthand for "--set=*.attest=type=provenance"`)
|
||||||
@@ -443,20 +476,30 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
flags.VarPF(callAlias(&options.callFunc, "check"), "check", "", `Shorthand for "--call=check"`)
|
flags.VarPF(callAlias(&options.callFunc, "check"), "check", "", `Shorthand for "--call=check"`)
|
||||||
flags.Lookup("check").NoOptDefVal = "true"
|
flags.Lookup("check").NoOptDefVal = "true"
|
||||||
|
|
||||||
flags.BoolVar(&options.listTargets, "list-targets", false, "List available targets")
|
flags.BoolVar(&options.print, "print", false, "Print the options without building")
|
||||||
cobrautil.MarkFlagsExperimental(flags, "list-targets")
|
flags.StringVar(&options.list, "list", "", "List targets or variables")
|
||||||
flags.MarkHidden("list-targets")
|
|
||||||
|
|
||||||
|
// TODO: remove deprecated flags
|
||||||
|
flags.BoolVar(&options.listTargets, "list-targets", false, "List available targets")
|
||||||
|
flags.MarkHidden("list-targets")
|
||||||
|
flags.MarkDeprecated("list-targets", "list-targets is deprecated, use list=targets instead")
|
||||||
flags.BoolVar(&options.listVars, "list-variables", false, "List defined variables")
|
flags.BoolVar(&options.listVars, "list-variables", false, "List defined variables")
|
||||||
cobrautil.MarkFlagsExperimental(flags, "list-variables")
|
|
||||||
flags.MarkHidden("list-variables")
|
flags.MarkHidden("list-variables")
|
||||||
|
flags.MarkDeprecated("list-variables", "list-variables is deprecated, use list=variables instead")
|
||||||
|
|
||||||
commonBuildFlags(&cFlags, flags)
|
commonBuildFlags(&cFlags, flags)
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|
||||||
func saveLocalStateGroup(dockerCli command.Cli, in bakeOptions, targets []string, bo map[string]build.Options, overrides []string, def any) error {
|
func saveLocalStateGroup(dockerCli command.Cli, in bakeOptions, targets []string, bo map[string]build.Options) error {
|
||||||
|
l, err := localstate.New(confutil.NewConfig(dockerCli))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer l.MigrateIfNeeded()
|
||||||
|
|
||||||
prm := confutil.MetadataProvenance()
|
prm := confutil.MetadataProvenance()
|
||||||
if len(in.metadataFile) == 0 {
|
if len(in.metadataFile) == 0 {
|
||||||
prm = confutil.MetadataProvenanceModeDisabled
|
prm = confutil.MetadataProvenanceModeDisabled
|
||||||
@@ -476,19 +519,10 @@ func saveLocalStateGroup(dockerCli command.Cli, in bakeOptions, targets []string
|
|||||||
if len(refs) == 0 {
|
if len(refs) == 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
l, err := localstate.New(confutil.NewConfig(dockerCli))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
dtdef, err := json.MarshalIndent(def, "", " ")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return l.SaveGroup(groupRef, localstate.StateGroup{
|
return l.SaveGroup(groupRef, localstate.StateGroup{
|
||||||
Definition: dtdef,
|
Refs: refs,
|
||||||
Targets: targets,
|
Targets: targets,
|
||||||
Inputs: overrides,
|
|
||||||
Refs: refs,
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -550,10 +584,70 @@ func readBakeFiles(ctx context.Context, nodes []builder.Node, url string, names
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func printVars(w io.Writer, vars []*hclparser.Variable) error {
|
type listEntry struct {
|
||||||
|
Type string
|
||||||
|
Format string
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseList(input string) (listEntry, error) {
|
||||||
|
res := listEntry{}
|
||||||
|
|
||||||
|
fields, err := csvvalue.Fields(input, nil)
|
||||||
|
if err != nil {
|
||||||
|
return res, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(fields) == 1 && fields[0] == input && !strings.HasPrefix(input, "type=") {
|
||||||
|
res.Type = input
|
||||||
|
}
|
||||||
|
|
||||||
|
if res.Type == "" {
|
||||||
|
for _, field := range fields {
|
||||||
|
key, value, ok := strings.Cut(field, "=")
|
||||||
|
if !ok {
|
||||||
|
return res, errors.Errorf("invalid value %s", field)
|
||||||
|
}
|
||||||
|
key = strings.TrimSpace(strings.ToLower(key))
|
||||||
|
switch key {
|
||||||
|
case "type":
|
||||||
|
res.Type = value
|
||||||
|
case "format":
|
||||||
|
res.Format = value
|
||||||
|
default:
|
||||||
|
return res, errors.Errorf("unexpected key '%s' in '%s'", key, field)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if res.Format == "" {
|
||||||
|
res.Format = "table"
|
||||||
|
}
|
||||||
|
|
||||||
|
switch res.Type {
|
||||||
|
case "targets", "variables":
|
||||||
|
default:
|
||||||
|
return res, errors.Errorf("invalid list type %q", res.Type)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch res.Format {
|
||||||
|
case "table", "json":
|
||||||
|
default:
|
||||||
|
return res, errors.Errorf("invalid list format %q", res.Format)
|
||||||
|
}
|
||||||
|
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func printVars(w io.Writer, format string, vars []*hclparser.Variable) error {
|
||||||
slices.SortFunc(vars, func(a, b *hclparser.Variable) int {
|
slices.SortFunc(vars, func(a, b *hclparser.Variable) int {
|
||||||
return cmp.Compare(a.Name, b.Name)
|
return cmp.Compare(a.Name, b.Name)
|
||||||
})
|
})
|
||||||
|
|
||||||
|
if format == "json" {
|
||||||
|
enc := json.NewEncoder(w)
|
||||||
|
enc.SetIndent("", " ")
|
||||||
|
return enc.Encode(vars)
|
||||||
|
}
|
||||||
|
|
||||||
tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
|
tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
|
||||||
defer tw.Flush()
|
defer tw.Flush()
|
||||||
|
|
||||||
@@ -571,12 +665,7 @@ func printVars(w io.Writer, vars []*hclparser.Variable) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func printTargetList(w io.Writer, cfg *bake.Config) error {
|
func printTargetList(w io.Writer, format string, cfg *bake.Config) error {
|
||||||
tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
|
|
||||||
defer tw.Flush()
|
|
||||||
|
|
||||||
tw.Write([]byte("TARGET\tDESCRIPTION\n"))
|
|
||||||
|
|
||||||
type targetOrGroup struct {
|
type targetOrGroup struct {
|
||||||
name string
|
name string
|
||||||
target *bake.Target
|
target *bake.Target
|
||||||
@@ -595,6 +684,20 @@ func printTargetList(w io.Writer, cfg *bake.Config) error {
|
|||||||
return cmp.Compare(a.name, b.name)
|
return cmp.Compare(a.name, b.name)
|
||||||
})
|
})
|
||||||
|
|
||||||
|
var tw *tabwriter.Writer
|
||||||
|
if format == "table" {
|
||||||
|
tw = tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
|
||||||
|
defer tw.Flush()
|
||||||
|
tw.Write([]byte("TARGET\tDESCRIPTION\n"))
|
||||||
|
}
|
||||||
|
|
||||||
|
type targetList struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Description string `json:"description,omitempty"`
|
||||||
|
Group bool `json:"group,omitempty"`
|
||||||
|
}
|
||||||
|
var targetsList []targetList
|
||||||
|
|
||||||
for _, tgt := range list {
|
for _, tgt := range list {
|
||||||
if strings.HasPrefix(tgt.name, "_") {
|
if strings.HasPrefix(tgt.name, "_") {
|
||||||
// convention for a private target
|
// convention for a private target
|
||||||
@@ -603,9 +706,9 @@ func printTargetList(w io.Writer, cfg *bake.Config) error {
|
|||||||
var descr string
|
var descr string
|
||||||
if tgt.target != nil {
|
if tgt.target != nil {
|
||||||
descr = tgt.target.Description
|
descr = tgt.target.Description
|
||||||
|
targetsList = append(targetsList, targetList{Name: tgt.name, Description: descr})
|
||||||
} else if tgt.group != nil {
|
} else if tgt.group != nil {
|
||||||
descr = tgt.group.Description
|
descr = tgt.group.Description
|
||||||
|
|
||||||
if len(tgt.group.Targets) > 0 {
|
if len(tgt.group.Targets) > 0 {
|
||||||
slices.Sort(tgt.group.Targets)
|
slices.Sort(tgt.group.Targets)
|
||||||
names := strings.Join(tgt.group.Targets, ", ")
|
names := strings.Join(tgt.group.Targets, ", ")
|
||||||
@@ -615,8 +718,17 @@ func printTargetList(w io.Writer, cfg *bake.Config) error {
|
|||||||
descr = names
|
descr = names
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
targetsList = append(targetsList, targetList{Name: tgt.name, Description: descr, Group: true})
|
||||||
}
|
}
|
||||||
fmt.Fprintf(tw, "%s\t%s\n", tgt.name, descr)
|
if format == "table" {
|
||||||
|
fmt.Fprintf(tw, "%s\t%s\n", tgt.name, descr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if format == "json" {
|
||||||
|
enc := json.NewEncoder(w)
|
||||||
|
enc.SetIndent("", " ")
|
||||||
|
return enc.Encode(targetsList)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
|||||||
@@ -11,6 +11,7 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"slices"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
@@ -41,7 +42,7 @@ import (
|
|||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
dockeropts "github.com/docker/cli/opts"
|
dockeropts "github.com/docker/cli/opts"
|
||||||
"github.com/docker/docker/api/types/versions"
|
"github.com/docker/docker/api/types/versions"
|
||||||
"github.com/docker/docker/pkg/ioutils"
|
"github.com/docker/docker/pkg/atomicwriter"
|
||||||
"github.com/moby/buildkit/client"
|
"github.com/moby/buildkit/client"
|
||||||
"github.com/moby/buildkit/exporter/containerimage/exptypes"
|
"github.com/moby/buildkit/exporter/containerimage/exptypes"
|
||||||
"github.com/moby/buildkit/frontend/subrequests"
|
"github.com/moby/buildkit/frontend/subrequests"
|
||||||
@@ -156,7 +157,7 @@ func (o *buildOptions) toControllerOptions() (*controllerapi.BuildOptions, error
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
inAttests := append([]string{}, o.attests...)
|
inAttests := slices.Clone(o.attests)
|
||||||
if o.provenance != "" {
|
if o.provenance != "" {
|
||||||
inAttests = append(inAttests, buildflags.CanonicalizeAttest("provenance", o.provenance))
|
inAttests = append(inAttests, buildflags.CanonicalizeAttest("provenance", o.provenance))
|
||||||
}
|
}
|
||||||
@@ -183,14 +184,17 @@ func (o *buildOptions) toControllerOptions() (*controllerapi.BuildOptions, error
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
opts.CacheFrom, err = buildflags.ParseCacheEntry(o.cacheFrom)
|
cacheFrom, err := buildflags.ParseCacheEntry(o.cacheFrom)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
opts.CacheTo, err = buildflags.ParseCacheEntry(o.cacheTo)
|
opts.CacheFrom = cacheFrom.ToPB()
|
||||||
|
|
||||||
|
cacheTo, err := buildflags.ParseCacheEntry(o.cacheTo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
opts.CacheTo = cacheTo.ToPB()
|
||||||
|
|
||||||
opts.Secrets, err = buildflags.ParseSecretSpecs(o.secrets)
|
opts.Secrets, err = buildflags.ParseSecretSpecs(o.secrets)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -282,7 +286,11 @@ func (o *buildOptionsHash) String() string {
|
|||||||
func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions) (err error) {
|
func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions) (err error) {
|
||||||
mp := dockerCli.MeterProvider()
|
mp := dockerCli.MeterProvider()
|
||||||
|
|
||||||
ctx, end, err := tracing.TraceCurrentCommand(ctx, "build")
|
ctx, end, err := tracing.TraceCurrentCommand(ctx, []string{"build", options.contextPath},
|
||||||
|
attribute.String("builder", options.builder),
|
||||||
|
attribute.String("context", options.contextPath),
|
||||||
|
attribute.String("dockerfile", options.dockerfileName),
|
||||||
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -325,8 +333,8 @@ func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions)
|
|||||||
}
|
}
|
||||||
attributes := buildMetricAttributes(dockerCli, driverType, &options)
|
attributes := buildMetricAttributes(dockerCli, driverType, &options)
|
||||||
|
|
||||||
ctx2, cancel := context.WithCancel(context.TODO())
|
ctx2, cancel := context.WithCancelCause(context.TODO())
|
||||||
defer cancel()
|
defer func() { cancel(errors.WithStack(context.Canceled)) }()
|
||||||
progressMode, err := options.toDisplayMode()
|
progressMode, err := options.toDisplayMode()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -463,7 +471,7 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *contro
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
var be *controllererrors.BuildError
|
var be *controllererrors.BuildError
|
||||||
if errors.As(err, &be) {
|
if errors.As(err, &be) {
|
||||||
ref = be.Ref
|
ref = be.SessionID
|
||||||
retErr = err
|
retErr = err
|
||||||
// We can proceed to monitor
|
// We can proceed to monitor
|
||||||
} else {
|
} else {
|
||||||
@@ -590,7 +598,7 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions, debugConfig *debug.D
|
|||||||
|
|
||||||
flags.StringSliceVar(&options.extraHosts, "add-host", []string{}, `Add a custom host-to-IP mapping (format: "host:ip")`)
|
flags.StringSliceVar(&options.extraHosts, "add-host", []string{}, `Add a custom host-to-IP mapping (format: "host:ip")`)
|
||||||
|
|
||||||
flags.StringSliceVar(&options.allow, "allow", []string{}, `Allow extra privileged entitlement (e.g., "network.host", "security.insecure")`)
|
flags.StringArrayVar(&options.allow, "allow", []string{}, `Allow extra privileged entitlement (e.g., "network.host", "security.insecure")`)
|
||||||
|
|
||||||
flags.StringArrayVarP(&options.annotations, "annotation", "", []string{}, "Add annotation to the image")
|
flags.StringArrayVarP(&options.annotations, "annotation", "", []string{}, "Add annotation to the image")
|
||||||
|
|
||||||
@@ -720,7 +728,7 @@ type commonFlags struct {
|
|||||||
|
|
||||||
func commonBuildFlags(options *commonFlags, flags *pflag.FlagSet) {
|
func commonBuildFlags(options *commonFlags, flags *pflag.FlagSet) {
|
||||||
options.noCache = flags.Bool("no-cache", false, "Do not use cache when building the image")
|
options.noCache = flags.Bool("no-cache", false, "Do not use cache when building the image")
|
||||||
flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty", "rawjson"). Use plain to show container output`)
|
flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "quiet", "plain", "tty", "rawjson"). Use plain to show container output`)
|
||||||
options.pull = flags.Bool("pull", false, "Always attempt to pull all referenced images")
|
options.pull = flags.Bool("pull", false, "Always attempt to pull all referenced images")
|
||||||
flags.StringVar(&options.metadataFile, "metadata-file", "", "Write build result metadata to a file")
|
flags.StringVar(&options.metadataFile, "metadata-file", "", "Write build result metadata to a file")
|
||||||
}
|
}
|
||||||
@@ -737,15 +745,15 @@ func checkWarnedFlags(f *pflag.Flag) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeMetadataFile(filename string, dt interface{}) error {
|
func writeMetadataFile(filename string, dt any) error {
|
||||||
b, err := json.MarshalIndent(dt, "", " ")
|
b, err := json.MarshalIndent(dt, "", " ")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return ioutils.AtomicWriteFile(filename, b, 0644)
|
return atomicwriter.WriteFile(filename, b, 0644)
|
||||||
}
|
}
|
||||||
|
|
||||||
func decodeExporterResponse(exporterResponse map[string]string) map[string]interface{} {
|
func decodeExporterResponse(exporterResponse map[string]string) map[string]any {
|
||||||
decFunc := func(k, v string) ([]byte, error) {
|
decFunc := func(k, v string) ([]byte, error) {
|
||||||
if k == "result.json" {
|
if k == "result.json" {
|
||||||
// result.json is part of metadata response for subrequests which
|
// result.json is part of metadata response for subrequests which
|
||||||
@@ -754,17 +762,20 @@ func decodeExporterResponse(exporterResponse map[string]string) map[string]inter
|
|||||||
}
|
}
|
||||||
return base64.StdEncoding.DecodeString(v)
|
return base64.StdEncoding.DecodeString(v)
|
||||||
}
|
}
|
||||||
out := make(map[string]interface{})
|
out := make(map[string]any)
|
||||||
for k, v := range exporterResponse {
|
for k, v := range exporterResponse {
|
||||||
dt, err := decFunc(k, v)
|
dt, err := decFunc(k, v)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
out[k] = v
|
out[k] = v
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
var raw map[string]interface{}
|
var raw map[string]any
|
||||||
if err = json.Unmarshal(dt, &raw); err != nil || len(raw) == 0 {
|
if err = json.Unmarshal(dt, &raw); err != nil || len(raw) == 0 {
|
||||||
out[k] = v
|
var rawList []map[string]any
|
||||||
continue
|
if err = json.Unmarshal(dt, &rawList); err != nil || len(rawList) == 0 {
|
||||||
|
out[k] = v
|
||||||
|
continue
|
||||||
|
}
|
||||||
}
|
}
|
||||||
out[k] = json.RawMessage(dt)
|
out[k] = json.RawMessage(dt)
|
||||||
}
|
}
|
||||||
@@ -882,7 +893,6 @@ func printWarnings(w io.Writer, warnings []client.VertexWarning, mode progressui
|
|||||||
src.Print(w)
|
src.Print(w)
|
||||||
}
|
}
|
||||||
fmt.Fprintf(w, "\n")
|
fmt.Fprintf(w, "\n")
|
||||||
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -124,7 +124,7 @@ func duCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|
||||||
func printKV(w io.Writer, k string, v interface{}) {
|
func printKV(w io.Writer, k string, v any) {
|
||||||
fmt.Fprintf(w, "%s:\t%v\n", k, v)
|
fmt.Fprintf(w, "%s:\t%v\n", k, v)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
135
commands/history/import.go
Normal file
135
commands/history/import.go
Normal file
@@ -0,0 +1,135 @@
|
|||||||
|
package history
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
remoteutil "github.com/docker/buildx/driver/remote/util"
|
||||||
|
"github.com/docker/buildx/util/cobrautil/completion"
|
||||||
|
"github.com/docker/buildx/util/desktop"
|
||||||
|
"github.com/docker/cli/cli/command"
|
||||||
|
"github.com/pkg/browser"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
type importOptions struct {
|
||||||
|
file []string
|
||||||
|
}
|
||||||
|
|
||||||
|
func runImport(ctx context.Context, dockerCli command.Cli, opts importOptions) error {
|
||||||
|
sock, err := desktop.BuildServerAddr()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
tr := http.DefaultTransport.(*http.Transport).Clone()
|
||||||
|
tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) {
|
||||||
|
network, addr, ok := strings.Cut(sock, "://")
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.Errorf("invalid endpoint address: %s", sock)
|
||||||
|
}
|
||||||
|
return remoteutil.DialContext(ctx, network, addr)
|
||||||
|
}
|
||||||
|
|
||||||
|
client := &http.Client{
|
||||||
|
Transport: tr,
|
||||||
|
}
|
||||||
|
|
||||||
|
var urls []string
|
||||||
|
|
||||||
|
if len(opts.file) == 0 {
|
||||||
|
u, err := importFrom(ctx, client, os.Stdin)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
urls = append(urls, u...)
|
||||||
|
} else {
|
||||||
|
for _, fn := range opts.file {
|
||||||
|
var f *os.File
|
||||||
|
var rdr io.Reader = os.Stdin
|
||||||
|
if fn != "-" {
|
||||||
|
f, err = os.Open(fn)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to open file %s", fn)
|
||||||
|
}
|
||||||
|
rdr = f
|
||||||
|
}
|
||||||
|
u, err := importFrom(ctx, client, rdr)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
urls = append(urls, u...)
|
||||||
|
if f != nil {
|
||||||
|
f.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(urls) == 0 {
|
||||||
|
return errors.New("no build records found in the bundle")
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, url := range urls {
|
||||||
|
fmt.Fprintln(dockerCli.Err(), url)
|
||||||
|
if i == 0 {
|
||||||
|
err = browser.OpenURL(url)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func importFrom(ctx context.Context, c *http.Client, rdr io.Reader) ([]string, error) {
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodPost, "http://docker-desktop/upload", rdr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "failed to create request")
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := c.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "failed to send request, check if Docker Desktop is running")
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
body, _ := io.ReadAll(resp.Body)
|
||||||
|
return nil, errors.Errorf("failed to import build: %s", string(body))
|
||||||
|
}
|
||||||
|
|
||||||
|
var refs []string
|
||||||
|
dec := json.NewDecoder(resp.Body)
|
||||||
|
if err := dec.Decode(&refs); err != nil {
|
||||||
|
return nil, errors.Wrap(err, "failed to decode response")
|
||||||
|
}
|
||||||
|
|
||||||
|
var urls []string
|
||||||
|
for _, ref := range refs {
|
||||||
|
urls = append(urls, desktop.BuildURL(fmt.Sprintf(".imported/_/%s", ref)))
|
||||||
|
}
|
||||||
|
return urls, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func importCmd(dockerCli command.Cli, _ RootOptions) *cobra.Command {
|
||||||
|
var options importOptions
|
||||||
|
|
||||||
|
cmd := &cobra.Command{
|
||||||
|
Use: "import [OPTIONS] < bundle.dockerbuild",
|
||||||
|
Short: "Import a build into Docker Desktop",
|
||||||
|
Args: cobra.NoArgs,
|
||||||
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
return runImport(cmd.Context(), dockerCli, options)
|
||||||
|
},
|
||||||
|
ValidArgsFunction: completion.Disable,
|
||||||
|
}
|
||||||
|
|
||||||
|
flags := cmd.Flags()
|
||||||
|
flags.StringArrayVarP(&options.file, "file", "f", nil, "Import from a file path")
|
||||||
|
|
||||||
|
return cmd
|
||||||
|
}
|
||||||
893
commands/history/inspect.go
Normal file
893
commands/history/inspect.go
Normal file
@@ -0,0 +1,893 @@
|
|||||||
|
package history
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"cmp"
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"slices"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"text/tabwriter"
|
||||||
|
"text/template"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/containerd/containerd/v2/core/content"
|
||||||
|
"github.com/containerd/containerd/v2/core/content/proxy"
|
||||||
|
"github.com/containerd/containerd/v2/core/images"
|
||||||
|
"github.com/containerd/platforms"
|
||||||
|
"github.com/docker/buildx/builder"
|
||||||
|
"github.com/docker/buildx/localstate"
|
||||||
|
"github.com/docker/buildx/util/cobrautil/completion"
|
||||||
|
"github.com/docker/buildx/util/confutil"
|
||||||
|
"github.com/docker/buildx/util/desktop"
|
||||||
|
"github.com/docker/cli/cli/command"
|
||||||
|
"github.com/docker/cli/cli/command/formatter"
|
||||||
|
"github.com/docker/cli/cli/debug"
|
||||||
|
slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common"
|
||||||
|
slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2"
|
||||||
|
controlapi "github.com/moby/buildkit/api/services/control"
|
||||||
|
"github.com/moby/buildkit/client"
|
||||||
|
"github.com/moby/buildkit/solver/errdefs"
|
||||||
|
provenancetypes "github.com/moby/buildkit/solver/llbsolver/provenance/types"
|
||||||
|
"github.com/moby/buildkit/util/grpcerrors"
|
||||||
|
"github.com/moby/buildkit/util/stack"
|
||||||
|
"github.com/opencontainers/go-digest"
|
||||||
|
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
"github.com/tonistiigi/go-csvvalue"
|
||||||
|
spb "google.golang.org/genproto/googleapis/rpc/status"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
proto "google.golang.org/protobuf/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
type statusT string
|
||||||
|
|
||||||
|
const (
|
||||||
|
statusComplete statusT = "completed"
|
||||||
|
statusRunning statusT = "running"
|
||||||
|
statusError statusT = "failed"
|
||||||
|
statusCanceled statusT = "canceled"
|
||||||
|
)
|
||||||
|
|
||||||
|
type inspectOptions struct {
|
||||||
|
builder string
|
||||||
|
ref string
|
||||||
|
format string
|
||||||
|
}
|
||||||
|
|
||||||
|
type inspectOutput struct {
|
||||||
|
Name string `json:",omitempty"`
|
||||||
|
Ref string
|
||||||
|
|
||||||
|
Context string `json:",omitempty"`
|
||||||
|
Dockerfile string `json:",omitempty"`
|
||||||
|
VCSRepository string `json:",omitempty"`
|
||||||
|
VCSRevision string `json:",omitempty"`
|
||||||
|
Target string `json:",omitempty"`
|
||||||
|
Platform []string `json:",omitempty"`
|
||||||
|
KeepGitDir bool `json:",omitempty"`
|
||||||
|
|
||||||
|
NamedContexts []keyValueOutput `json:",omitempty"`
|
||||||
|
|
||||||
|
StartedAt *time.Time `json:",omitempty"`
|
||||||
|
CompletedAt *time.Time `json:",omitempty"`
|
||||||
|
Duration time.Duration `json:",omitempty"`
|
||||||
|
Status statusT `json:",omitempty"`
|
||||||
|
Error *errorOutput `json:",omitempty"`
|
||||||
|
|
||||||
|
NumCompletedSteps int32
|
||||||
|
NumTotalSteps int32
|
||||||
|
NumCachedSteps int32
|
||||||
|
|
||||||
|
BuildArgs []keyValueOutput `json:",omitempty"`
|
||||||
|
Labels []keyValueOutput `json:",omitempty"`
|
||||||
|
|
||||||
|
Config configOutput `json:",omitempty"`
|
||||||
|
|
||||||
|
Materials []materialOutput `json:",omitempty"`
|
||||||
|
Attachments []attachmentOutput `json:",omitempty"`
|
||||||
|
|
||||||
|
Errors []string `json:",omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type configOutput struct {
|
||||||
|
Network string `json:",omitempty"`
|
||||||
|
ExtraHosts []string `json:",omitempty"`
|
||||||
|
Hostname string `json:",omitempty"`
|
||||||
|
CgroupParent string `json:",omitempty"`
|
||||||
|
ImageResolveMode string `json:",omitempty"`
|
||||||
|
MultiPlatform bool `json:",omitempty"`
|
||||||
|
NoCache bool `json:",omitempty"`
|
||||||
|
NoCacheFilter []string `json:",omitempty"`
|
||||||
|
|
||||||
|
ShmSize string `json:",omitempty"`
|
||||||
|
Ulimit string `json:",omitempty"`
|
||||||
|
CacheMountNS string `json:",omitempty"`
|
||||||
|
DockerfileCheckConfig string `json:",omitempty"`
|
||||||
|
SourceDateEpoch string `json:",omitempty"`
|
||||||
|
SandboxHostname string `json:",omitempty"`
|
||||||
|
|
||||||
|
RestRaw []keyValueOutput `json:",omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type materialOutput struct {
|
||||||
|
URI string `json:",omitempty"`
|
||||||
|
Digests []string `json:",omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type attachmentOutput struct {
|
||||||
|
Digest string `json:",omitempty"`
|
||||||
|
Platform string `json:",omitempty"`
|
||||||
|
Type string `json:",omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type errorOutput struct {
|
||||||
|
Code int `json:",omitempty"`
|
||||||
|
Message string `json:",omitempty"`
|
||||||
|
Name string `json:",omitempty"`
|
||||||
|
Logs []string `json:",omitempty"`
|
||||||
|
Sources []byte `json:",omitempty"`
|
||||||
|
Stack []byte `json:",omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type keyValueOutput struct {
|
||||||
|
Name string `json:",omitempty"`
|
||||||
|
Value string `json:",omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func readAttr[T any](attrs map[string]string, k string, dest *T, f func(v string) (T, bool)) {
|
||||||
|
if sv, ok := attrs[k]; ok {
|
||||||
|
if f != nil {
|
||||||
|
v, ok := f(sv)
|
||||||
|
if ok {
|
||||||
|
*dest = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if d, ok := any(dest).(*string); ok {
|
||||||
|
*d = sv
|
||||||
|
}
|
||||||
|
}
|
||||||
|
delete(attrs, k)
|
||||||
|
}
|
||||||
|
|
||||||
|
func runInspect(ctx context.Context, dockerCli command.Cli, opts inspectOptions) error {
|
||||||
|
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
nodes, err := b.LoadNodes(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, node := range nodes {
|
||||||
|
if node.Err != nil {
|
||||||
|
return node.Err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
recs, err := queryRecords(ctx, opts.ref, nodes, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(recs) == 0 {
|
||||||
|
if opts.ref == "" {
|
||||||
|
return errors.New("no records found")
|
||||||
|
}
|
||||||
|
return errors.Errorf("no record found for ref %q", opts.ref)
|
||||||
|
}
|
||||||
|
|
||||||
|
rec := &recs[0]
|
||||||
|
c, err := rec.node.Driver.Client(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
store := proxy.NewContentStore(c.ContentClient())
|
||||||
|
|
||||||
|
var defaultPlatform string
|
||||||
|
workers, err := c.ListWorkers(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "failed to list workers")
|
||||||
|
}
|
||||||
|
workers0:
|
||||||
|
for _, w := range workers {
|
||||||
|
for _, p := range w.Platforms {
|
||||||
|
defaultPlatform = platforms.FormatAll(platforms.Normalize(p))
|
||||||
|
break workers0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ls, err := localstate.New(confutil.NewConfig(dockerCli))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
st, _ := ls.ReadRef(rec.node.Builder, rec.node.Name, rec.Ref)
|
||||||
|
|
||||||
|
attrs := rec.FrontendAttrs
|
||||||
|
delete(attrs, "frontend.caps")
|
||||||
|
|
||||||
|
var out inspectOutput
|
||||||
|
|
||||||
|
var context string
|
||||||
|
var dockerfile string
|
||||||
|
if st != nil {
|
||||||
|
context = st.LocalPath
|
||||||
|
dockerfile = st.DockerfilePath
|
||||||
|
wd, _ := os.Getwd()
|
||||||
|
|
||||||
|
if dockerfile != "" && dockerfile != "-" {
|
||||||
|
if rel, err := filepath.Rel(context, dockerfile); err == nil {
|
||||||
|
if !strings.HasPrefix(rel, ".."+string(filepath.Separator)) {
|
||||||
|
dockerfile = rel
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if context != "" {
|
||||||
|
if rel, err := filepath.Rel(wd, context); err == nil {
|
||||||
|
if !strings.HasPrefix(rel, ".."+string(filepath.Separator)) {
|
||||||
|
context = rel
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := attrs["context"]; ok && context == "" {
|
||||||
|
delete(attrs, "context")
|
||||||
|
context = v
|
||||||
|
}
|
||||||
|
if dockerfile == "" {
|
||||||
|
if v, ok := attrs["filename"]; ok {
|
||||||
|
dockerfile = v
|
||||||
|
if dfdir, ok := attrs["vcs:localdir:dockerfile"]; ok {
|
||||||
|
dockerfile = filepath.Join(dfdir, dockerfile)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
delete(attrs, "filename")
|
||||||
|
|
||||||
|
out.Name = buildName(rec.FrontendAttrs, st)
|
||||||
|
out.Ref = rec.Ref
|
||||||
|
|
||||||
|
out.Context = context
|
||||||
|
out.Dockerfile = dockerfile
|
||||||
|
|
||||||
|
if _, ok := attrs["context"]; !ok {
|
||||||
|
if src, ok := attrs["vcs:source"]; ok {
|
||||||
|
out.VCSRepository = src
|
||||||
|
}
|
||||||
|
if rev, ok := attrs["vcs:revision"]; ok {
|
||||||
|
out.VCSRevision = rev
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
readAttr(attrs, "target", &out.Target, nil)
|
||||||
|
|
||||||
|
readAttr(attrs, "platform", &out.Platform, func(v string) ([]string, bool) {
|
||||||
|
return tryParseValue(v, &out.Errors, func(v string) ([]string, error) {
|
||||||
|
var pp []string
|
||||||
|
for _, v := range strings.Split(v, ",") {
|
||||||
|
p, err := platforms.Parse(v)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
pp = append(pp, platforms.FormatAll(platforms.Normalize(p)))
|
||||||
|
}
|
||||||
|
if len(pp) == 0 {
|
||||||
|
pp = append(pp, defaultPlatform)
|
||||||
|
}
|
||||||
|
return pp, nil
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
readAttr(attrs, "build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR", &out.KeepGitDir, func(v string) (bool, bool) {
|
||||||
|
return tryParseValue(v, &out.Errors, strconv.ParseBool)
|
||||||
|
})
|
||||||
|
|
||||||
|
out.NamedContexts = readKeyValues(attrs, "context:")
|
||||||
|
|
||||||
|
if rec.CreatedAt != nil {
|
||||||
|
tm := rec.CreatedAt.AsTime().Local()
|
||||||
|
out.StartedAt = &tm
|
||||||
|
}
|
||||||
|
out.Status = statusRunning
|
||||||
|
|
||||||
|
if rec.CompletedAt != nil {
|
||||||
|
tm := rec.CompletedAt.AsTime().Local()
|
||||||
|
out.CompletedAt = &tm
|
||||||
|
out.Status = statusComplete
|
||||||
|
}
|
||||||
|
|
||||||
|
if rec.Error != nil || rec.ExternalError != nil {
|
||||||
|
out.Error = &errorOutput{}
|
||||||
|
if rec.Error != nil {
|
||||||
|
if codes.Code(rec.Error.Code) == codes.Canceled {
|
||||||
|
out.Status = statusCanceled
|
||||||
|
} else {
|
||||||
|
out.Status = statusError
|
||||||
|
}
|
||||||
|
out.Error.Code = int(codes.Code(rec.Error.Code))
|
||||||
|
out.Error.Message = rec.Error.Message
|
||||||
|
}
|
||||||
|
if rec.ExternalError != nil {
|
||||||
|
dt, err := content.ReadBlob(ctx, store, ociDesc(rec.ExternalError))
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to read external error %s", rec.ExternalError.Digest)
|
||||||
|
}
|
||||||
|
var st spb.Status
|
||||||
|
if err := proto.Unmarshal(dt, &st); err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to unmarshal external error %s", rec.ExternalError.Digest)
|
||||||
|
}
|
||||||
|
retErr := grpcerrors.FromGRPC(status.ErrorProto(&st))
|
||||||
|
var errsources bytes.Buffer
|
||||||
|
for _, s := range errdefs.Sources(retErr) {
|
||||||
|
s.Print(&errsources)
|
||||||
|
errsources.WriteString("\n")
|
||||||
|
}
|
||||||
|
out.Error.Sources = errsources.Bytes()
|
||||||
|
var ve *errdefs.VertexError
|
||||||
|
if errors.As(retErr, &ve) {
|
||||||
|
dgst, err := digest.Parse(ve.Vertex.Digest)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to parse vertex digest %s", ve.Vertex.Digest)
|
||||||
|
}
|
||||||
|
name, logs, err := loadVertexLogs(ctx, c, rec.Ref, dgst, 16)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to load vertex logs %s", dgst)
|
||||||
|
}
|
||||||
|
out.Error.Name = name
|
||||||
|
out.Error.Logs = logs
|
||||||
|
}
|
||||||
|
out.Error.Stack = fmt.Appendf(nil, "%+v", stack.Formatter(retErr))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if out.StartedAt != nil {
|
||||||
|
if out.CompletedAt != nil {
|
||||||
|
out.Duration = out.CompletedAt.Sub(*out.StartedAt)
|
||||||
|
} else {
|
||||||
|
out.Duration = rec.currentTimestamp.Sub(*out.StartedAt)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
out.NumCompletedSteps = rec.NumCompletedSteps
|
||||||
|
out.NumTotalSteps = rec.NumTotalSteps
|
||||||
|
out.NumCachedSteps = rec.NumCachedSteps
|
||||||
|
|
||||||
|
out.BuildArgs = readKeyValues(attrs, "build-arg:")
|
||||||
|
out.Labels = readKeyValues(attrs, "label:")
|
||||||
|
|
||||||
|
readAttr(attrs, "force-network-mode", &out.Config.Network, nil)
|
||||||
|
readAttr(attrs, "hostname", &out.Config.Hostname, nil)
|
||||||
|
readAttr(attrs, "cgroup-parent", &out.Config.CgroupParent, nil)
|
||||||
|
readAttr(attrs, "image-resolve-mode", &out.Config.ImageResolveMode, nil)
|
||||||
|
readAttr(attrs, "build-arg:BUILDKIT_MULTI_PLATFORM", &out.Config.MultiPlatform, func(v string) (bool, bool) {
|
||||||
|
return tryParseValue(v, &out.Errors, strconv.ParseBool)
|
||||||
|
})
|
||||||
|
readAttr(attrs, "multi-platform", &out.Config.MultiPlatform, func(v string) (bool, bool) {
|
||||||
|
return tryParseValue(v, &out.Errors, strconv.ParseBool)
|
||||||
|
})
|
||||||
|
readAttr(attrs, "no-cache", &out.Config.NoCache, func(v string) (bool, bool) {
|
||||||
|
if v == "" {
|
||||||
|
return true, true
|
||||||
|
}
|
||||||
|
return false, false
|
||||||
|
})
|
||||||
|
readAttr(attrs, "no-cache", &out.Config.NoCacheFilter, func(v string) ([]string, bool) {
|
||||||
|
if v == "" {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
return strings.Split(v, ","), true
|
||||||
|
})
|
||||||
|
|
||||||
|
readAttr(attrs, "add-hosts", &out.Config.ExtraHosts, func(v string) ([]string, bool) {
|
||||||
|
return tryParseValue(v, &out.Errors, func(v string) ([]string, error) {
|
||||||
|
fields, err := csvvalue.Fields(v, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return fields, nil
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
readAttr(attrs, "shm-size", &out.Config.ShmSize, nil)
|
||||||
|
readAttr(attrs, "ulimit", &out.Config.Ulimit, nil)
|
||||||
|
readAttr(attrs, "build-arg:BUILDKIT_CACHE_MOUNT_NS", &out.Config.CacheMountNS, nil)
|
||||||
|
readAttr(attrs, "build-arg:BUILDKIT_DOCKERFILE_CHECK", &out.Config.DockerfileCheckConfig, nil)
|
||||||
|
readAttr(attrs, "build-arg:SOURCE_DATE_EPOCH", &out.Config.SourceDateEpoch, nil)
|
||||||
|
readAttr(attrs, "build-arg:SANDBOX_HOSTNAME", &out.Config.SandboxHostname, nil)
|
||||||
|
|
||||||
|
var unusedAttrs []keyValueOutput
|
||||||
|
for k := range attrs {
|
||||||
|
if strings.HasPrefix(k, "vcs:") || strings.HasPrefix(k, "build-arg:") || strings.HasPrefix(k, "label:") || strings.HasPrefix(k, "context:") || strings.HasPrefix(k, "attest:") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
unusedAttrs = append(unusedAttrs, keyValueOutput{
|
||||||
|
Name: k,
|
||||||
|
Value: attrs[k],
|
||||||
|
})
|
||||||
|
}
|
||||||
|
slices.SortFunc(unusedAttrs, func(a, b keyValueOutput) int {
|
||||||
|
return cmp.Compare(a.Name, b.Name)
|
||||||
|
})
|
||||||
|
out.Config.RestRaw = unusedAttrs
|
||||||
|
|
||||||
|
attachments, err := allAttachments(ctx, store, *rec)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
provIndex := slices.IndexFunc(attachments, func(a attachment) bool {
|
||||||
|
return descrType(a.descr) == slsa02.PredicateSLSAProvenance
|
||||||
|
})
|
||||||
|
if provIndex != -1 {
|
||||||
|
prov := attachments[provIndex]
|
||||||
|
dt, err := content.ReadBlob(ctx, store, prov.descr)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Errorf("failed to read provenance %s: %v", prov.descr.Digest, err)
|
||||||
|
}
|
||||||
|
var pred provenancetypes.ProvenancePredicate
|
||||||
|
if err := json.Unmarshal(dt, &pred); err != nil {
|
||||||
|
return errors.Errorf("failed to unmarshal provenance %s: %v", prov.descr.Digest, err)
|
||||||
|
}
|
||||||
|
for _, m := range pred.Materials {
|
||||||
|
out.Materials = append(out.Materials, materialOutput{
|
||||||
|
URI: m.URI,
|
||||||
|
Digests: digestSetToDigests(m.Digest),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(attachments) > 0 {
|
||||||
|
for _, a := range attachments {
|
||||||
|
p := ""
|
||||||
|
if a.platform != nil {
|
||||||
|
p = platforms.FormatAll(*a.platform)
|
||||||
|
}
|
||||||
|
out.Attachments = append(out.Attachments, attachmentOutput{
|
||||||
|
Digest: a.descr.Digest.String(),
|
||||||
|
Platform: p,
|
||||||
|
Type: descrType(a.descr),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.format == formatter.JSONFormatKey {
|
||||||
|
enc := json.NewEncoder(dockerCli.Out())
|
||||||
|
enc.SetIndent("", " ")
|
||||||
|
return enc.Encode(out)
|
||||||
|
} else if opts.format != formatter.PrettyFormatKey {
|
||||||
|
tmpl, err := template.New("inspect").Parse(opts.format)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to parse format template")
|
||||||
|
}
|
||||||
|
var buf bytes.Buffer
|
||||||
|
if err := tmpl.Execute(&buf, out); err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to execute format template")
|
||||||
|
}
|
||||||
|
fmt.Fprintln(dockerCli.Out(), buf.String())
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
tw := tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
|
||||||
|
|
||||||
|
if out.Name != "" {
|
||||||
|
fmt.Fprintf(tw, "Name:\t%s\n", out.Name)
|
||||||
|
}
|
||||||
|
if opts.ref == "" && out.Ref != "" {
|
||||||
|
fmt.Fprintf(tw, "Ref:\t%s\n", out.Ref)
|
||||||
|
}
|
||||||
|
if out.Context != "" {
|
||||||
|
fmt.Fprintf(tw, "Context:\t%s\n", out.Context)
|
||||||
|
}
|
||||||
|
if out.Dockerfile != "" {
|
||||||
|
fmt.Fprintf(tw, "Dockerfile:\t%s\n", out.Dockerfile)
|
||||||
|
}
|
||||||
|
if out.VCSRepository != "" {
|
||||||
|
fmt.Fprintf(tw, "VCS Repository:\t%s\n", out.VCSRepository)
|
||||||
|
}
|
||||||
|
if out.VCSRevision != "" {
|
||||||
|
fmt.Fprintf(tw, "VCS Revision:\t%s\n", out.VCSRevision)
|
||||||
|
}
|
||||||
|
|
||||||
|
if out.Target != "" {
|
||||||
|
fmt.Fprintf(tw, "Target:\t%s\n", out.Target)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(out.Platform) > 0 {
|
||||||
|
fmt.Fprintf(tw, "Platforms:\t%s\n", strings.Join(out.Platform, ", "))
|
||||||
|
}
|
||||||
|
|
||||||
|
if out.KeepGitDir {
|
||||||
|
fmt.Fprintf(tw, "Keep Git Dir:\t%s\n", strconv.FormatBool(out.KeepGitDir))
|
||||||
|
}
|
||||||
|
|
||||||
|
tw.Flush()
|
||||||
|
|
||||||
|
fmt.Fprintln(dockerCli.Out())
|
||||||
|
|
||||||
|
printTable(dockerCli.Out(), out.NamedContexts, "Named Context")
|
||||||
|
|
||||||
|
tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
|
||||||
|
|
||||||
|
fmt.Fprintf(tw, "Started:\t%s\n", out.StartedAt.Format("2006-01-02 15:04:05"))
|
||||||
|
var statusStr string
|
||||||
|
if out.Status == statusRunning {
|
||||||
|
statusStr = " (running)"
|
||||||
|
}
|
||||||
|
fmt.Fprintf(tw, "Duration:\t%s%s\n", formatDuration(out.Duration), statusStr)
|
||||||
|
|
||||||
|
if out.Status == statusError {
|
||||||
|
fmt.Fprintf(tw, "Error:\t%s %s\n", codes.Code(rec.Error.Code).String(), rec.Error.Message)
|
||||||
|
} else if out.Status == statusCanceled {
|
||||||
|
fmt.Fprintf(tw, "Status:\tCanceled\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprintf(tw, "Build Steps:\t%d/%d (%.0f%% cached)\n", out.NumCompletedSteps, out.NumTotalSteps, float64(out.NumCachedSteps)/float64(out.NumTotalSteps)*100)
|
||||||
|
tw.Flush()
|
||||||
|
|
||||||
|
fmt.Fprintln(dockerCli.Out())
|
||||||
|
|
||||||
|
tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
|
||||||
|
|
||||||
|
if out.Config.Network != "" {
|
||||||
|
fmt.Fprintf(tw, "Network:\t%s\n", out.Config.Network)
|
||||||
|
}
|
||||||
|
if out.Config.Hostname != "" {
|
||||||
|
fmt.Fprintf(tw, "Hostname:\t%s\n", out.Config.Hostname)
|
||||||
|
}
|
||||||
|
if len(out.Config.ExtraHosts) > 0 {
|
||||||
|
fmt.Fprintf(tw, "Extra Hosts:\t%s\n", strings.Join(out.Config.ExtraHosts, ", "))
|
||||||
|
}
|
||||||
|
if out.Config.CgroupParent != "" {
|
||||||
|
fmt.Fprintf(tw, "Cgroup Parent:\t%s\n", out.Config.CgroupParent)
|
||||||
|
}
|
||||||
|
if out.Config.ImageResolveMode != "" {
|
||||||
|
fmt.Fprintf(tw, "Image Resolve Mode:\t%s\n", out.Config.ImageResolveMode)
|
||||||
|
}
|
||||||
|
if out.Config.MultiPlatform {
|
||||||
|
fmt.Fprintf(tw, "Multi-Platform:\t%s\n", strconv.FormatBool(out.Config.MultiPlatform))
|
||||||
|
}
|
||||||
|
if out.Config.NoCache {
|
||||||
|
fmt.Fprintf(tw, "No Cache:\t%s\n", strconv.FormatBool(out.Config.NoCache))
|
||||||
|
}
|
||||||
|
if len(out.Config.NoCacheFilter) > 0 {
|
||||||
|
fmt.Fprintf(tw, "No Cache Filter:\t%s\n", strings.Join(out.Config.NoCacheFilter, ", "))
|
||||||
|
}
|
||||||
|
|
||||||
|
if out.Config.ShmSize != "" {
|
||||||
|
fmt.Fprintf(tw, "Shm Size:\t%s\n", out.Config.ShmSize)
|
||||||
|
}
|
||||||
|
if out.Config.Ulimit != "" {
|
||||||
|
fmt.Fprintf(tw, "Resource Limits:\t%s\n", out.Config.Ulimit)
|
||||||
|
}
|
||||||
|
if out.Config.CacheMountNS != "" {
|
||||||
|
fmt.Fprintf(tw, "Cache Mount Namespace:\t%s\n", out.Config.CacheMountNS)
|
||||||
|
}
|
||||||
|
if out.Config.DockerfileCheckConfig != "" {
|
||||||
|
fmt.Fprintf(tw, "Dockerfile Check Config:\t%s\n", out.Config.DockerfileCheckConfig)
|
||||||
|
}
|
||||||
|
if out.Config.SourceDateEpoch != "" {
|
||||||
|
fmt.Fprintf(tw, "Source Date Epoch:\t%s\n", out.Config.SourceDateEpoch)
|
||||||
|
}
|
||||||
|
if out.Config.SandboxHostname != "" {
|
||||||
|
fmt.Fprintf(tw, "Sandbox Hostname:\t%s\n", out.Config.SandboxHostname)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, kv := range out.Config.RestRaw {
|
||||||
|
fmt.Fprintf(tw, "%s:\t%s\n", kv.Name, kv.Value)
|
||||||
|
}
|
||||||
|
|
||||||
|
tw.Flush()
|
||||||
|
|
||||||
|
fmt.Fprintln(dockerCli.Out())
|
||||||
|
|
||||||
|
printTable(dockerCli.Out(), out.BuildArgs, "Build Arg")
|
||||||
|
printTable(dockerCli.Out(), out.Labels, "Label")
|
||||||
|
|
||||||
|
if len(out.Materials) > 0 {
|
||||||
|
fmt.Fprintln(dockerCli.Out(), "Materials:")
|
||||||
|
tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
|
||||||
|
fmt.Fprintf(tw, "URI\tDIGEST\n")
|
||||||
|
for _, m := range out.Materials {
|
||||||
|
fmt.Fprintf(tw, "%s\t%s\n", m.URI, strings.Join(m.Digests, ", "))
|
||||||
|
}
|
||||||
|
tw.Flush()
|
||||||
|
fmt.Fprintln(dockerCli.Out())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(out.Attachments) > 0 {
|
||||||
|
fmt.Fprintf(tw, "Attachments:\n")
|
||||||
|
tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
|
||||||
|
fmt.Fprintf(tw, "DIGEST\tPLATFORM\tTYPE\n")
|
||||||
|
for _, a := range out.Attachments {
|
||||||
|
fmt.Fprintf(tw, "%s\t%s\t%s\n", a.Digest, a.Platform, a.Type)
|
||||||
|
}
|
||||||
|
tw.Flush()
|
||||||
|
fmt.Fprintln(dockerCli.Out())
|
||||||
|
}
|
||||||
|
|
||||||
|
if out.Error != nil {
|
||||||
|
if out.Error.Sources != nil {
|
||||||
|
fmt.Fprint(dockerCli.Out(), string(out.Error.Sources))
|
||||||
|
}
|
||||||
|
if len(out.Error.Logs) > 0 {
|
||||||
|
fmt.Fprintln(dockerCli.Out(), "Logs:")
|
||||||
|
fmt.Fprintf(dockerCli.Out(), "> => %s:\n", out.Error.Name)
|
||||||
|
for _, l := range out.Error.Logs {
|
||||||
|
fmt.Fprintln(dockerCli.Out(), "> "+l)
|
||||||
|
}
|
||||||
|
fmt.Fprintln(dockerCli.Out())
|
||||||
|
}
|
||||||
|
if len(out.Error.Stack) > 0 {
|
||||||
|
if debug.IsEnabled() {
|
||||||
|
fmt.Fprintf(dockerCli.Out(), "\n%s\n", out.Error.Stack)
|
||||||
|
} else {
|
||||||
|
fmt.Fprintf(dockerCli.Out(), "Enable --debug to see stack traces for error\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprintf(dockerCli.Out(), "Print build logs: docker buildx history logs %s\n", rec.Ref)
|
||||||
|
|
||||||
|
fmt.Fprintf(dockerCli.Out(), "View build in Docker Desktop: %s\n", desktop.BuildURL(fmt.Sprintf("%s/%s/%s", rec.node.Builder, rec.node.Name, rec.Ref)))
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// inspectCmd builds the "buildx history inspect" cobra command, which prints
// details about a single build record, and registers the "attachment"
// subcommand under it.
func inspectCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
	var options inspectOptions

	cmd := &cobra.Command{
		Use:   "inspect [OPTIONS] [REF]",
		Short: "Inspect a build",
		Args:  cobra.MaximumNArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			// REF is optional; without it the latest record is inspected.
			if len(args) > 0 {
				options.ref = args[0]
			}
			options.builder = *rootOpts.Builder
			return runInspect(cmd.Context(), dockerCli, options)
		},
		ValidArgsFunction: completion.Disable,
	}

	cmd.AddCommand(
		attachmentCmd(dockerCli, rootOpts),
	)

	flags := cmd.Flags()
	flags.StringVar(&options.format, "format", formatter.PrettyFormatKey, "Format the output")

	return cmd
}
|
||||||
|
|
||||||
|
// loadVertexLogs replays the status stream of the build ref and collects the
// log lines emitted by the vertex identified by dgst. It returns the vertex
// name, the collected lines (truncated to the last limit entries when
// limit > 0), and any stream error.
func loadVertexLogs(ctx context.Context, c *client.Client, ref string, dgst digest.Digest, limit int) (string, []string, error) {
	st, err := c.ControlClient().Status(ctx, &controlapi.StatusRequest{
		Ref: ref,
	})
	if err != nil {
		return "", nil, err
	}

	var name string
	var logs []string
	// lastState maps a log stream id to the index in logs holding that
	// stream's last, possibly unterminated line; -1 means the previous chunk
	// ended with a newline, so the next chunk starts a fresh line.
	lastState := map[int]int{}

loop0:
	for {
		select {
		case <-ctx.Done():
			st.CloseSend()
			return "", nil, context.Cause(ctx)
		default:
			ev, err := st.Recv()
			if err != nil {
				// EOF means the recorded stream has been fully replayed.
				if errors.Is(err, io.EOF) {
					break loop0
				}
				return "", nil, err
			}
			ss := client.NewSolveStatus(ev)
			for _, v := range ss.Vertexes {
				if v.Digest == dgst {
					name = v.Name
					break
				}
			}
			for _, l := range ss.Logs {
				if l.Vertex == dgst {
					// Log data arrives in arbitrary chunks; split on newlines
					// and stitch continuations onto the previous line.
					parts := bytes.Split(l.Data, []byte("\n"))
					for i, p := range parts {
						var wrote bool
						if i == 0 {
							// First part may continue the stream's last line.
							idx, ok := lastState[l.Stream]
							if ok && idx != -1 {
								logs[idx] = logs[idx] + string(p)
								wrote = true
							}
						}
						if !wrote {
							if len(p) > 0 {
								logs = append(logs, string(p))
							}
							lastState[l.Stream] = len(logs) - 1
						}
						if i == len(parts)-1 && len(p) == 0 {
							// Chunk ended with "\n": mark the line complete.
							lastState[l.Stream] = -1
						}
					}
				}
			}
		}
	}

	// Keep only the trailing lines when a limit was requested.
	if limit > 0 && len(logs) > limit {
		logs = logs[len(logs)-limit:]
	}

	return name, logs, nil
}
|
||||||
|
|
||||||
|
// attachment pairs an OCI descriptor with the platform it belongs to.
// platform is nil when the attachment is not platform-specific.
type attachment struct {
	platform *ocispecs.Platform
	descr    ocispecs.Descriptor
}
|
||||||
|
|
||||||
|
// allAttachments collects descriptors for all attestations and results
// referenced by the record: both the legacy single result (rec.Result) and
// the per-platform results map (rec.Results). Index descriptors are walked
// recursively via walkAttachments. The result is sorted by platform, then by
// descriptor type, for stable output.
func allAttachments(ctx context.Context, store content.Store, rec historyRecord) ([]attachment, error) {
	var attachments []attachment

	if rec.Result != nil {
		for _, a := range rec.Result.Attestations {
			attachments = append(attachments, attachment{
				descr: ociDesc(a),
			})
		}
		for _, r := range rec.Result.Results {
			attachments = append(attachments, walkAttachments(ctx, store, ociDesc(r), nil)...)
		}
	}

	for key, ri := range rec.Results {
		// The map key is the platform string of this result set.
		p, err := platforms.Parse(key)
		if err != nil {
			return nil, err
		}
		for _, a := range ri.Attestations {
			attachments = append(attachments, attachment{
				platform: &p,
				descr:    ociDesc(a),
			})
		}
		for _, r := range ri.Results {
			attachments = append(attachments, walkAttachments(ctx, store, ociDesc(r), &p)...)
		}
	}

	slices.SortFunc(attachments, func(a, b attachment) int {
		// Platform-less attachments sort first; otherwise order by the
		// formatted platform, then by descriptor type.
		pCmp := 0
		if a.platform == nil && b.platform != nil {
			return -1
		} else if a.platform != nil && b.platform == nil {
			return 1
		} else if a.platform != nil && b.platform != nil {
			pCmp = cmp.Compare(platforms.FormatAll(*a.platform), platforms.FormatAll(*b.platform))
		}
		return cmp.Or(
			pCmp,
			cmp.Compare(descrType(a.descr), descrType(b.descr)),
		)
	})

	return attachments, nil
}
|
||||||
|
|
||||||
|
// walkAttachments returns desc itself (unless it is an attestation-manifest
// wrapper) and, when desc is an image index, recursively descends into its
// manifests. Blobs missing from the content store and unreadable or
// unparsable indexes are skipped silently (best effort).
func walkAttachments(ctx context.Context, store content.Store, desc ocispecs.Descriptor, platform *ocispecs.Platform) []attachment {
	_, err := store.Info(ctx, desc.Digest)
	if err != nil {
		// Blob not present locally; nothing to report for this branch.
		return nil
	}

	var out []attachment

	if desc.Annotations["vnd.docker.reference.type"] != "attestation-manifest" {
		out = append(out, attachment{platform: platform, descr: desc})
	}

	// Only index-like media types have children to descend into.
	if desc.MediaType != ocispecs.MediaTypeImageIndex && desc.MediaType != images.MediaTypeDockerSchema2ManifestList {
		return out
	}

	dt, err := content.ReadBlob(ctx, store, desc)
	if err != nil {
		return out
	}

	var idx ocispecs.Index
	if err := json.Unmarshal(dt, &idx); err != nil {
		return out
	}

	for _, d := range idx.Manifests {
		// Children inherit the parent's platform unless they carry their own.
		p := platform
		if d.Platform != nil {
			p = d.Platform
		}
		out = append(out, walkAttachments(ctx, store, d, p)...)
	}

	return out
}
|
||||||
|
|
||||||
|
func ociDesc(in *controlapi.Descriptor) ocispecs.Descriptor {
|
||||||
|
return ocispecs.Descriptor{
|
||||||
|
MediaType: in.MediaType,
|
||||||
|
Digest: digest.Digest(in.Digest),
|
||||||
|
Size: in.Size,
|
||||||
|
Annotations: in.Annotations,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func descrType(desc ocispecs.Descriptor) string {
|
||||||
|
if typ, ok := desc.Annotations["in-toto.io/predicate-type"]; ok {
|
||||||
|
return typ
|
||||||
|
}
|
||||||
|
return desc.MediaType
|
||||||
|
}
|
||||||
|
|
||||||
|
func tryParseValue[T any](s string, errs *[]string, f func(string) (T, error)) (T, bool) {
|
||||||
|
v, err := f(s)
|
||||||
|
if err != nil {
|
||||||
|
errStr := fmt.Sprintf("failed to parse %s: (%v)", s, err)
|
||||||
|
*errs = append(*errs, errStr)
|
||||||
|
}
|
||||||
|
return v, true
|
||||||
|
}
|
||||||
|
|
||||||
|
func printTable(w io.Writer, kvs []keyValueOutput, title string) {
|
||||||
|
if len(kvs) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
|
||||||
|
fmt.Fprintf(tw, "%s\tVALUE\n", strings.ToUpper(title))
|
||||||
|
for _, k := range kvs {
|
||||||
|
fmt.Fprintf(tw, "%s\t%s\n", k.Name, k.Value)
|
||||||
|
}
|
||||||
|
tw.Flush()
|
||||||
|
fmt.Fprintln(w)
|
||||||
|
}
|
||||||
|
|
||||||
|
func readKeyValues(attrs map[string]string, prefix string) []keyValueOutput {
|
||||||
|
var out []keyValueOutput
|
||||||
|
for k, v := range attrs {
|
||||||
|
if strings.HasPrefix(k, prefix) {
|
||||||
|
out = append(out, keyValueOutput{
|
||||||
|
Name: strings.TrimPrefix(k, prefix),
|
||||||
|
Value: v,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(out) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
slices.SortFunc(out, func(a, b keyValueOutput) int {
|
||||||
|
return cmp.Compare(a.Name, b.Name)
|
||||||
|
})
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func digestSetToDigests(ds slsa.DigestSet) []string {
|
||||||
|
var out []string
|
||||||
|
for k, v := range ds {
|
||||||
|
out = append(out, fmt.Sprintf("%s:%s", k, v))
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
145
commands/history/inspect_attachment.go
Normal file
145
commands/history/inspect_attachment.go
Normal file
@@ -0,0 +1,145 @@
|
|||||||
|
package history
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
|
||||||
|
"github.com/containerd/containerd/v2/core/content/proxy"
|
||||||
|
"github.com/containerd/platforms"
|
||||||
|
"github.com/docker/buildx/builder"
|
||||||
|
"github.com/docker/buildx/util/cobrautil/completion"
|
||||||
|
"github.com/docker/cli/cli/command"
|
||||||
|
intoto "github.com/in-toto/in-toto-golang/in_toto"
|
||||||
|
slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2"
|
||||||
|
"github.com/opencontainers/go-digest"
|
||||||
|
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
// attachmentOptions holds the flags and arguments of
// "buildx history inspect attachment".
type attachmentOptions struct {
	builder  string        // builder instance to query
	typ      string        // attachment type filter (alias or full media/predicate type)
	platform string        // platform filter, e.g. "linux/amd64"
	ref      string        // build record ref; empty selects the latest record
	digest   digest.Digest // explicit blob digest; bypasses filtering when set
}
|
||||||
|
|
||||||
|
// runAttachment streams the raw content of one build attachment to stdout.
// With an explicit digest the blob is read directly from the content store;
// otherwise the first attachment matching the --type/--platform filters is
// printed.
func runAttachment(ctx context.Context, dockerCli command.Cli, opts attachmentOptions) error {
	b, err := builder.New(dockerCli, builder.WithName(opts.builder))
	if err != nil {
		return err
	}

	nodes, err := b.LoadNodes(ctx)
	if err != nil {
		return err
	}
	for _, node := range nodes {
		if node.Err != nil {
			return node.Err
		}
	}

	recs, err := queryRecords(ctx, opts.ref, nodes, nil)
	if err != nil {
		return err
	}

	if len(recs) == 0 {
		if opts.ref == "" {
			return errors.New("no records found")
		}
		return errors.Errorf("no record found for ref %q", opts.ref)
	}

	// Use the most recent matching record.
	rec := &recs[0]

	c, err := rec.node.Driver.Client(ctx)
	if err != nil {
		return err
	}

	store := proxy.NewContentStore(c.ContentClient())

	if opts.digest != "" {
		// Direct lookup: stream the blob as-is, skipping attachment matching.
		ra, err := store.ReaderAt(ctx, ocispecs.Descriptor{Digest: opts.digest})
		if err != nil {
			return err
		}
		_, err = io.Copy(dockerCli.Out(), io.NewSectionReader(ra, 0, ra.Size()))
		return err
	}

	attachments, err := allAttachments(ctx, store, *rec)
	if err != nil {
		return err
	}

	// Map the user-friendly type aliases to their media/predicate types;
	// any other value is matched literally against descrType.
	typ := opts.typ
	switch typ {
	case "index":
		typ = ocispecs.MediaTypeImageIndex
	case "manifest":
		typ = ocispecs.MediaTypeImageManifest
	case "image":
		typ = ocispecs.MediaTypeImageConfig
	case "provenance":
		typ = slsa02.PredicateSLSAProvenance
	case "sbom":
		typ = intoto.PredicateSPDX
	}

	for _, a := range attachments {
		if opts.platform != "" && (a.platform == nil || platforms.FormatAll(*a.platform) != opts.platform) {
			continue
		}
		if typ != "" && descrType(a.descr) != typ {
			continue
		}
		// Only the first matching attachment is printed.
		ra, err := store.ReaderAt(ctx, a.descr)
		if err != nil {
			return err
		}
		_, err = io.Copy(dockerCli.Out(), io.NewSectionReader(ra, 0, ra.Size()))
		return err
	}

	return errors.Errorf("no matching attachment found for ref %q", opts.ref)
}
|
||||||
|
|
||||||
|
// attachmentCmd builds the "buildx history inspect attachment" cobra command.
// At least one of --type, --platform or an explicit DIGEST argument must be
// given to select which attachment to print.
func attachmentCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
	var options attachmentOptions

	cmd := &cobra.Command{
		Use:   "attachment [OPTIONS] REF [DIGEST]",
		Short: "Inspect a build attachment",
		Args:  cobra.RangeArgs(1, 2),
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) > 0 {
				options.ref = args[0]
			}
			if len(args) > 1 {
				// The optional second argument is a content-store digest.
				dgst, err := digest.Parse(args[1])
				if err != nil {
					return errors.Wrapf(err, "invalid digest %q", args[1])
				}
				options.digest = dgst
			}

			if options.digest == "" && options.platform == "" && options.typ == "" {
				return errors.New("at least one of --type, --platform or DIGEST must be specified")
			}

			options.builder = *rootOpts.Builder
			return runAttachment(cmd.Context(), dockerCli, options)
		},
		ValidArgsFunction: completion.Disable,
	}

	flags := cmd.Flags()
	flags.StringVar(&options.typ, "type", "", "Type of attachment")
	flags.StringVar(&options.platform, "platform", "", "Platform of attachment")

	return cmd
}
|
||||||
117
commands/history/logs.go
Normal file
117
commands/history/logs.go
Normal file
@@ -0,0 +1,117 @@
|
|||||||
|
package history
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/docker/buildx/builder"
|
||||||
|
"github.com/docker/buildx/util/cobrautil/completion"
|
||||||
|
"github.com/docker/buildx/util/progress"
|
||||||
|
"github.com/docker/cli/cli/command"
|
||||||
|
controlapi "github.com/moby/buildkit/api/services/control"
|
||||||
|
"github.com/moby/buildkit/client"
|
||||||
|
"github.com/moby/buildkit/util/progress/progressui"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
// logsOptions holds the flags and arguments of "buildx history logs".
type logsOptions struct {
	builder  string // builder instance to query
	ref      string // build record ref; empty selects the latest record
	progress string // progress display mode (plain, rawjson, tty)
}
|
||||||
|
|
||||||
|
// runLogs replays the recorded progress/log stream of a build to stderr using
// the requested progress display mode.
func runLogs(ctx context.Context, dockerCli command.Cli, opts logsOptions) error {
	b, err := builder.New(dockerCli, builder.WithName(opts.builder))
	if err != nil {
		return err
	}

	nodes, err := b.LoadNodes(ctx)
	if err != nil {
		return err
	}
	for _, node := range nodes {
		if node.Err != nil {
			return node.Err
		}
	}

	recs, err := queryRecords(ctx, opts.ref, nodes, nil)
	if err != nil {
		return err
	}

	if len(recs) == 0 {
		if opts.ref == "" {
			return errors.New("no records found")
		}
		return errors.Errorf("no record found for ref %q", opts.ref)
	}

	// Replay the most recent matching record.
	rec := &recs[0]
	c, err := rec.node.Driver.Client(ctx)
	if err != nil {
		return err
	}

	cl, err := c.ControlClient().Status(ctx, &controlapi.StatusRequest{
		Ref: rec.Ref,
	})
	if err != nil {
		return err
	}

	// "auto" makes no sense for a replay; fall back to plain output.
	var mode progressui.DisplayMode = progressui.DisplayMode(opts.progress)
	if mode == progressui.AutoMode {
		mode = progressui.PlainMode
	}
	printer, err := progress.NewPrinter(context.TODO(), os.Stderr, mode)
	if err != nil {
		return err
	}

loop0:
	for {
		select {
		case <-ctx.Done():
			cl.CloseSend()
			return context.Cause(ctx)
		default:
			ev, err := cl.Recv()
			if err != nil {
				// EOF: the recorded stream has been fully replayed.
				if errors.Is(err, io.EOF) {
					break loop0
				}
				return err
			}
			printer.Write(client.NewSolveStatus(ev))
		}
	}

	return printer.Wait()
}
|
||||||
|
|
||||||
|
func logsCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
|
||||||
|
var options logsOptions
|
||||||
|
|
||||||
|
cmd := &cobra.Command{
|
||||||
|
Use: "logs [OPTIONS] [REF]",
|
||||||
|
Short: "Print the logs of a build",
|
||||||
|
Args: cobra.MaximumNArgs(1),
|
||||||
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
if len(args) > 0 {
|
||||||
|
options.ref = args[0]
|
||||||
|
}
|
||||||
|
options.builder = *rootOpts.Builder
|
||||||
|
return runLogs(cmd.Context(), dockerCli, options)
|
||||||
|
},
|
||||||
|
ValidArgsFunction: completion.Disable,
|
||||||
|
}
|
||||||
|
|
||||||
|
flags := cmd.Flags()
|
||||||
|
flags.StringVar(&options.progress, "progress", "plain", "Set type of progress output (plain, rawjson, tty)")
|
||||||
|
|
||||||
|
return cmd
|
||||||
|
}
|
||||||
234
commands/history/ls.go
Normal file
234
commands/history/ls.go
Normal file
@@ -0,0 +1,234 @@
|
|||||||
|
package history
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"slices"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/containerd/console"
|
||||||
|
"github.com/docker/buildx/builder"
|
||||||
|
"github.com/docker/buildx/localstate"
|
||||||
|
"github.com/docker/buildx/util/cobrautil/completion"
|
||||||
|
"github.com/docker/buildx/util/confutil"
|
||||||
|
"github.com/docker/buildx/util/desktop"
|
||||||
|
"github.com/docker/cli/cli"
|
||||||
|
"github.com/docker/cli/cli/command"
|
||||||
|
"github.com/docker/cli/cli/command/formatter"
|
||||||
|
"github.com/docker/go-units"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// Column headers for the default table output.
	lsHeaderBuildID  = "BUILD ID"
	lsHeaderName     = "NAME"
	lsHeaderStatus   = "STATUS"
	lsHeaderCreated  = "CREATED AT"
	lsHeaderDuration = "DURATION"
	lsHeaderLink     = "" // link column has no visible header

	// lsDefaultTableFormat is the go-template used when --format=table.
	lsDefaultTableFormat = "table {{.Ref}}\t{{.Name}}\t{{.Status}}\t{{.CreatedAt}}\t{{.Duration}}\t{{.Link}}"

	// headerKeyTimestamp is the gRPC response-header key carrying the
	// server's current time, used to compute durations of running builds.
	headerKeyTimestamp = "buildkit-current-timestamp"
)
|
||||||
|
|
||||||
|
// lsOptions holds the flag values for "history ls".
type lsOptions struct {
	builder string // builder instance name, taken from the root --builder flag
	format  string // output format; "table" by default, or a go-template / json
	noTrunc bool   // when true, do not truncate names in table output
}
|
||||||
|
|
||||||
|
// runLs collects build history records from all nodes of the selected
// builder, resolves a display name for each from local state, and
// prints them via lsPrint.
func runLs(ctx context.Context, dockerCli command.Cli, opts lsOptions) error {
	b, err := builder.New(dockerCli, builder.WithName(opts.builder))
	if err != nil {
		return err
	}

	nodes, err := b.LoadNodes(ctx)
	if err != nil {
		return err
	}
	// Fail fast if any builder node is unhealthy.
	for _, node := range nodes {
		if node.Err != nil {
			return node.Err
		}
	}

	// Empty ref: fetch every record from every node.
	out, err := queryRecords(ctx, "", nodes, nil)
	if err != nil {
		return err
	}

	ls, err := localstate.New(confutil.NewConfig(dockerCli))
	if err != nil {
		return err
	}

	for i, rec := range out {
		// Best-effort lookup: a missing local-state entry just leaves
		// st nil and the name falls back to frontend attributes only.
		st, _ := ls.ReadRef(rec.node.Builder, rec.node.Name, rec.Ref)
		rec.name = buildName(rec.FrontendAttrs, st)
		out[i] = rec
	}

	return lsPrint(dockerCli, out, opts)
}
|
||||||
|
|
||||||
|
func lsCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
|
||||||
|
var options lsOptions
|
||||||
|
|
||||||
|
cmd := &cobra.Command{
|
||||||
|
Use: "ls",
|
||||||
|
Short: "List build records",
|
||||||
|
Args: cli.NoArgs,
|
||||||
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
options.builder = *rootOpts.Builder
|
||||||
|
return runLs(cmd.Context(), dockerCli, options)
|
||||||
|
},
|
||||||
|
ValidArgsFunction: completion.Disable,
|
||||||
|
}
|
||||||
|
|
||||||
|
flags := cmd.Flags()
|
||||||
|
flags.StringVar(&options.format, "format", formatter.TableFormatKey, "Format the output")
|
||||||
|
flags.BoolVar(&options.noTrunc, "no-trunc", false, "Don't truncate output")
|
||||||
|
|
||||||
|
return cmd
|
||||||
|
}
|
||||||
|
|
||||||
|
// lsPrint renders build records through the docker/cli formatter.
// Records are sorted so still-running builds (nil CompletedAt) come
// first, then newest-first by creation time.
func lsPrint(dockerCli command.Cli, records []historyRecord, in lsOptions) error {
	// "table" expands to the default column template.
	if in.format == formatter.TableFormatKey {
		in.format = lsDefaultTableFormat
	}

	ctx := formatter.Context{
		Output: dockerCli.Out(),
		Format: formatter.Format(in.format),
		Trunc:  !in.noTrunc,
	}

	slices.SortFunc(records, func(a, b historyRecord) int {
		// Running builds sort before completed ones.
		if a.CompletedAt == nil && b.CompletedAt != nil {
			return -1
		}
		if a.CompletedAt != nil && b.CompletedAt == nil {
			return 1
		}
		// Otherwise newest first.
		return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
	})

	// Terminal detection decides whether the Link column is rendered as
	// an ANSI hyperlink (see lsContext.Link).
	var term bool
	if _, err := console.ConsoleFromFile(os.Stdout); err == nil {
		term = true
	}
	// render emits one row per record; the per-row lsContext carries the
	// record plus the formatting flags.
	render := func(format func(subContext formatter.SubContext) error) error {
		for _, r := range records {
			if err := format(&lsContext{
				format: formatter.Format(in.format),
				isTerm: term,
				trunc:  !in.noTrunc,
				record: &r,
			}); err != nil {
				return err
			}
		}
		return nil
	}

	// Header context: maps template field names to column headers.
	lsCtx := lsContext{
		isTerm: term,
		trunc:  !in.noTrunc,
	}
	lsCtx.Header = formatter.SubHeaderContext{
		"Ref":       lsHeaderBuildID,
		"Name":      lsHeaderName,
		"Status":    lsHeaderStatus,
		"CreatedAt": lsHeaderCreated,
		"Duration":  lsHeaderDuration,
		"Link":      lsHeaderLink,
	}

	return ctx.Write(&lsCtx, render)
}
|
||||||
|
|
||||||
|
// lsContext is the formatter sub-context for a single build record row.
type lsContext struct {
	formatter.HeaderContext

	isTerm bool              // stdout is a terminal (enables ANSI hyperlinks)
	trunc  bool              // truncate long names in table output
	format formatter.Format  // active output format
	record *historyRecord    // the record being rendered
}
|
||||||
|
|
||||||
|
// MarshalJSON shapes a record for JSON output. "completed_at" is only
// present for finished builds.
func (c *lsContext) MarshalJSON() ([]byte, error) {
	m := map[string]any{
		"ref":             c.FullRef(),
		"name":            c.Name(),
		"status":          c.Status(),
		"created_at":      c.record.CreatedAt.AsTime().Format(time.RFC3339Nano),
		"total_steps":     c.record.NumTotalSteps,
		"completed_steps": c.record.NumCompletedSteps,
		"cached_steps":    c.record.NumCachedSteps,
	}
	if c.record.CompletedAt != nil {
		m["completed_at"] = c.record.CompletedAt.AsTime().Format(time.RFC3339Nano)
	}
	return json.Marshal(m)
}
|
||||||
|
|
||||||
|
// Ref returns the bare build record ref.
func (c *lsContext) Ref() string {
	return c.record.Ref
}
|
||||||
|
|
||||||
|
// FullRef returns the globally unique "builder/node/ref" identifier.
func (c *lsContext) FullRef() string {
	return fmt.Sprintf("%s/%s/%s", c.record.node.Builder, c.record.node.Name, c.record.Ref)
}
|
||||||
|
|
||||||
|
// Name returns the resolved display name, truncated from the left to 36
// characters for table output unless --no-trunc is set.
func (c *lsContext) Name() string {
	name := c.record.name
	if c.trunc && c.format.IsTable() {
		return trimBeginning(name, 36)
	}
	return name
}
|
||||||
|
|
||||||
|
func (c *lsContext) Status() string {
|
||||||
|
if c.record.CompletedAt != nil {
|
||||||
|
if c.record.Error != nil {
|
||||||
|
return "Error"
|
||||||
|
}
|
||||||
|
return "Completed"
|
||||||
|
}
|
||||||
|
return "Running"
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAt renders the creation time as a human-friendly relative age,
// e.g. "2 hours ago".
func (c *lsContext) CreatedAt() string {
	return units.HumanDuration(time.Since(c.record.CreatedAt.AsTime())) + " ago"
}
|
||||||
|
|
||||||
|
// Duration reports how long the build ran. Finished builds use
// CreatedAt..CompletedAt; running builds use the server timestamp
// captured at query time and get a trailing "+" to mark them as still
// in progress. Returns "" when no end timestamp is available.
func (c *lsContext) Duration() string {
	lastTime := c.record.currentTimestamp
	if c.record.CompletedAt != nil {
		tm := c.record.CompletedAt.AsTime()
		lastTime = &tm
	}
	if lastTime == nil {
		return ""
	}
	v := formatDuration(lastTime.Sub(c.record.CreatedAt.AsTime()))
	if c.record.CompletedAt == nil {
		v += "+"
	}
	return v
}
|
||||||
|
|
||||||
|
// Link returns the Docker Desktop URL for the record. In table output
// it is rendered as an ANSI "Open" hyperlink when stdout is a terminal,
// and omitted otherwise; non-table formats get the raw URL.
func (c *lsContext) Link() string {
	url := desktop.BuildURL(c.FullRef())
	if c.format.IsTable() {
		if c.isTerm {
			return desktop.ANSIHyperlink(url, "Open")
		}
		return ""
	}
	return url
}
|
||||||
73
commands/history/open.go
Normal file
73
commands/history/open.go
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
package history
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/docker/buildx/builder"
|
||||||
|
"github.com/docker/buildx/util/cobrautil/completion"
|
||||||
|
"github.com/docker/buildx/util/desktop"
|
||||||
|
"github.com/docker/cli/cli/command"
|
||||||
|
"github.com/pkg/browser"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
// openOptions holds the flag and argument values for "history open".
type openOptions struct {
	builder string // builder instance name, taken from the root --builder flag
	ref     string // build record ref to open; empty selects the latest record
}
|
||||||
|
|
||||||
|
// runOpen resolves a build record (the latest one when opts.ref is
// empty) and opens its Docker Desktop build page in the default browser.
func runOpen(ctx context.Context, dockerCli command.Cli, opts openOptions) error {
	b, err := builder.New(dockerCli, builder.WithName(opts.builder))
	if err != nil {
		return err
	}

	nodes, err := b.LoadNodes(ctx)
	if err != nil {
		return err
	}
	// Fail fast if any builder node is unhealthy.
	for _, node := range nodes {
		if node.Err != nil {
			return node.Err
		}
	}

	// Records are sorted newest first, so recs[0] is the best match.
	recs, err := queryRecords(ctx, opts.ref, nodes, nil)
	if err != nil {
		return err
	}

	if len(recs) == 0 {
		if opts.ref == "" {
			return errors.New("no records found")
		}
		return errors.Errorf("no record found for ref %q", opts.ref)
	}

	rec := &recs[0]

	// The Desktop URL is keyed by the full "builder/node/ref" identifier.
	url := desktop.BuildURL(fmt.Sprintf("%s/%s/%s", rec.node.Builder, rec.node.Name, rec.Ref))
	return browser.OpenURL(url)
}
|
||||||
|
|
||||||
|
func openCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
|
||||||
|
var options openOptions
|
||||||
|
|
||||||
|
cmd := &cobra.Command{
|
||||||
|
Use: "open [OPTIONS] [REF]",
|
||||||
|
Short: "Open a build in Docker Desktop",
|
||||||
|
Args: cobra.MaximumNArgs(1),
|
||||||
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
if len(args) > 0 {
|
||||||
|
options.ref = args[0]
|
||||||
|
}
|
||||||
|
options.builder = *rootOpts.Builder
|
||||||
|
return runOpen(cmd.Context(), dockerCli, options)
|
||||||
|
},
|
||||||
|
ValidArgsFunction: completion.Disable,
|
||||||
|
}
|
||||||
|
|
||||||
|
return cmd
|
||||||
|
}
|
||||||
151
commands/history/rm.go
Normal file
151
commands/history/rm.go
Normal file
@@ -0,0 +1,151 @@
|
|||||||
|
package history
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
|
||||||
|
"github.com/docker/buildx/builder"
|
||||||
|
"github.com/docker/buildx/util/cobrautil/completion"
|
||||||
|
"github.com/docker/cli/cli/command"
|
||||||
|
"github.com/hashicorp/go-multierror"
|
||||||
|
controlapi "github.com/moby/buildkit/api/services/control"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
"golang.org/x/sync/errgroup"
|
||||||
|
)
|
||||||
|
|
||||||
|
// rmOptions holds the flag and argument values for "history rm".
type rmOptions struct {
	builder string   // builder instance name, taken from the root --builder flag
	refs    []string // explicit record refs to delete (mutually exclusive with all)
	all     bool     // delete every completed record on each node
}
|
||||||
|
|
||||||
|
// runRm deletes build records on every node of the selected builder in
// parallel. Without --all it deletes the given refs and collects
// per-ref/per-node errors; with --all it first enumerates each node's
// completed records and deletes those, failing immediately on error.
func runRm(ctx context.Context, dockerCli command.Cli, opts rmOptions) error {
	b, err := builder.New(dockerCli, builder.WithName(opts.builder))
	if err != nil {
		return err
	}

	nodes, err := b.LoadNodes(ctx)
	if err != nil {
		return err
	}
	// Fail fast if any builder node is unhealthy.
	for _, node := range nodes {
		if node.Err != nil {
			return node.Err
		}
	}

	// errs[j][i] records the outcome of deleting opts.refs[j] on
	// nodes[i]. Only used for the explicit-refs path (not --all).
	errs := make([][]error, len(opts.refs))
	for i := range errs {
		errs[i] = make([]error, len(nodes))
	}

	eg, ctx := errgroup.WithContext(ctx)
	for i, node := range nodes {
		node := node // capture per-iteration copy for the closure
		eg.Go(func() error {
			// Nodes without a driver are silently skipped; their error
			// slots stay nil, which counts as success below.
			if node.Driver == nil {
				return nil
			}
			c, err := node.Driver.Client(ctx)
			if err != nil {
				return err
			}

			refs := opts.refs

			if opts.all {
				// Build this node's deletion list from its completed
				// history records.
				serv, err := c.ControlClient().ListenBuildHistory(ctx, &controlapi.BuildHistoryRequest{
					EarlyExit: true,
				})
				if err != nil {
					return err
				}
				defer serv.CloseSend()

				for {
					resp, err := serv.Recv()
					if err != nil {
						if errors.Is(err, io.EOF) {
							break
						}
						return err
					}
					if resp.Type == controlapi.BuildHistoryEventType_COMPLETE {
						refs = append(refs, resp.Record.Ref)
					}
				}
			}

			for j, ref := range refs {
				_, err = c.ControlClient().UpdateBuildHistory(ctx, &controlapi.UpdateBuildHistoryRequest{
					Ref:    ref,
					Delete: true,
				})
				if opts.all {
					// --all refs are node-local, so fail immediately
					// instead of recording into the shared matrix.
					if err != nil {
						return err
					}
				} else {
					errs[j][i] = err
				}
			}
			return nil
		})
	}

	if err := eg.Wait(); err != nil {
		return err
	}

	// A ref counts as removed if at least one node deleted it without
	// error; only refs that failed on every node contribute an error.
	var out []error
loop0:
	for _, nodeErrs := range errs {
		var nodeErr error
		for _, err1 := range nodeErrs {
			if err1 == nil {
				continue loop0
			}
			if nodeErr == nil {
				nodeErr = err1
			} else {
				nodeErr = multierror.Append(nodeErr, err1)
			}
		}
		out = append(out, nodeErr)
	}
	if len(out) == 0 {
		return nil
	}
	if len(out) == 1 {
		return out[0]
	}
	return multierror.Append(out[0], out[1:]...)
}
|
||||||
|
|
||||||
|
func rmCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
|
||||||
|
var options rmOptions
|
||||||
|
|
||||||
|
cmd := &cobra.Command{
|
||||||
|
Use: "rm [OPTIONS] [REF...]",
|
||||||
|
Short: "Remove build records",
|
||||||
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
if len(args) == 0 && !options.all {
|
||||||
|
return errors.New("rm requires at least one argument")
|
||||||
|
}
|
||||||
|
if len(args) > 0 && options.all {
|
||||||
|
return errors.New("rm requires either --all or at least one argument")
|
||||||
|
}
|
||||||
|
options.refs = args
|
||||||
|
options.builder = *rootOpts.Builder
|
||||||
|
return runRm(cmd.Context(), dockerCli, options)
|
||||||
|
},
|
||||||
|
ValidArgsFunction: completion.Disable,
|
||||||
|
}
|
||||||
|
|
||||||
|
flags := cmd.Flags()
|
||||||
|
flags.BoolVar(&options.all, "all", false, "Remove all build records")
|
||||||
|
|
||||||
|
return cmd
|
||||||
|
}
|
||||||
32
commands/history/root.go
Normal file
32
commands/history/root.go
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
package history
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/docker/buildx/util/cobrautil/completion"
|
||||||
|
"github.com/docker/cli/cli/command"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RootOptions carries configuration shared by every "history"
// subcommand.
type RootOptions struct {
	Builder *string // pointer to the root --builder flag value
}
|
||||||
|
|
||||||
|
// RootCmd wires up the "history" command group and registers all of its
// subcommands. Running "history" without a subcommand delegates to the
// parent command's RunE.
func RootCmd(rootcmd *cobra.Command, dockerCli command.Cli, opts RootOptions) *cobra.Command {
	cmd := &cobra.Command{
		Use:               "history",
		Short:             "Commands to work on build records",
		ValidArgsFunction: completion.Disable,
		RunE:              rootcmd.RunE,
	}

	cmd.AddCommand(
		lsCmd(dockerCli, opts),
		rmCmd(dockerCli, opts),
		logsCmd(dockerCli, opts),
		inspectCmd(dockerCli, opts),
		openCmd(dockerCli, opts),
		traceCmd(dockerCli, opts),
		importCmd(dockerCli, opts),
	)

	return cmd
}
|
||||||
228
commands/history/trace.go
Normal file
228
commands/history/trace.go
Normal file
@@ -0,0 +1,228 @@
|
|||||||
|
package history
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/containerd/console"
|
||||||
|
"github.com/containerd/containerd/v2/core/content/proxy"
|
||||||
|
"github.com/docker/buildx/builder"
|
||||||
|
"github.com/docker/buildx/util/cobrautil/completion"
|
||||||
|
"github.com/docker/buildx/util/otelutil"
|
||||||
|
"github.com/docker/buildx/util/otelutil/jaeger"
|
||||||
|
"github.com/docker/cli/cli/command"
|
||||||
|
controlapi "github.com/moby/buildkit/api/services/control"
|
||||||
|
"github.com/opencontainers/go-digest"
|
||||||
|
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
|
"github.com/pkg/browser"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
jaegerui "github.com/tonistiigi/jaeger-ui-rest"
|
||||||
|
)
|
||||||
|
|
||||||
|
// traceOptions holds the flag and argument values for "history trace".
type traceOptions struct {
	builder string // builder instance name, taken from the root --builder flag
	ref     string // build record ref to trace; empty selects the latest completed build
	addr    string // listen address for the embedded Jaeger UI server
	compare string // optional second ref to diff the trace against
}
|
||||||
|
|
||||||
|
// loadTrace fetches the OpenTelemetry trace of a completed build record
// and returns its Jaeger trace ID together with the trace re-encoded as
// Jaeger-UI-compatible JSON. An empty ref selects the latest completed
// build. If the record has no trace attached yet, the record is
// finalized on the server and re-queried once.
func loadTrace(ctx context.Context, ref string, nodes []builder.Node) (string, []byte, error) {
	recs, err := queryRecords(ctx, ref, nodes, &queryOptions{
		CompletedOnly: true,
	})
	if err != nil {
		return "", nil, err
	}

	if len(recs) == 0 {
		if ref == "" {
			return "", nil, errors.New("no records found")
		}
		return "", nil, errors.Errorf("no record found for ref %q", ref)
	}
	rec := &recs[0]

	if rec.CompletedAt == nil {
		return "", nil, errors.Errorf("build %q is not completed, only completed builds can be traced", rec.Ref)
	}

	if rec.Trace == nil {
		// build is complete but no trace yet. try to finalize the trace
		time.Sleep(1 * time.Second) // give some extra time for last parts of trace to be written

		c, err := rec.node.Driver.Client(ctx)
		if err != nil {
			return "", nil, err
		}
		// Ask buildkit to flush/attach the trace to the record.
		_, err = c.ControlClient().UpdateBuildHistory(ctx, &controlapi.UpdateBuildHistoryRequest{
			Ref:      rec.Ref,
			Finalize: true,
		})
		if err != nil {
			return "", nil, err
		}

		// Re-query only the node that owns the record.
		recs, err := queryRecords(ctx, rec.Ref, []builder.Node{*rec.node}, &queryOptions{
			CompletedOnly: true,
		})
		if err != nil {
			return "", nil, err
		}

		if len(recs) == 0 {
			return "", nil, errors.Errorf("build record %q was deleted", rec.Ref)
		}

		rec = &recs[0]
		if rec.Trace == nil {
			return "", nil, errors.Errorf("build record %q is missing a trace", rec.Ref)
		}
	}

	c, err := rec.node.Driver.Client(ctx)
	if err != nil {
		return "", nil, err
	}

	// Read the raw trace blob from the node's content store.
	store := proxy.NewContentStore(c.ContentClient())

	ra, err := store.ReaderAt(ctx, ocispecs.Descriptor{
		Digest:    digest.Digest(rec.Trace.Digest),
		MediaType: rec.Trace.MediaType,
		Size:      rec.Trace.Size,
	})
	if err != nil {
		return "", nil, err
	}

	// Parse OTel span stubs and convert them to Jaeger's JSON model.
	spans, err := otelutil.ParseSpanStubs(io.NewSectionReader(ra, 0, ra.Size()))
	if err != nil {
		return "", nil, err
	}

	// Jaeger UI expects the traces wrapped in a top-level "data" array.
	wrapper := struct {
		Data []jaeger.Trace `json:"data"`
	}{
		Data: spans.JaegerData().Data,
	}

	if len(wrapper.Data) == 0 {
		return "", nil, errors.New("no trace data")
	}

	buf := &bytes.Buffer{}
	enc := json.NewEncoder(buf)
	enc.SetIndent("", "  ")
	if err := enc.Encode(wrapper); err != nil {
		return "", nil, err
	}

	return string(wrapper.Data[0].TraceID), buf.Bytes(), nil
}
|
||||||
|
|
||||||
|
func runTrace(ctx context.Context, dockerCli command.Cli, opts traceOptions) error {
|
||||||
|
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
nodes, err := b.LoadNodes(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, node := range nodes {
|
||||||
|
if node.Err != nil {
|
||||||
|
return node.Err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
traceID, data, err := loadTrace(ctx, opts.ref, nodes)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
srv := jaegerui.NewServer(jaegerui.Config{})
|
||||||
|
if err := srv.AddTrace(traceID, bytes.NewReader(data)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
url := "/trace/" + traceID
|
||||||
|
|
||||||
|
if opts.compare != "" {
|
||||||
|
traceIDcomp, data, err := loadTrace(ctx, opts.compare, nodes)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to load trace for %s", opts.compare)
|
||||||
|
}
|
||||||
|
if err := srv.AddTrace(traceIDcomp, bytes.NewReader(data)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
url = "/trace/" + traceIDcomp + "..." + traceID
|
||||||
|
}
|
||||||
|
|
||||||
|
var term bool
|
||||||
|
if _, err := console.ConsoleFromFile(os.Stdout); err == nil {
|
||||||
|
term = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if !term && opts.compare == "" {
|
||||||
|
fmt.Fprintln(dockerCli.Out(), string(data))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
ln, err := net.Listen("tcp", opts.addr)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
browser.OpenURL(url)
|
||||||
|
}()
|
||||||
|
|
||||||
|
url = "http://" + ln.Addr().String() + url
|
||||||
|
fmt.Fprintf(dockerCli.Err(), "Trace available at %s\n", url)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
<-ctx.Done()
|
||||||
|
ln.Close()
|
||||||
|
}()
|
||||||
|
|
||||||
|
err = srv.Serve(ln)
|
||||||
|
if err != nil {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return nil
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func traceCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
|
||||||
|
var options traceOptions
|
||||||
|
|
||||||
|
cmd := &cobra.Command{
|
||||||
|
Use: "trace [OPTIONS] [REF]",
|
||||||
|
Short: "Show the OpenTelemetry trace of a build record",
|
||||||
|
Args: cobra.MaximumNArgs(1),
|
||||||
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
if len(args) > 0 {
|
||||||
|
options.ref = args[0]
|
||||||
|
}
|
||||||
|
options.builder = *rootOpts.Builder
|
||||||
|
return runTrace(cmd.Context(), dockerCli, options)
|
||||||
|
},
|
||||||
|
ValidArgsFunction: completion.Disable,
|
||||||
|
}
|
||||||
|
|
||||||
|
flags := cmd.Flags()
|
||||||
|
flags.StringVar(&options.addr, "addr", "127.0.0.1:0", "Address to bind the UI server")
|
||||||
|
flags.StringVar(&options.compare, "compare", "", "Compare with another build reference")
|
||||||
|
|
||||||
|
return cmd
|
||||||
|
}
|
||||||
221
commands/history/utils.go
Normal file
221
commands/history/utils.go
Normal file
@@ -0,0 +1,221 @@
|
|||||||
|
package history
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"path/filepath"
|
||||||
|
"slices"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/docker/buildx/build"
|
||||||
|
"github.com/docker/buildx/builder"
|
||||||
|
"github.com/docker/buildx/localstate"
|
||||||
|
controlapi "github.com/moby/buildkit/api/services/control"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"golang.org/x/sync/errgroup"
|
||||||
|
)
|
||||||
|
|
||||||
|
// buildName derives a human-readable display name for a build record
// from its frontend attributes and (optionally) local build state.
// The name prefers the context path, falling back to a path derived
// from the dockerfile location; a build target is appended in
// parentheses, and the VCS source is used as a last resort.
// ls may be nil when no local state exists for the record.
func buildName(fattrs map[string]string, ls *localstate.State) string {
	var res string

	var target, contextPath, dockerfilePath, vcsSource string
	if v, ok := fattrs["target"]; ok {
		target = v
	}
	if v, ok := fattrs["context"]; ok {
		contextPath = filepath.ToSlash(v)
	} else if v, ok := fattrs["vcs:localdir:context"]; ok && v != "." {
		contextPath = filepath.ToSlash(v)
	}
	if v, ok := fattrs["vcs:source"]; ok {
		vcsSource = v
	}
	// Only a non-default dockerfile name is worth displaying.
	if v, ok := fattrs["filename"]; ok && v != "Dockerfile" {
		dockerfilePath = filepath.ToSlash(v)
	}
	if v, ok := fattrs["vcs:localdir:dockerfile"]; ok && v != "." {
		dockerfilePath = filepath.ToSlash(filepath.Join(v, dockerfilePath))
	}

	// Local state (when available and not a remote/stdin build) gives
	// more precise client-side paths than the frontend attributes.
	var localPath string
	if ls != nil && !build.IsRemoteURL(ls.LocalPath) {
		if ls.LocalPath != "" && ls.LocalPath != "-" {
			localPath = filepath.ToSlash(ls.LocalPath)
		}
		if ls.DockerfilePath != "" && ls.DockerfilePath != "-" && ls.DockerfilePath != "Dockerfile" {
			dockerfilePath = filepath.ToSlash(ls.DockerfilePath)
		}
	}

	// remove default dockerfile name
	const defaultFilename = "/Dockerfile"
	hasDefaultFileName := strings.HasSuffix(dockerfilePath, defaultFilename) || dockerfilePath == ""
	dockerfilePath = strings.TrimSuffix(dockerfilePath, defaultFilename)

	// dockerfile is a subpath of context
	if strings.HasPrefix(dockerfilePath, localPath) && len(dockerfilePath) > len(localPath) {
		res = dockerfilePath[strings.LastIndex(localPath, "/")+1:]
	} else {
		// Otherwise, use basename
		bpath := localPath
		if len(dockerfilePath) > 0 {
			bpath = dockerfilePath
		}
		if len(bpath) > 0 {
			lidx := strings.LastIndex(bpath, "/")
			res = bpath[lidx+1:]
			// Non-default dockerfile names keep one parent directory for
			// disambiguation.
			if !hasDefaultFileName {
				if lidx != -1 {
					res = filepath.ToSlash(filepath.Join(filepath.Base(bpath[:lidx]), res))
				} else {
					res = filepath.ToSlash(filepath.Join(filepath.Base(bpath), res))
				}
			}
		}
	}

	// An explicit context path overrides the dockerfile-derived name.
	if len(contextPath) > 0 {
		res = contextPath
	}
	if len(target) > 0 {
		if len(res) > 0 {
			res = res + " (" + target + ")"
		} else {
			res = target
		}
	}
	if res == "" && vcsSource != "" {
		return vcsSource
	}
	return res
}
|
||||||
|
|
||||||
|
// trimBeginning shortens s to at most n characters by dropping the
// beginning of the string and prefixing the remainder with "..".
// Strings that already fit are returned unchanged.
func trimBeginning(s string, n int) string {
	if overflow := len(s) - n; overflow > 0 {
		return ".." + s[overflow+2:]
	}
	return s
}
|
||||||
|
|
||||||
|
// historyRecord couples a buildkit build history record with the node
// it was read from, plus presentation metadata gathered at query time.
type historyRecord struct {
	*controlapi.BuildHistoryRecord
	currentTimestamp *time.Time    // server-side "now" from the query response header, if provided; used to compute durations of running builds
	node             *builder.Node // node that owns this record
	name             string        // display name resolved via buildName
}
|
||||||
|
|
||||||
|
// queryOptions tunes queryRecords.
type queryOptions struct {
	CompletedOnly bool // only return records of finished builds
}
|
||||||
|
|
||||||
|
// queryRecords fetches build history records matching ref from every
// node in parallel and returns them sorted newest first. A ref of the
// form "^N" is treated as an offset instead: the single record N places
// from the newest is returned. opts may be nil.
func queryRecords(ctx context.Context, ref string, nodes []builder.Node, opts *queryOptions) ([]historyRecord, error) {
	var mu sync.Mutex
	var out []historyRecord

	// Parse the "^N" offset syntax; the ref itself is then cleared so
	// all records are listed and filtered by position below.
	var offset *int
	if strings.HasPrefix(ref, "^") {
		off, err := strconv.Atoi(ref[1:])
		if err != nil {
			return nil, errors.Wrapf(err, "invalid offset %q", ref)
		}
		offset = &off
		ref = ""
	}

	eg, ctx := errgroup.WithContext(ctx)
	for _, node := range nodes {
		node := node // capture per-iteration copy; &node below must be stable
		eg.Go(func() error {
			// Nodes without a driver are silently skipped.
			if node.Driver == nil {
				return nil
			}
			var records []historyRecord
			c, err := node.Driver.Client(ctx)
			if err != nil {
				return err
			}
			serv, err := c.ControlClient().ListenBuildHistory(ctx, &controlapi.BuildHistoryRequest{
				EarlyExit: true,
				Ref:       ref,
			})
			if err != nil {
				return err
			}
			// The response header may carry the server's current time,
			// used for duration display of still-running builds.
			md, err := serv.Header()
			if err != nil {
				return err
			}
			var ts *time.Time
			if v, ok := md[headerKeyTimestamp]; ok {
				t, err := time.Parse(time.RFC3339Nano, v[0])
				if err != nil {
					return err
				}
				ts = &t
			}
			defer serv.CloseSend()
			// Drain the stream until EOF, keeping live records only.
			for {
				he, err := serv.Recv()
				if err != nil {
					if errors.Is(err, io.EOF) {
						break
					}
					return err
				}
				if he.Type == controlapi.BuildHistoryEventType_DELETED || he.Record == nil {
					continue
				}
				if opts != nil && opts.CompletedOnly && he.Type != controlapi.BuildHistoryEventType_COMPLETE {
					continue
				}

				records = append(records, historyRecord{
					BuildHistoryRecord: he.Record,
					currentTimestamp:   ts,
					node:               &node,
				})
			}
			mu.Lock()
			out = append(out, records...)
			mu.Unlock()
			return nil
		})
	}

	if err := eg.Wait(); err != nil {
		return nil, err
	}

	// Newest first across all nodes.
	slices.SortFunc(out, func(a, b historyRecord) int {
		return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
	})

	// Offset mode: skip *offset records, keep exactly the next one.
	if offset != nil {
		var filtered []historyRecord
		for _, r := range out {
			if *offset > 0 {
				*offset--
				continue
			}
			filtered = append(filtered, r)
			break
		}
		if *offset > 0 {
			return nil, errors.Errorf("no completed build found with offset %d", *offset)
		}
		out = filtered
	}

	return out, nil
}
|
||||||
|
|
||||||
|
// formatDuration renders a duration for the history tables: fractional
// seconds below one minute (e.g. "42.3s"), otherwise whole minutes plus
// a two-character-wide seconds field (e.g. "1m  5s", "2m 30s").
func formatDuration(d time.Duration) string {
	if d >= time.Minute {
		mins := int(d.Minutes())
		secs := int(d.Seconds()) % 60
		return fmt.Sprintf("%dm %2ds", mins, secs)
	}
	return fmt.Sprintf("%.1fs", d.Seconds())
}
|
||||||
@@ -42,7 +42,7 @@ func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, arg
|
|||||||
return errors.Errorf("can't push with no tags specified, please set --tag or --dry-run")
|
return errors.Errorf("can't push with no tags specified, please set --tag or --dry-run")
|
||||||
}
|
}
|
||||||
|
|
||||||
fileArgs := make([]string, len(in.files))
|
fileArgs := make([]string, len(in.files), len(in.files)+len(args))
|
||||||
for i, f := range in.files {
|
for i, f := range in.files {
|
||||||
dt, err := os.ReadFile(f)
|
dt, err := os.ReadFile(f)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -173,8 +173,8 @@ func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, arg
|
|||||||
// new resolver cause need new auth
|
// new resolver cause need new auth
|
||||||
r = imagetools.New(imageopt)
|
r = imagetools.New(imageopt)
|
||||||
|
|
||||||
ctx2, cancel := context.WithCancel(context.TODO())
|
ctx2, cancel := context.WithCancelCause(context.TODO())
|
||||||
defer cancel()
|
defer func() { cancel(errors.WithStack(context.Canceled)) }()
|
||||||
printer, err := progress.NewPrinter(ctx2, os.Stderr, progressui.DisplayMode(in.progress))
|
printer, err := progress.NewPrinter(ctx2, os.Stderr, progressui.DisplayMode(in.progress))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -194,7 +194,7 @@ func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, arg
|
|||||||
}
|
}
|
||||||
s := s
|
s := s
|
||||||
eg2.Go(func() error {
|
eg2.Go(func() error {
|
||||||
sub.Log(1, []byte(fmt.Sprintf("copying %s from %s to %s\n", s.Desc.Digest.String(), s.Ref.String(), t.String())))
|
sub.Log(1, fmt.Appendf(nil, "copying %s from %s to %s\n", s.Desc.Digest.String(), s.Ref.String(), t.String()))
|
||||||
return r.Copy(ctx, s, t)
|
return r.Copy(ctx, s, t)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -202,7 +202,7 @@ func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, arg
|
|||||||
if err := eg2.Wait(); err != nil {
|
if err := eg2.Wait(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
sub.Log(1, []byte(fmt.Sprintf("pushing %s to %s\n", desc.Digest.String(), t.String())))
|
sub.Log(1, fmt.Appendf(nil, "pushing %s to %s\n", desc.Digest.String(), t.String()))
|
||||||
return r.Push(ctx, t, desc, dt)
|
return r.Push(ctx, t, desc, dt)
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|||||||
@@ -17,6 +17,7 @@ import (
|
|||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/docker/cli/cli/debug"
|
"github.com/docker/cli/cli/debug"
|
||||||
"github.com/docker/go-units"
|
"github.com/docker/go-units"
|
||||||
|
"github.com/pkg/errors"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -34,8 +35,9 @@ func runInspect(ctx context.Context, dockerCli command.Cli, in inspectOptions) e
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
|
timeoutCtx, cancel := context.WithCancelCause(ctx)
|
||||||
defer cancel()
|
timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
|
||||||
|
defer func() { cancel(errors.WithStack(context.Canceled)) }()
|
||||||
|
|
||||||
nodes, err := b.LoadNodes(timeoutCtx, builder.WithData())
|
nodes, err := b.LoadNodes(timeoutCtx, builder.WithData())
|
||||||
if in.bootstrap {
|
if in.bootstrap {
|
||||||
@@ -113,6 +115,25 @@ func runInspect(ctx context.Context, dockerCli command.Cli, in inspectOptions) e
|
|||||||
fmt.Fprintf(w, "\t%s:\t%s\n", k, v)
|
fmt.Fprintf(w, "\t%s:\t%s\n", k, v)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if len(nodes[i].CDIDevices) > 0 {
|
||||||
|
fmt.Fprintf(w, "Devices:\n")
|
||||||
|
for _, dev := range nodes[i].CDIDevices {
|
||||||
|
fmt.Fprintf(w, "\tName:\t%s\n", dev.Name)
|
||||||
|
if dev.OnDemand {
|
||||||
|
fmt.Fprintf(w, "\tOn-Demand:\t%v\n", dev.OnDemand)
|
||||||
|
} else {
|
||||||
|
fmt.Fprintf(w, "\tAutomatically allowed:\t%v\n", dev.AutoAllow)
|
||||||
|
}
|
||||||
|
if len(dev.Annotations) > 0 {
|
||||||
|
fmt.Fprintf(w, "\tAnnotations:\n")
|
||||||
|
for k, v := range dev.Annotations {
|
||||||
|
fmt.Fprintf(w, "\t\t%s:\t%s\n", k, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
for ri, rule := range nodes[i].GCPolicy {
|
for ri, rule := range nodes[i].GCPolicy {
|
||||||
fmt.Fprintf(w, "GC Policy rule#%d:\n", ri)
|
fmt.Fprintf(w, "GC Policy rule#%d:\n", ri)
|
||||||
fmt.Fprintf(w, "\tAll:\t%v\n", rule.All)
|
fmt.Fprintf(w, "\tAll:\t%v\n", rule.All)
|
||||||
|
|||||||
@@ -18,6 +18,7 @@ import (
|
|||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/docker/cli/cli/command/formatter"
|
"github.com/docker/cli/cli/command/formatter"
|
||||||
|
"github.com/pkg/errors"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
)
|
)
|
||||||
@@ -57,8 +58,9 @@ func runLs(ctx context.Context, dockerCli command.Cli, in lsOptions) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
|
timeoutCtx, cancel := context.WithCancelCause(ctx)
|
||||||
defer cancel()
|
timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
|
||||||
|
defer func() { cancel(errors.WithStack(context.Canceled)) }()
|
||||||
|
|
||||||
eg, _ := errgroup.WithContext(timeoutCtx)
|
eg, _ := errgroup.WithContext(timeoutCtx)
|
||||||
for _, b := range builders {
|
for _, b := range builders {
|
||||||
@@ -157,6 +159,9 @@ func lsPrint(dockerCli command.Cli, current *store.NodeGroup, builders []*builde
|
|||||||
}
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
if ctx.Format.IsJSON() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
for _, n := range b.Nodes() {
|
for _, n := range b.Nodes() {
|
||||||
if n.Err != nil {
|
if n.Err != nil {
|
||||||
if ctx.Format.IsTable() {
|
if ctx.Format.IsTable() {
|
||||||
@@ -319,7 +324,7 @@ func (tp truncatedPlatforms) String() string {
|
|||||||
if tpf, ok := tp.res[mpf]; ok {
|
if tpf, ok := tp.res[mpf]; ok {
|
||||||
seen[mpf] = struct{}{}
|
seen[mpf] = struct{}{}
|
||||||
if len(tpf) == 1 {
|
if len(tpf) == 1 {
|
||||||
out = append(out, fmt.Sprintf("%s", tpf[0]))
|
out = append(out, tpf[0])
|
||||||
count++
|
count++
|
||||||
} else {
|
} else {
|
||||||
hasPreferredPlatform := false
|
hasPreferredPlatform := false
|
||||||
@@ -347,7 +352,7 @@ func (tp truncatedPlatforms) String() string {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if len(tp.res[mpf]) == 1 {
|
if len(tp.res[mpf]) == 1 {
|
||||||
out = append(out, fmt.Sprintf("%s", tp.res[mpf][0]))
|
out = append(out, tp.res[mpf][0])
|
||||||
count++
|
count++
|
||||||
} else {
|
} else {
|
||||||
hasPreferredPlatform := false
|
hasPreferredPlatform := false
|
||||||
|
|||||||
@@ -16,6 +16,9 @@ import (
|
|||||||
"github.com/docker/docker/api/types/filters"
|
"github.com/docker/docker/api/types/filters"
|
||||||
"github.com/docker/go-units"
|
"github.com/docker/go-units"
|
||||||
"github.com/moby/buildkit/client"
|
"github.com/moby/buildkit/client"
|
||||||
|
gateway "github.com/moby/buildkit/frontend/gateway/client"
|
||||||
|
pb "github.com/moby/buildkit/solver/pb"
|
||||||
|
"github.com/moby/buildkit/util/apicaps"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
@@ -107,6 +110,17 @@ func runPrune(ctx context.Context, dockerCli command.Cli, opts pruneOptions) err
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
// check if the client supports newer prune options
|
||||||
|
if opts.maxUsedSpace.Value() != 0 || opts.minFreeSpace.Value() != 0 {
|
||||||
|
caps, err := loadLLBCaps(ctx, c)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "failed to load buildkit capabilities for prune")
|
||||||
|
}
|
||||||
|
if caps.Supports(pb.CapGCFreeSpaceFilter) != nil {
|
||||||
|
return errors.New("buildkit v0.17.0+ is required for max-used-space and min-free-space filters")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
popts := []client.PruneOption{
|
popts := []client.PruneOption{
|
||||||
client.WithKeepOpt(pi.KeepDuration, opts.reservedSpace.Value(), opts.maxUsedSpace.Value(), opts.minFreeSpace.Value()),
|
client.WithKeepOpt(pi.KeepDuration, opts.reservedSpace.Value(), opts.maxUsedSpace.Value(), opts.minFreeSpace.Value()),
|
||||||
client.WithFilter(pi.Filter),
|
client.WithFilter(pi.Filter),
|
||||||
@@ -133,6 +147,17 @@ func runPrune(ctx context.Context, dockerCli command.Cli, opts pruneOptions) err
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func loadLLBCaps(ctx context.Context, c *client.Client) (apicaps.CapSet, error) {
|
||||||
|
var caps apicaps.CapSet
|
||||||
|
_, err := c.Build(ctx, client.SolveOpt{
|
||||||
|
Internal: true,
|
||||||
|
}, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
|
||||||
|
caps = c.BuildOpts().LLBCaps
|
||||||
|
return nil, nil
|
||||||
|
}, nil)
|
||||||
|
return caps, err
|
||||||
|
}
|
||||||
|
|
||||||
func pruneCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
func pruneCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
||||||
options := pruneOptions{filter: opts.NewFilterOpt()}
|
options := pruneOptions{filter: opts.NewFilterOpt()}
|
||||||
|
|
||||||
|
|||||||
@@ -150,8 +150,9 @@ func rmAllInactive(ctx context.Context, txn *store.Txn, dockerCli command.Cli, i
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
|
timeoutCtx, cancel := context.WithCancelCause(ctx)
|
||||||
defer cancel()
|
timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
|
||||||
|
defer func() { cancel(errors.WithStack(context.Canceled)) }()
|
||||||
|
|
||||||
eg, _ := errgroup.WithContext(timeoutCtx)
|
eg, _ := errgroup.WithContext(timeoutCtx)
|
||||||
for _, b := range builders {
|
for _, b := range builders {
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
|
|
||||||
debugcmd "github.com/docker/buildx/commands/debug"
|
debugcmd "github.com/docker/buildx/commands/debug"
|
||||||
|
historycmd "github.com/docker/buildx/commands/history"
|
||||||
imagetoolscmd "github.com/docker/buildx/commands/imagetools"
|
imagetoolscmd "github.com/docker/buildx/commands/imagetools"
|
||||||
"github.com/docker/buildx/controller/remote"
|
"github.com/docker/buildx/controller/remote"
|
||||||
"github.com/docker/buildx/util/cobrautil/completion"
|
"github.com/docker/buildx/util/cobrautil/completion"
|
||||||
@@ -106,6 +107,7 @@ func addCommands(cmd *cobra.Command, opts *rootOptions, dockerCli command.Cli) {
|
|||||||
pruneCmd(dockerCli, opts),
|
pruneCmd(dockerCli, opts),
|
||||||
duCmd(dockerCli, opts),
|
duCmd(dockerCli, opts),
|
||||||
imagetoolscmd.RootCmd(cmd, dockerCli, imagetoolscmd.RootOptions{Builder: &opts.builder}),
|
imagetoolscmd.RootCmd(cmd, dockerCli, imagetoolscmd.RootOptions{Builder: &opts.builder}),
|
||||||
|
historycmd.RootCmd(cmd, dockerCli, historycmd.RootOptions{Builder: &opts.builder}),
|
||||||
)
|
)
|
||||||
if confutil.IsExperimental() {
|
if confutil.IsExperimental() {
|
||||||
cmd.AddCommand(debugcmd.RootCmd(dockerCli,
|
cmd.AddCommand(debugcmd.RootCmd(dockerCli,
|
||||||
|
|||||||
@@ -46,7 +46,6 @@ func runUse(dockerCli command.Cli, in useOptions) error {
|
|||||||
return errors.Errorf("run `docker context use %s` to switch to context %s", in.builder, in.builder)
|
return errors.Errorf("run `docker context use %s` to switch to context %s", in.builder, in.builder)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
return errors.Wrapf(err, "failed to find instance %q", in.builder)
|
return errors.Wrapf(err, "failed to find instance %q", in.builder)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -75,7 +75,9 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in *controllerapi.Buil
|
|||||||
opts.Platforms = platforms
|
opts.Platforms = platforms
|
||||||
|
|
||||||
dockerConfig := dockerCli.ConfigFile()
|
dockerConfig := dockerCli.ConfigFile()
|
||||||
opts.Session = append(opts.Session, authprovider.NewDockerAuthProvider(dockerConfig, nil))
|
opts.Session = append(opts.Session, authprovider.NewDockerAuthProvider(authprovider.DockerAuthProviderConfig{
|
||||||
|
ConfigFile: dockerConfig,
|
||||||
|
}))
|
||||||
|
|
||||||
secrets, err := controllerapi.CreateSecrets(in.Secrets)
|
secrets, err := controllerapi.CreateSecrets(in.Secrets)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -93,7 +95,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in *controllerapi.Buil
|
|||||||
}
|
}
|
||||||
opts.Session = append(opts.Session, ssh)
|
opts.Session = append(opts.Session, ssh)
|
||||||
|
|
||||||
outputs, err := controllerapi.CreateExports(in.Exports)
|
outputs, _, err := controllerapi.CreateExports(in.Exports)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, nil, err
|
return nil, nil, nil, err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -13,8 +13,8 @@ import (
|
|||||||
type BuildxController interface {
|
type BuildxController interface {
|
||||||
Build(ctx context.Context, options *controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (ref string, resp *client.SolveResponse, inputs *build.Inputs, err error)
|
Build(ctx context.Context, options *controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (ref string, resp *client.SolveResponse, inputs *build.Inputs, err error)
|
||||||
// Invoke starts an IO session into the specified process.
|
// Invoke starts an IO session into the specified process.
|
||||||
// If pid doesn't matche to any running processes, it starts a new process with the specified config.
|
// If pid doesn't match to any running processes, it starts a new process with the specified config.
|
||||||
// If there is no container running or InvokeConfig.Rollback is speicfied, the process will start in a newly created container.
|
// If there is no container running or InvokeConfig.Rollback is specified, the process will start in a newly created container.
|
||||||
// NOTE: If needed, in the future, we can split this API into three APIs (NewContainer, NewProcess and Attach).
|
// NOTE: If needed, in the future, we can split this API into three APIs (NewContainer, NewProcess and Attach).
|
||||||
Invoke(ctx context.Context, ref, pid string, options *controllerapi.InvokeConfig, ioIn io.ReadCloser, ioOut io.WriteCloser, ioErr io.WriteCloser) error
|
Invoke(ctx context.Context, ref, pid string, options *controllerapi.InvokeConfig, ioIn io.ReadCloser, ioOut io.WriteCloser, ioErr io.WriteCloser) error
|
||||||
Kill(ctx context.Context) error
|
Kill(ctx context.Context) error
|
||||||
|
|||||||
@@ -109,7 +109,7 @@ func (b *localController) Invoke(ctx context.Context, sessionID string, pid stri
|
|||||||
|
|
||||||
// Attach containerIn to this process
|
// Attach containerIn to this process
|
||||||
ioCancelledCh := make(chan struct{})
|
ioCancelledCh := make(chan struct{})
|
||||||
proc.ForwardIO(&ioset.In{Stdin: ioIn, Stdout: ioOut, Stderr: ioErr}, func() { close(ioCancelledCh) })
|
proc.ForwardIO(&ioset.In{Stdin: ioIn, Stdout: ioOut, Stderr: ioErr}, func(error) { close(ioCancelledCh) })
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case <-ioCancelledCh:
|
case <-ioCancelledCh:
|
||||||
@@ -117,7 +117,7 @@ func (b *localController) Invoke(ctx context.Context, sessionID string, pid stri
|
|||||||
case err := <-proc.Done():
|
case err := <-proc.Done():
|
||||||
return err
|
return err
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
return ctx.Err()
|
return context.Cause(ctx)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -10,15 +10,16 @@ import (
|
|||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
func CreateExports(entries []*ExportEntry) ([]client.ExportEntry, error) {
|
func CreateExports(entries []*ExportEntry) ([]client.ExportEntry, []string, error) {
|
||||||
var outs []client.ExportEntry
|
var outs []client.ExportEntry
|
||||||
|
var localPaths []string
|
||||||
if len(entries) == 0 {
|
if len(entries) == 0 {
|
||||||
return nil, nil
|
return nil, nil, nil
|
||||||
}
|
}
|
||||||
var stdoutUsed bool
|
var stdoutUsed bool
|
||||||
for _, entry := range entries {
|
for _, entry := range entries {
|
||||||
if entry.Type == "" {
|
if entry.Type == "" {
|
||||||
return nil, errors.Errorf("type is required for output")
|
return nil, nil, errors.Errorf("type is required for output")
|
||||||
}
|
}
|
||||||
|
|
||||||
out := client.ExportEntry{
|
out := client.ExportEntry{
|
||||||
@@ -45,24 +46,26 @@ func CreateExports(entries []*ExportEntry) ([]client.ExportEntry, error) {
|
|||||||
supportDir = !tar
|
supportDir = !tar
|
||||||
case "registry":
|
case "registry":
|
||||||
out.Type = client.ExporterImage
|
out.Type = client.ExporterImage
|
||||||
|
out.Attrs["push"] = "true"
|
||||||
}
|
}
|
||||||
|
|
||||||
if supportDir {
|
if supportDir {
|
||||||
if entry.Destination == "" {
|
if entry.Destination == "" {
|
||||||
return nil, errors.Errorf("dest is required for %s exporter", out.Type)
|
return nil, nil, errors.Errorf("dest is required for %s exporter", out.Type)
|
||||||
}
|
}
|
||||||
if entry.Destination == "-" {
|
if entry.Destination == "-" {
|
||||||
return nil, errors.Errorf("dest cannot be stdout for %s exporter", out.Type)
|
return nil, nil, errors.Errorf("dest cannot be stdout for %s exporter", out.Type)
|
||||||
}
|
}
|
||||||
|
|
||||||
fi, err := os.Stat(entry.Destination)
|
fi, err := os.Stat(entry.Destination)
|
||||||
if err != nil && !os.IsNotExist(err) {
|
if err != nil && !os.IsNotExist(err) {
|
||||||
return nil, errors.Wrapf(err, "invalid destination directory: %s", entry.Destination)
|
return nil, nil, errors.Wrapf(err, "invalid destination directory: %s", entry.Destination)
|
||||||
}
|
}
|
||||||
if err == nil && !fi.IsDir() {
|
if err == nil && !fi.IsDir() {
|
||||||
return nil, errors.Errorf("destination directory %s is a file", entry.Destination)
|
return nil, nil, errors.Errorf("destination directory %s is a file", entry.Destination)
|
||||||
}
|
}
|
||||||
out.OutputDir = entry.Destination
|
out.OutputDir = entry.Destination
|
||||||
|
localPaths = append(localPaths, entry.Destination)
|
||||||
}
|
}
|
||||||
if supportFile {
|
if supportFile {
|
||||||
if entry.Destination == "" && out.Type != client.ExporterDocker {
|
if entry.Destination == "" && out.Type != client.ExporterDocker {
|
||||||
@@ -70,32 +73,33 @@ func CreateExports(entries []*ExportEntry) ([]client.ExportEntry, error) {
|
|||||||
}
|
}
|
||||||
if entry.Destination == "-" {
|
if entry.Destination == "-" {
|
||||||
if stdoutUsed {
|
if stdoutUsed {
|
||||||
return nil, errors.Errorf("multiple outputs configured to write to stdout")
|
return nil, nil, errors.Errorf("multiple outputs configured to write to stdout")
|
||||||
}
|
}
|
||||||
if _, err := console.ConsoleFromFile(os.Stdout); err == nil {
|
if _, err := console.ConsoleFromFile(os.Stdout); err == nil {
|
||||||
return nil, errors.Errorf("dest file is required for %s exporter. refusing to write to console", out.Type)
|
return nil, nil, errors.Errorf("dest file is required for %s exporter. refusing to write to console", out.Type)
|
||||||
}
|
}
|
||||||
out.Output = wrapWriteCloser(os.Stdout)
|
out.Output = wrapWriteCloser(os.Stdout)
|
||||||
stdoutUsed = true
|
stdoutUsed = true
|
||||||
} else if entry.Destination != "" {
|
} else if entry.Destination != "" {
|
||||||
fi, err := os.Stat(entry.Destination)
|
fi, err := os.Stat(entry.Destination)
|
||||||
if err != nil && !os.IsNotExist(err) {
|
if err != nil && !os.IsNotExist(err) {
|
||||||
return nil, errors.Wrapf(err, "invalid destination file: %s", entry.Destination)
|
return nil, nil, errors.Wrapf(err, "invalid destination file: %s", entry.Destination)
|
||||||
}
|
}
|
||||||
if err == nil && fi.IsDir() {
|
if err == nil && fi.IsDir() {
|
||||||
return nil, errors.Errorf("destination file %s is a directory", entry.Destination)
|
return nil, nil, errors.Errorf("destination file %s is a directory", entry.Destination)
|
||||||
}
|
}
|
||||||
f, err := os.Create(entry.Destination)
|
f, err := os.Create(entry.Destination)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Errorf("failed to open %s", err)
|
return nil, nil, errors.Errorf("failed to open %s", err)
|
||||||
}
|
}
|
||||||
out.Output = wrapWriteCloser(f)
|
out.Output = wrapWriteCloser(f)
|
||||||
|
localPaths = append(localPaths, entry.Destination)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
outs = append(outs, out)
|
outs = append(outs, out)
|
||||||
}
|
}
|
||||||
return outs, nil
|
return outs, localPaths, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func wrapWriteCloser(wc io.WriteCloser) func(map[string]string) (io.WriteCloser, error) {
|
func wrapWriteCloser(wc io.WriteCloser) func(map[string]string) (io.WriteCloser, error) {
|
||||||
|
|||||||
@@ -153,7 +153,6 @@ func ResolveOptionPaths(options *BuildOptions) (_ *BuildOptions, err error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
ps = append(ps, p)
|
ps = append(ps, p)
|
||||||
|
|
||||||
}
|
}
|
||||||
s.Paths = ps
|
s.Paths = ps
|
||||||
ssh = append(ssh, s)
|
ssh = append(ssh, s)
|
||||||
|
|||||||
@@ -22,15 +22,13 @@ func (w *writer) Write(status *client.SolveStatus) {
|
|||||||
w.ch <- ToControlStatus(status)
|
w.ch <- ToControlStatus(status)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (w *writer) WriteBuildRef(target string, ref string) {
|
func (w *writer) WriteBuildRef(target string, ref string) {}
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *writer) ValidateLogSource(digest.Digest, interface{}) bool {
|
func (w *writer) ValidateLogSource(digest.Digest, any) bool {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func (w *writer) ClearLogSource(interface{}) {}
|
func (w *writer) ClearLogSource(any) {}
|
||||||
|
|
||||||
func ToControlStatus(s *client.SolveStatus) *StatusResponse {
|
func ToControlStatus(s *client.SolveStatus) *StatusResponse {
|
||||||
resp := StatusResponse{}
|
resp := StatusResponse{}
|
||||||
|
|||||||
@@ -1,6 +1,8 @@
|
|||||||
package pb
|
package pb
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"slices"
|
||||||
|
|
||||||
"github.com/moby/buildkit/session"
|
"github.com/moby/buildkit/session"
|
||||||
"github.com/moby/buildkit/session/sshforward/sshprovider"
|
"github.com/moby/buildkit/session/sshforward/sshprovider"
|
||||||
)
|
)
|
||||||
@@ -10,7 +12,7 @@ func CreateSSH(ssh []*SSH) (session.Attachable, error) {
|
|||||||
for _, ssh := range ssh {
|
for _, ssh := range ssh {
|
||||||
cfg := sshprovider.AgentConfig{
|
cfg := sshprovider.AgentConfig{
|
||||||
ID: ssh.ID,
|
ID: ssh.ID,
|
||||||
Paths: append([]string{}, ssh.Paths...),
|
Paths: slices.Clone(ssh.Paths),
|
||||||
}
|
}
|
||||||
configs = append(configs, cfg)
|
configs = append(configs, cfg)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -18,16 +18,16 @@ type Process struct {
|
|||||||
invokeConfig *pb.InvokeConfig
|
invokeConfig *pb.InvokeConfig
|
||||||
errCh chan error
|
errCh chan error
|
||||||
processCancel func()
|
processCancel func()
|
||||||
serveIOCancel func()
|
serveIOCancel func(error)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ForwardIO forwards process's io to the specified reader/writer.
|
// ForwardIO forwards process's io to the specified reader/writer.
|
||||||
// Optionally specify ioCancelCallback which will be called when
|
// Optionally specify ioCancelCallback which will be called when
|
||||||
// the process closes the specified IO. This will be useful for additional cleanup.
|
// the process closes the specified IO. This will be useful for additional cleanup.
|
||||||
func (p *Process) ForwardIO(in *ioset.In, ioCancelCallback func()) {
|
func (p *Process) ForwardIO(in *ioset.In, ioCancelCallback func(error)) {
|
||||||
p.inEnd.SetIn(in)
|
p.inEnd.SetIn(in)
|
||||||
if f := p.serveIOCancel; f != nil {
|
if f := p.serveIOCancel; f != nil {
|
||||||
f()
|
f(errors.WithStack(context.Canceled))
|
||||||
}
|
}
|
||||||
p.serveIOCancel = ioCancelCallback
|
p.serveIOCancel = ioCancelCallback
|
||||||
}
|
}
|
||||||
@@ -39,7 +39,7 @@ func (p *Process) Done() <-chan error {
|
|||||||
return p.errCh
|
return p.errCh
|
||||||
}
|
}
|
||||||
|
|
||||||
// Manager manages a set of proceses.
|
// Manager manages a set of processes.
|
||||||
type Manager struct {
|
type Manager struct {
|
||||||
container atomic.Value
|
container atomic.Value
|
||||||
processes sync.Map
|
processes sync.Map
|
||||||
@@ -124,9 +124,16 @@ func (m *Manager) StartProcess(pid string, resultCtx *build.ResultHandle, cfg *p
|
|||||||
f.SetOut(&out)
|
f.SetOut(&out)
|
||||||
|
|
||||||
// Register process
|
// Register process
|
||||||
ctx, cancel := context.WithCancel(context.TODO())
|
ctx, cancel := context.WithCancelCause(context.TODO())
|
||||||
var cancelOnce sync.Once
|
var cancelOnce sync.Once
|
||||||
processCancelFunc := func() { cancelOnce.Do(func() { cancel(); f.Close(); in.Close(); out.Close() }) }
|
processCancelFunc := func() {
|
||||||
|
cancelOnce.Do(func() {
|
||||||
|
cancel(errors.WithStack(context.Canceled))
|
||||||
|
f.Close()
|
||||||
|
in.Close()
|
||||||
|
out.Close()
|
||||||
|
})
|
||||||
|
}
|
||||||
p := &Process{
|
p := &Process{
|
||||||
inEnd: f,
|
inEnd: f,
|
||||||
invokeConfig: cfg,
|
invokeConfig: cfg,
|
||||||
|
|||||||
@@ -6,8 +6,8 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/containerd/containerd/defaults"
|
"github.com/containerd/containerd/v2/defaults"
|
||||||
"github.com/containerd/containerd/pkg/dialer"
|
"github.com/containerd/containerd/v2/pkg/dialer"
|
||||||
"github.com/docker/buildx/build"
|
"github.com/docker/buildx/build"
|
||||||
"github.com/docker/buildx/controller/pb"
|
"github.com/docker/buildx/controller/pb"
|
||||||
"github.com/docker/buildx/util/progress"
|
"github.com/docker/buildx/util/progress"
|
||||||
|
|||||||
@@ -62,9 +62,10 @@ func NewRemoteBuildxController(ctx context.Context, dockerCli command.Cli, opts
|
|||||||
serverRoot := filepath.Join(rootDir, "shared")
|
serverRoot := filepath.Join(rootDir, "shared")
|
||||||
|
|
||||||
// connect to buildx server if it is already running
|
// connect to buildx server if it is already running
|
||||||
ctx2, cancel := context.WithTimeout(ctx, 1*time.Second)
|
ctx2, cancel := context.WithCancelCause(ctx)
|
||||||
|
ctx2, _ = context.WithTimeoutCause(ctx2, 1*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
|
||||||
c, err := newBuildxClientAndCheck(ctx2, filepath.Join(serverRoot, defaultSocketFilename))
|
c, err := newBuildxClientAndCheck(ctx2, filepath.Join(serverRoot, defaultSocketFilename))
|
||||||
cancel()
|
cancel(errors.WithStack(context.Canceled))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if !errors.Is(err, context.DeadlineExceeded) {
|
if !errors.Is(err, context.DeadlineExceeded) {
|
||||||
return nil, errors.Wrap(err, "cannot connect to the buildx server")
|
return nil, errors.Wrap(err, "cannot connect to the buildx server")
|
||||||
@@ -90,9 +91,10 @@ func NewRemoteBuildxController(ctx context.Context, dockerCli command.Cli, opts
|
|||||||
go wait()
|
go wait()
|
||||||
|
|
||||||
// wait for buildx server to be ready
|
// wait for buildx server to be ready
|
||||||
ctx2, cancel = context.WithTimeout(ctx, 10*time.Second)
|
ctx2, cancel = context.WithCancelCause(ctx)
|
||||||
|
ctx2, _ = context.WithTimeoutCause(ctx2, 10*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
|
||||||
c, err = newBuildxClientAndCheck(ctx2, filepath.Join(serverRoot, defaultSocketFilename))
|
c, err = newBuildxClientAndCheck(ctx2, filepath.Join(serverRoot, defaultSocketFilename))
|
||||||
cancel()
|
cancel(errors.WithStack(context.Canceled))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "cannot connect to the buildx server")
|
return errors.Wrap(err, "cannot connect to the buildx server")
|
||||||
}
|
}
|
||||||
@@ -138,7 +140,7 @@ func serveCmd(dockerCli command.Cli) *cobra.Command {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
pidF := filepath.Join(root, defaultPIDFilename)
|
pidF := filepath.Join(root, defaultPIDFilename)
|
||||||
if err := os.WriteFile(pidF, []byte(fmt.Sprintf("%d", os.Getpid())), 0600); err != nil {
|
if err := os.WriteFile(pidF, fmt.Appendf(nil, "%d", os.Getpid()), 0600); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer func() {
|
defer func() {
|
||||||
|
|||||||
@@ -302,7 +302,6 @@ func attachIO(ctx context.Context, stream msgStream, initMessage *pb.InitMessage
|
|||||||
out = cfg.stderr
|
out = cfg.stderr
|
||||||
default:
|
default:
|
||||||
return errors.Errorf("unsupported fd %d", file.Fd)
|
return errors.Errorf("unsupported fd %d", file.Fd)
|
||||||
|
|
||||||
}
|
}
|
||||||
if out == nil {
|
if out == nil {
|
||||||
logrus.Warnf("attachIO: no writer for fd %d", file.Fd)
|
logrus.Warnf("attachIO: no writer for fd %d", file.Fd)
|
||||||
@@ -345,7 +344,7 @@ func receive(ctx context.Context, stream msgStream) (*pb.Message, error) {
|
|||||||
case err := <-errCh:
|
case err := <-errCh:
|
||||||
return nil, err
|
return nil, err
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
return nil, ctx.Err()
|
return nil, context.Cause(ctx)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -37,7 +37,7 @@ type Server struct {
|
|||||||
type session struct {
|
type session struct {
|
||||||
buildOnGoing atomic.Bool
|
buildOnGoing atomic.Bool
|
||||||
statusChan chan *pb.StatusResponse
|
statusChan chan *pb.StatusResponse
|
||||||
cancelBuild func()
|
cancelBuild func(error)
|
||||||
buildOptions *pb.BuildOptions
|
buildOptions *pb.BuildOptions
|
||||||
inputPipe *io.PipeWriter
|
inputPipe *io.PipeWriter
|
||||||
|
|
||||||
@@ -109,7 +109,7 @@ func (m *Server) Disconnect(ctx context.Context, req *pb.DisconnectRequest) (res
|
|||||||
m.sessionMu.Lock()
|
m.sessionMu.Lock()
|
||||||
if s, ok := m.session[sessionID]; ok {
|
if s, ok := m.session[sessionID]; ok {
|
||||||
if s.cancelBuild != nil {
|
if s.cancelBuild != nil {
|
||||||
s.cancelBuild()
|
s.cancelBuild(errors.WithStack(context.Canceled))
|
||||||
}
|
}
|
||||||
s.cancelRunningProcesses()
|
s.cancelRunningProcesses()
|
||||||
if s.result != nil {
|
if s.result != nil {
|
||||||
@@ -127,7 +127,7 @@ func (m *Server) Close() error {
|
|||||||
for k := range m.session {
|
for k := range m.session {
|
||||||
if s, ok := m.session[k]; ok {
|
if s, ok := m.session[k]; ok {
|
||||||
if s.cancelBuild != nil {
|
if s.cancelBuild != nil {
|
||||||
s.cancelBuild()
|
s.cancelBuild(errors.WithStack(context.Canceled))
|
||||||
}
|
}
|
||||||
s.cancelRunningProcesses()
|
s.cancelRunningProcesses()
|
||||||
}
|
}
|
||||||
@@ -199,8 +199,8 @@ func (m *Server) Build(ctx context.Context, req *pb.BuildRequest) (*pb.BuildResp
|
|||||||
pw := pb.NewProgressWriter(statusChan)
|
pw := pb.NewProgressWriter(statusChan)
|
||||||
|
|
||||||
// Build the specified request
|
// Build the specified request
|
||||||
ctx, cancel := context.WithCancel(ctx)
|
ctx, cancel := context.WithCancelCause(ctx)
|
||||||
defer cancel()
|
defer func() { cancel(errors.WithStack(context.Canceled)) }()
|
||||||
resp, res, _, buildErr := m.buildFunc(ctx, req.Options, inR, pw)
|
resp, res, _, buildErr := m.buildFunc(ctx, req.Options, inR, pw)
|
||||||
m.sessionMu.Lock()
|
m.sessionMu.Lock()
|
||||||
if s, ok := m.session[sessionID]; ok {
|
if s, ok := m.session[sessionID]; ok {
|
||||||
@@ -341,7 +341,7 @@ func (m *Server) Input(stream pb.Controller_InputServer) (err error) {
|
|||||||
select {
|
select {
|
||||||
case msg = <-msgCh:
|
case msg = <-msgCh:
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
return errors.Wrap(ctx.Err(), "canceled")
|
return context.Cause(ctx)
|
||||||
}
|
}
|
||||||
if msg == nil {
|
if msg == nil {
|
||||||
return nil
|
return nil
|
||||||
@@ -370,9 +370,9 @@ func (m *Server) Invoke(srv pb.Controller_InvokeServer) error {
|
|||||||
initDoneCh := make(chan *processes.Process)
|
initDoneCh := make(chan *processes.Process)
|
||||||
initErrCh := make(chan error)
|
initErrCh := make(chan error)
|
||||||
eg, egCtx := errgroup.WithContext(context.TODO())
|
eg, egCtx := errgroup.WithContext(context.TODO())
|
||||||
srvIOCtx, srvIOCancel := context.WithCancel(egCtx)
|
srvIOCtx, srvIOCancel := context.WithCancelCause(egCtx)
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
defer srvIOCancel()
|
defer srvIOCancel(errors.WithStack(context.Canceled))
|
||||||
return serveIO(srvIOCtx, srv, func(initMessage *pb.InitMessage) (retErr error) {
|
return serveIO(srvIOCtx, srv, func(initMessage *pb.InitMessage) (retErr error) {
|
||||||
defer func() {
|
defer func() {
|
||||||
if retErr != nil {
|
if retErr != nil {
|
||||||
@@ -418,7 +418,7 @@ func (m *Server) Invoke(srv pb.Controller_InvokeServer) error {
|
|||||||
})
|
})
|
||||||
})
|
})
|
||||||
eg.Go(func() (rErr error) {
|
eg.Go(func() (rErr error) {
|
||||||
defer srvIOCancel()
|
defer srvIOCancel(errors.WithStack(context.Canceled))
|
||||||
// Wait for init done
|
// Wait for init done
|
||||||
var proc *processes.Process
|
var proc *processes.Process
|
||||||
select {
|
select {
|
||||||
|
|||||||
@@ -41,11 +41,17 @@ target "lint" {
|
|||||||
platforms = GOLANGCI_LINT_MULTIPLATFORM != "" ? [
|
platforms = GOLANGCI_LINT_MULTIPLATFORM != "" ? [
|
||||||
"darwin/amd64",
|
"darwin/amd64",
|
||||||
"darwin/arm64",
|
"darwin/arm64",
|
||||||
|
"freebsd/amd64",
|
||||||
|
"freebsd/arm64",
|
||||||
"linux/amd64",
|
"linux/amd64",
|
||||||
"linux/arm64",
|
"linux/arm64",
|
||||||
"linux/s390x",
|
"linux/s390x",
|
||||||
"linux/ppc64le",
|
"linux/ppc64le",
|
||||||
"linux/riscv64",
|
"linux/riscv64",
|
||||||
|
"netbsd/amd64",
|
||||||
|
"netbsd/arm64",
|
||||||
|
"openbsd/amd64",
|
||||||
|
"openbsd/arm64",
|
||||||
"windows/amd64",
|
"windows/amd64",
|
||||||
"windows/arm64"
|
"windows/arm64"
|
||||||
] : []
|
] : []
|
||||||
@@ -154,6 +160,8 @@ target "binaries-cross" {
|
|||||||
platforms = [
|
platforms = [
|
||||||
"darwin/amd64",
|
"darwin/amd64",
|
||||||
"darwin/arm64",
|
"darwin/arm64",
|
||||||
|
"freebsd/amd64",
|
||||||
|
"freebsd/arm64",
|
||||||
"linux/amd64",
|
"linux/amd64",
|
||||||
"linux/arm/v6",
|
"linux/arm/v6",
|
||||||
"linux/arm/v7",
|
"linux/arm/v7",
|
||||||
@@ -161,6 +169,10 @@ target "binaries-cross" {
|
|||||||
"linux/ppc64le",
|
"linux/ppc64le",
|
||||||
"linux/riscv64",
|
"linux/riscv64",
|
||||||
"linux/s390x",
|
"linux/s390x",
|
||||||
|
"netbsd/amd64",
|
||||||
|
"netbsd/arm64",
|
||||||
|
"openbsd/amd64",
|
||||||
|
"openbsd/arm64",
|
||||||
"windows/amd64",
|
"windows/amd64",
|
||||||
"windows/arm64"
|
"windows/arm64"
|
||||||
]
|
]
|
||||||
|
|||||||
@@ -19,8 +19,8 @@ By default, Bake uses the following lookup order to find the configuration file:
|
|||||||
3. `docker-compose.yml`
|
3. `docker-compose.yml`
|
||||||
4. `docker-compose.yaml`
|
4. `docker-compose.yaml`
|
||||||
5. `docker-bake.json`
|
5. `docker-bake.json`
|
||||||
6. `docker-bake.override.json`
|
6. `docker-bake.hcl`
|
||||||
7. `docker-bake.hcl`
|
7. `docker-bake.override.json`
|
||||||
8. `docker-bake.override.hcl`
|
8. `docker-bake.override.hcl`
|
||||||
|
|
||||||
You can specify the file location explicitly using the `--file` flag:
|
You can specify the file location explicitly using the `--file` flag:
|
||||||
@@ -221,8 +221,10 @@ The following table shows the complete list of attributes that you can assign to
|
|||||||
| [`attest`](#targetattest) | List | Build attestations |
|
| [`attest`](#targetattest) | List | Build attestations |
|
||||||
| [`cache-from`](#targetcache-from) | List | External cache sources |
|
| [`cache-from`](#targetcache-from) | List | External cache sources |
|
||||||
| [`cache-to`](#targetcache-to) | List | External cache destinations |
|
| [`cache-to`](#targetcache-to) | List | External cache destinations |
|
||||||
|
| [`call`](#targetcall) | String | Specify the frontend method to call for the target. |
|
||||||
| [`context`](#targetcontext) | String | Set of files located in the specified path or URL |
|
| [`context`](#targetcontext) | String | Set of files located in the specified path or URL |
|
||||||
| [`contexts`](#targetcontexts) | Map | Additional build contexts |
|
| [`contexts`](#targetcontexts) | Map | Additional build contexts |
|
||||||
|
| [`description`](#targetdescription) | String | Description of a target |
|
||||||
| [`dockerfile-inline`](#targetdockerfile-inline) | String | Inline Dockerfile string |
|
| [`dockerfile-inline`](#targetdockerfile-inline) | String | Inline Dockerfile string |
|
||||||
| [`dockerfile`](#targetdockerfile) | String | Dockerfile location |
|
| [`dockerfile`](#targetdockerfile) | String | Dockerfile location |
|
||||||
| [`inherits`](#targetinherits) | List | Inherit attributes from other targets |
|
| [`inherits`](#targetinherits) | List | Inherit attributes from other targets |
|
||||||
@@ -283,19 +285,11 @@ The key takes a list of annotations, in the format of `KEY=VALUE`.
|
|||||||
|
|
||||||
```hcl
|
```hcl
|
||||||
target "default" {
|
target "default" {
|
||||||
output = ["type=image,name=foo"]
|
output = [{ type = "image", name = "foo" }]
|
||||||
annotations = ["org.opencontainers.image.authors=dvdksn"]
|
annotations = ["org.opencontainers.image.authors=dvdksn"]
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
is the same as
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
target "default" {
|
|
||||||
output = ["type=image,name=foo,annotation.org.opencontainers.image.authors=dvdksn"]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
By default, the annotation is added to image manifests. You can configure the
|
By default, the annotation is added to image manifests. You can configure the
|
||||||
level of the annotations by adding a prefix to the annotation, containing a
|
level of the annotations by adding a prefix to the annotation, containing a
|
||||||
comma-separated list of all the levels that you want to annotate. The following
|
comma-separated list of all the levels that you want to annotate. The following
|
||||||
@@ -303,7 +297,7 @@ example adds annotations to both the image index and manifests.
|
|||||||
|
|
||||||
```hcl
|
```hcl
|
||||||
target "default" {
|
target "default" {
|
||||||
output = ["type=image,name=foo"]
|
output = [{ type = "image", name = "foo" }]
|
||||||
annotations = ["index,manifest:org.opencontainers.image.authors=dvdksn"]
|
annotations = ["index,manifest:org.opencontainers.image.authors=dvdksn"]
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
@@ -319,8 +313,13 @@ This attribute accepts the long-form CSV version of attestation parameters.
|
|||||||
```hcl
|
```hcl
|
||||||
target "default" {
|
target "default" {
|
||||||
attest = [
|
attest = [
|
||||||
"type=provenance,mode=min",
|
{
|
||||||
"type=sbom"
|
type = "provenance",
|
||||||
|
mode = "max",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
type = "sbom",
|
||||||
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
@@ -336,8 +335,15 @@ This takes a list value, so you can specify multiple cache sources.
|
|||||||
```hcl
|
```hcl
|
||||||
target "app" {
|
target "app" {
|
||||||
cache-from = [
|
cache-from = [
|
||||||
"type=s3,region=eu-west-1,bucket=mybucket",
|
{
|
||||||
"user/repo:cache",
|
type = "s3",
|
||||||
|
region = "eu-west-1",
|
||||||
|
bucket = "mybucket"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
type = "registry",
|
||||||
|
ref = "user/repo:cache"
|
||||||
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
@@ -353,12 +359,40 @@ This takes a list value, so you can specify multiple cache export targets.
|
|||||||
```hcl
|
```hcl
|
||||||
target "app" {
|
target "app" {
|
||||||
cache-to = [
|
cache-to = [
|
||||||
"type=s3,region=eu-west-1,bucket=mybucket",
|
{
|
||||||
"type=inline"
|
type = "s3",
|
||||||
|
region = "eu-west-1",
|
||||||
|
bucket = "mybucket"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
type = "inline",
|
||||||
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### `target.call`
|
||||||
|
|
||||||
|
Specifies the frontend method to use. Frontend methods let you, for example,
|
||||||
|
execute build checks only, instead of running a build. This is the same as the
|
||||||
|
`--call` flag.
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
target "app" {
|
||||||
|
call = "check"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Supported values are:
|
||||||
|
|
||||||
|
- `build` builds the target (default)
|
||||||
|
- `check`: evaluates [build checks](https://docs.docker.com/build/checks/) for the target
|
||||||
|
- `outline`: displays the target's build arguments and their default values if available
|
||||||
|
- `targets`: lists all Bake targets in the loaded definition, along with its [description](#targetdescription).
|
||||||
|
|
||||||
|
For more information about frontend methods, refer to the CLI reference for
|
||||||
|
[`docker buildx build --call`](https://docs.docker.com/reference/cli/docker/buildx/build/#call).
|
||||||
|
|
||||||
### `target.context`
|
### `target.context`
|
||||||
|
|
||||||
Specifies the location of the build context to use for this target.
|
Specifies the location of the build context to use for this target.
|
||||||
@@ -466,6 +500,25 @@ FROM baseapp
|
|||||||
RUN echo "Hello world"
|
RUN echo "Hello world"
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### `target.description`
|
||||||
|
|
||||||
|
Defines a human-readable description for the target, clarifying its purpose or
|
||||||
|
functionality.
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
target "lint" {
|
||||||
|
description = "Runs golangci-lint to detect style errors"
|
||||||
|
args = {
|
||||||
|
GOLANGCI_LINT_VERSION = null
|
||||||
|
}
|
||||||
|
dockerfile = "lint.Dockerfile"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
This attribute is useful when combined with the `docker buildx bake --list=targets`
|
||||||
|
option, providing a more informative output when listing the available build
|
||||||
|
targets in a Bake file.
|
||||||
|
|
||||||
### `target.dockerfile-inline`
|
### `target.dockerfile-inline`
|
||||||
|
|
||||||
Uses the string value as an inline Dockerfile for the build target.
|
Uses the string value as an inline Dockerfile for the build target.
|
||||||
@@ -820,7 +873,7 @@ The following example configures the target to use a cache-only output,
|
|||||||
|
|
||||||
```hcl
|
```hcl
|
||||||
target "default" {
|
target "default" {
|
||||||
output = ["type=cacheonly"]
|
output = [{ type = "cacheonly" }]
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -860,8 +913,8 @@ variable "HOME" {
|
|||||||
|
|
||||||
target "default" {
|
target "default" {
|
||||||
secret = [
|
secret = [
|
||||||
"type=env,id=KUBECONFIG",
|
{ type = "env", id = "KUBECONFIG" },
|
||||||
"type=file,id=aws,src=${HOME}/.aws/credentials"
|
{ type = "file", id = "aws", src = "${HOME}/.aws/credentials" },
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
@@ -905,7 +958,7 @@ This can be useful if you need to access private repositories during a build.
|
|||||||
|
|
||||||
```hcl
|
```hcl
|
||||||
target "default" {
|
target "default" {
|
||||||
ssh = ["default"]
|
ssh = [{ id = "default" }]
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|||||||
@@ -17,6 +17,7 @@ Extended build capabilities with BuildKit
|
|||||||
| [`debug`](buildx_debug.md) | Start debugger (EXPERIMENTAL) |
|
| [`debug`](buildx_debug.md) | Start debugger (EXPERIMENTAL) |
|
||||||
| [`dial-stdio`](buildx_dial-stdio.md) | Proxy current stdio streams to builder instance |
|
| [`dial-stdio`](buildx_dial-stdio.md) | Proxy current stdio streams to builder instance |
|
||||||
| [`du`](buildx_du.md) | Disk usage |
|
| [`du`](buildx_du.md) | Disk usage |
|
||||||
|
| [`history`](buildx_history.md) | Commands to work on build records |
|
||||||
| [`imagetools`](buildx_imagetools.md) | Commands to work on images in registry |
|
| [`imagetools`](buildx_imagetools.md) | Commands to work on images in registry |
|
||||||
| [`inspect`](buildx_inspect.md) | Inspect current builder instance |
|
| [`inspect`](buildx_inspect.md) | Inspect current builder instance |
|
||||||
| [`ls`](buildx_ls.md) | List builder instances |
|
| [`ls`](buildx_ls.md) | List builder instances |
|
||||||
|
|||||||
@@ -13,24 +13,25 @@ Build from a file
|
|||||||
|
|
||||||
### Options
|
### Options
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
| Name | Type | Default | Description |
|
||||||
|:------------------------------------|:--------------|:--------|:----------------------------------------------------------------------------------------------------|
|
|:------------------------------------|:--------------|:--------|:-------------------------------------------------------------------------------------------------------------|
|
||||||
| `--allow` | `stringArray` | | Allow build to access specified resources |
|
| [`--allow`](#allow) | `stringArray` | | Allow build to access specified resources |
|
||||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||||
| [`--call`](#call) | `string` | `build` | Set method for evaluating build (`check`, `outline`, `targets`) |
|
| [`--call`](#call) | `string` | `build` | Set method for evaluating build (`check`, `outline`, `targets`) |
|
||||||
| [`--check`](#check) | `bool` | | Shorthand for `--call=check` |
|
| [`--check`](#check) | `bool` | | Shorthand for `--call=check` |
|
||||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||||
| [`-f`](#file), [`--file`](#file) | `stringArray` | | Build definition file |
|
| [`-f`](#file), [`--file`](#file) | `stringArray` | | Build definition file |
|
||||||
| `--load` | `bool` | | Shorthand for `--set=*.output=type=docker` |
|
| [`--list`](#list) | `string` | | List targets or variables |
|
||||||
| [`--metadata-file`](#metadata-file) | `string` | | Write build result metadata to a file |
|
| `--load` | `bool` | | Shorthand for `--set=*.output=type=docker` |
|
||||||
| [`--no-cache`](#no-cache) | `bool` | | Do not use cache when building the image |
|
| [`--metadata-file`](#metadata-file) | `string` | | Write build result metadata to a file |
|
||||||
| [`--print`](#print) | `bool` | | Print the options without building |
|
| [`--no-cache`](#no-cache) | `bool` | | Do not use cache when building the image |
|
||||||
| [`--progress`](#progress) | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use plain to show container output |
|
| [`--print`](#print) | `bool` | | Print the options without building |
|
||||||
| [`--provenance`](#provenance) | `string` | | Shorthand for `--set=*.attest=type=provenance` |
|
| [`--progress`](#progress) | `string` | `auto` | Set type of progress output (`auto`, `quiet`, `plain`, `tty`, `rawjson`). Use plain to show container output |
|
||||||
| [`--pull`](#pull) | `bool` | | Always attempt to pull all referenced images |
|
| [`--provenance`](#provenance) | `string` | | Shorthand for `--set=*.attest=type=provenance` |
|
||||||
| `--push` | `bool` | | Shorthand for `--set=*.output=type=registry` |
|
| [`--pull`](#pull) | `bool` | | Always attempt to pull all referenced images |
|
||||||
| [`--sbom`](#sbom) | `string` | | Shorthand for `--set=*.attest=type=sbom` |
|
| `--push` | `bool` | | Shorthand for `--set=*.output=type=registry` |
|
||||||
| [`--set`](#set) | `stringArray` | | Override target value (e.g., `targetpattern.key=value`) |
|
| [`--sbom`](#sbom) | `string` | | Shorthand for `--set=*.attest=type=sbom` |
|
||||||
|
| [`--set`](#set) | `stringArray` | | Override target value (e.g., `targetpattern.key=value`) |
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
@@ -50,6 +51,80 @@ guide for introduction to writing bake files.
|
|||||||
|
|
||||||
## Examples
|
## Examples
|
||||||
|
|
||||||
|
### <a name="allow"></a> Allow extra privileged entitlement (--allow)
|
||||||
|
|
||||||
|
```text
|
||||||
|
--allow=ENTITLEMENT[=VALUE]
|
||||||
|
```
|
||||||
|
|
||||||
|
Entitlements are designed to provide controlled access to privileged
|
||||||
|
operations. By default, Buildx and BuildKit operates with restricted
|
||||||
|
permissions to protect users and their systems from unintended side effects or
|
||||||
|
security risks. The `--allow` flag explicitly grants access to additional
|
||||||
|
entitlements, making it clear when a build or bake operation requires elevated
|
||||||
|
privileges.
|
||||||
|
|
||||||
|
In addition to BuildKit's `network.host` and `security.insecure` entitlements
|
||||||
|
(see [`docker buildx build --allow`](https://docs.docker.com/reference/cli/docker/buildx/build/#allow),
|
||||||
|
Bake supports file system entitlements that grant granular control over file
|
||||||
|
system access. These are particularly useful when working with builds that need
|
||||||
|
access to files outside the default working directory.
|
||||||
|
|
||||||
|
Bake supports the following filesystem entitlements:
|
||||||
|
|
||||||
|
- `--allow fs=<path|*>` - Grant read and write access to files outside of the
|
||||||
|
working directory.
|
||||||
|
- `--allow fs.read=<path|*>` - Grant read access to files outside of the
|
||||||
|
working directory.
|
||||||
|
- `--allow fs.write=<path|*>` - Grant write access to files outside of the
|
||||||
|
working directory.
|
||||||
|
|
||||||
|
The `fs` entitlements take a path value (relative or absolute) to a directory
|
||||||
|
on the filesystem. Alternatively, you can pass a wildcard (`*`) to allow Bake
|
||||||
|
to access the entire filesystem.
|
||||||
|
|
||||||
|
### Example: fs.read
|
||||||
|
|
||||||
|
Given the following Bake configuration, Bake would need to access the parent
|
||||||
|
directory, relative to the Bake file.
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
target "app" {
|
||||||
|
context = "../src"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Assuming `docker buildx bake app` is executed in the same directory as the
|
||||||
|
`docker-bake.hcl` file, you would need to explicitly allow Bake to read from
|
||||||
|
the `../src` directory. In this case, the following invocations all work:
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake --allow fs.read=* app
|
||||||
|
$ docker buildx bake --allow fs.read=../src app
|
||||||
|
$ docker buildx bake --allow fs=* app
|
||||||
|
```
|
||||||
|
|
||||||
|
### Example: fs.write
|
||||||
|
|
||||||
|
The following `docker-bake.hcl` file requires write access to the `/tmp`
|
||||||
|
directory.
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
target "app" {
|
||||||
|
output = "/tmp"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Assuming `docker buildx bake app` is executed outside of the `/tmp` directory,
|
||||||
|
you would need to allow the `fs.write` entitlement, either by specifying the
|
||||||
|
path or using a wildcard:
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake --allow fs=/tmp app
|
||||||
|
$ docker buildx bake --allow fs.write=/tmp app
|
||||||
|
$ docker buildx bake --allow fs.write=* app
|
||||||
|
```
|
||||||
|
|
||||||
### <a name="builder"></a> Override the configured builder instance (--builder)
|
### <a name="builder"></a> Override the configured builder instance (--builder)
|
||||||
|
|
||||||
Same as [`buildx --builder`](buildx.md#builder).
|
Same as [`buildx --builder`](buildx.md#builder).
|
||||||
@@ -101,6 +176,42 @@ $ docker buildx bake -f docker-bake.dev.hcl db webapp-release
|
|||||||
See the [Bake file reference](https://docs.docker.com/build/bake/reference/)
|
See the [Bake file reference](https://docs.docker.com/build/bake/reference/)
|
||||||
for more details.
|
for more details.
|
||||||
|
|
||||||
|
### <a name="list"></a> List targets and variables (--list)
|
||||||
|
|
||||||
|
The `--list` flag displays all available targets or variables in the Bake
|
||||||
|
configuration, along with a description (if set using the `description`
|
||||||
|
property in the Bake file).
|
||||||
|
|
||||||
|
To list all targets:
|
||||||
|
|
||||||
|
```console {title="List targets"}
|
||||||
|
$ docker buildx bake --list=targets
|
||||||
|
TARGET DESCRIPTION
|
||||||
|
binaries
|
||||||
|
default binaries
|
||||||
|
update-docs
|
||||||
|
validate
|
||||||
|
validate-golangci Validate .golangci.yml schema (does not run Go linter)
|
||||||
|
```
|
||||||
|
|
||||||
|
To list variables:
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake --list=variables
|
||||||
|
VARIABLE VALUE DESCRIPTION
|
||||||
|
REGISTRY docker.io/username Registry and namespace
|
||||||
|
IMAGE_NAME my-app Image name
|
||||||
|
GO_VERSION <null>
|
||||||
|
```
|
||||||
|
|
||||||
|
By default, the output of `docker buildx bake --list` is presented in a table
|
||||||
|
format. Alternatively, you can use a long-form CSV syntax and specify a
|
||||||
|
`format` attribute to output the list in JSON.
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake --list=type=targets,format=json
|
||||||
|
```
|
||||||
|
|
||||||
### <a name="metadata-file"></a> Write build results metadata to a file (--metadata-file)
|
### <a name="metadata-file"></a> Write build results metadata to a file (--metadata-file)
|
||||||
|
|
||||||
Similar to [`buildx build --metadata-file`](buildx_build.md#metadata-file) but
|
Similar to [`buildx build --metadata-file`](buildx_build.md#metadata-file) but
|
||||||
@@ -236,18 +347,22 @@ is defined in https://golang.org/pkg/path/#Match.
|
|||||||
```console
|
```console
|
||||||
$ docker buildx bake --set target.args.mybuildarg=value
|
$ docker buildx bake --set target.args.mybuildarg=value
|
||||||
$ docker buildx bake --set target.platform=linux/arm64
|
$ docker buildx bake --set target.platform=linux/arm64
|
||||||
$ docker buildx bake --set foo*.args.mybuildarg=value # overrides build arg for all targets starting with 'foo'
|
$ docker buildx bake --set foo*.args.mybuildarg=value # overrides build arg for all targets starting with 'foo'
|
||||||
$ docker buildx bake --set *.platform=linux/arm64 # overrides platform for all targets
|
$ docker buildx bake --set *.platform=linux/arm64 # overrides platform for all targets
|
||||||
$ docker buildx bake --set foo*.no-cache # bypass caching only for targets starting with 'foo'
|
$ docker buildx bake --set foo*.no-cache # bypass caching only for targets starting with 'foo'
|
||||||
|
$ docker buildx bake --set target.platform+=linux/arm64 # appends 'linux/arm64' to the platform list
|
||||||
```
|
```
|
||||||
|
|
||||||
You can override the following fields:
|
You can override the following fields:
|
||||||
|
|
||||||
|
* `annotations`
|
||||||
|
* `attest`
|
||||||
* `args`
|
* `args`
|
||||||
* `cache-from`
|
* `cache-from`
|
||||||
* `cache-to`
|
* `cache-to`
|
||||||
* `context`
|
* `context`
|
||||||
* `dockerfile`
|
* `dockerfile`
|
||||||
|
* `entitlements`
|
||||||
* `labels`
|
* `labels`
|
||||||
* `load`
|
* `load`
|
||||||
* `no-cache`
|
* `no-cache`
|
||||||
@@ -260,3 +375,20 @@ You can override the following fields:
|
|||||||
* `ssh`
|
* `ssh`
|
||||||
* `tags`
|
* `tags`
|
||||||
* `target`
|
* `target`
|
||||||
|
|
||||||
|
You can append using `+=` operator for the following fields:
|
||||||
|
|
||||||
|
* `annotations`¹
|
||||||
|
* `attest`¹
|
||||||
|
* `cache-from`
|
||||||
|
* `cache-to`
|
||||||
|
* `entitlements`¹
|
||||||
|
* `no-cache-filter`
|
||||||
|
* `output`
|
||||||
|
* `platform`
|
||||||
|
* `secrets`
|
||||||
|
* `ssh`
|
||||||
|
* `tags`
|
||||||
|
|
||||||
|
> [!NOTE]
|
||||||
|
> ¹ These fields already append by default.
|
||||||
|
|||||||
@@ -13,46 +13,46 @@ Start a build
|
|||||||
|
|
||||||
### Options
|
### Options
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
| Name | Type | Default | Description |
|
||||||
|:----------------------------------------|:--------------|:----------|:----------------------------------------------------------------------------------------------------|
|
|:----------------------------------------|:--------------|:----------|:-------------------------------------------------------------------------------------------------------------|
|
||||||
| [`--add-host`](#add-host) | `stringSlice` | | Add a custom host-to-IP mapping (format: `host:ip`) |
|
| [`--add-host`](#add-host) | `stringSlice` | | Add a custom host-to-IP mapping (format: `host:ip`) |
|
||||||
| [`--allow`](#allow) | `stringSlice` | | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
|
| [`--allow`](#allow) | `stringArray` | | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
|
||||||
| [`--annotation`](#annotation) | `stringArray` | | Add annotation to the image |
|
| [`--annotation`](#annotation) | `stringArray` | | Add annotation to the image |
|
||||||
| [`--attest`](#attest) | `stringArray` | | Attestation parameters (format: `type=sbom,generator=image`) |
|
| [`--attest`](#attest) | `stringArray` | | Attestation parameters (format: `type=sbom,generator=image`) |
|
||||||
| [`--build-arg`](#build-arg) | `stringArray` | | Set build-time variables |
|
| [`--build-arg`](#build-arg) | `stringArray` | | Set build-time variables |
|
||||||
| [`--build-context`](#build-context) | `stringArray` | | Additional build contexts (e.g., name=path) |
|
| [`--build-context`](#build-context) | `stringArray` | | Additional build contexts (e.g., name=path) |
|
||||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||||
| [`--cache-from`](#cache-from) | `stringArray` | | External cache sources (e.g., `user/app:cache`, `type=local,src=path/to/dir`) |
|
| [`--cache-from`](#cache-from) | `stringArray` | | External cache sources (e.g., `user/app:cache`, `type=local,src=path/to/dir`) |
|
||||||
| [`--cache-to`](#cache-to) | `stringArray` | | Cache export destinations (e.g., `user/app:cache`, `type=local,dest=path/to/dir`) |
|
| [`--cache-to`](#cache-to) | `stringArray` | | Cache export destinations (e.g., `user/app:cache`, `type=local,dest=path/to/dir`) |
|
||||||
| [`--call`](#call) | `string` | `build` | Set method for evaluating build (`check`, `outline`, `targets`) |
|
| [`--call`](#call) | `string` | `build` | Set method for evaluating build (`check`, `outline`, `targets`) |
|
||||||
| [`--cgroup-parent`](#cgroup-parent) | `string` | | Set the parent cgroup for the `RUN` instructions during build |
|
| [`--cgroup-parent`](#cgroup-parent) | `string` | | Set the parent cgroup for the `RUN` instructions during build |
|
||||||
| [`--check`](#check) | `bool` | | Shorthand for `--call=check` |
|
| [`--check`](#check) | `bool` | | Shorthand for `--call=check` |
|
||||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||||
| `--detach` | `bool` | | Detach buildx server (supported only on linux) (EXPERIMENTAL) |
|
| `--detach` | `bool` | | Detach buildx server (supported only on linux) (EXPERIMENTAL) |
|
||||||
| [`-f`](#file), [`--file`](#file) | `string` | | Name of the Dockerfile (default: `PATH/Dockerfile`) |
|
| [`-f`](#file), [`--file`](#file) | `string` | | Name of the Dockerfile (default: `PATH/Dockerfile`) |
|
||||||
| `--iidfile` | `string` | | Write the image ID to a file |
|
| `--iidfile` | `string` | | Write the image ID to a file |
|
||||||
| `--label` | `stringArray` | | Set metadata for an image |
|
| `--label` | `stringArray` | | Set metadata for an image |
|
||||||
| [`--load`](#load) | `bool` | | Shorthand for `--output=type=docker` |
|
| [`--load`](#load) | `bool` | | Shorthand for `--output=type=docker` |
|
||||||
| [`--metadata-file`](#metadata-file) | `string` | | Write build result metadata to a file |
|
| [`--metadata-file`](#metadata-file) | `string` | | Write build result metadata to a file |
|
||||||
| [`--network`](#network) | `string` | `default` | Set the networking mode for the `RUN` instructions during build |
|
| [`--network`](#network) | `string` | `default` | Set the networking mode for the `RUN` instructions during build |
|
||||||
| `--no-cache` | `bool` | | Do not use cache when building the image |
|
| `--no-cache` | `bool` | | Do not use cache when building the image |
|
||||||
| [`--no-cache-filter`](#no-cache-filter) | `stringArray` | | Do not cache specified stages |
|
| [`--no-cache-filter`](#no-cache-filter) | `stringArray` | | Do not cache specified stages |
|
||||||
| [`-o`](#output), [`--output`](#output) | `stringArray` | | Output destination (format: `type=local,dest=path`) |
|
| [`-o`](#output), [`--output`](#output) | `stringArray` | | Output destination (format: `type=local,dest=path`) |
|
||||||
| [`--platform`](#platform) | `stringArray` | | Set target platform for build |
|
| [`--platform`](#platform) | `stringArray` | | Set target platform for build |
|
||||||
| [`--progress`](#progress) | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use plain to show container output |
|
| [`--progress`](#progress) | `string` | `auto` | Set type of progress output (`auto`, `quiet`, `plain`, `tty`, `rawjson`). Use plain to show container output |
|
||||||
| [`--provenance`](#provenance) | `string` | | Shorthand for `--attest=type=provenance` |
|
| [`--provenance`](#provenance) | `string` | | Shorthand for `--attest=type=provenance` |
|
||||||
| `--pull` | `bool` | | Always attempt to pull all referenced images |
|
| `--pull` | `bool` | | Always attempt to pull all referenced images |
|
||||||
| [`--push`](#push) | `bool` | | Shorthand for `--output=type=registry` |
|
| [`--push`](#push) | `bool` | | Shorthand for `--output=type=registry` |
|
||||||
| `-q`, `--quiet` | `bool` | | Suppress the build output and print image ID on success |
|
| `-q`, `--quiet` | `bool` | | Suppress the build output and print image ID on success |
|
||||||
| `--root` | `string` | | Specify root directory of server to connect (EXPERIMENTAL) |
|
| `--root` | `string` | | Specify root directory of server to connect (EXPERIMENTAL) |
|
||||||
| [`--sbom`](#sbom) | `string` | | Shorthand for `--attest=type=sbom` |
|
| [`--sbom`](#sbom) | `string` | | Shorthand for `--attest=type=sbom` |
|
||||||
| [`--secret`](#secret) | `stringArray` | | Secret to expose to the build (format: `id=mysecret[,src=/local/secret]`) |
|
| [`--secret`](#secret) | `stringArray` | | Secret to expose to the build (format: `id=mysecret[,src=/local/secret]`) |
|
||||||
| `--server-config` | `string` | | Specify buildx server config file (used only when launching new server) (EXPERIMENTAL) |
|
| `--server-config` | `string` | | Specify buildx server config file (used only when launching new server) (EXPERIMENTAL) |
|
||||||
| [`--shm-size`](#shm-size) | `bytes` | `0` | Shared memory size for build containers |
|
| [`--shm-size`](#shm-size) | `bytes` | `0` | Shared memory size for build containers |
|
||||||
| [`--ssh`](#ssh) | `stringArray` | | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
|
| [`--ssh`](#ssh) | `stringArray` | | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
|
||||||
| [`-t`](#tag), [`--tag`](#tag) | `stringArray` | | Name and optionally a tag (format: `name:tag`) |
|
| [`-t`](#tag), [`--tag`](#tag) | `stringArray` | | Name and optionally a tag (format: `name:tag`) |
|
||||||
| [`--target`](#target) | `string` | | Set the target build stage to build |
|
| [`--target`](#target) | `string` | | Set the target build stage to build |
|
||||||
| [`--ulimit`](#ulimit) | `ulimit` | | Ulimit options |
|
| [`--ulimit`](#ulimit) | `ulimit` | | Ulimit options |
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
@@ -828,8 +828,12 @@ $ docker buildx build --platform=darwin .
|
|||||||
--progress=VALUE
|
--progress=VALUE
|
||||||
```
|
```
|
||||||
|
|
||||||
Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use `plain` to show container
|
Set type of progress output. Supported values are:
|
||||||
output (default `auto`).
|
- `auto` (default): Uses the `tty` mode if the client is a TTY, or `plain` otherwise
|
||||||
|
- `tty`: An interactive stream of the output with color and redrawing
|
||||||
|
- `plain`: Prints the raw build progress in a plaintext format
|
||||||
|
- `quiet`: Suppress the build output and print image ID on success (same as `--quiet`)
|
||||||
|
- `rawjson`: Prints the raw build progress as JSON lines
|
||||||
|
|
||||||
> [!NOTE]
|
> [!NOTE]
|
||||||
> You can also use the `BUILDKIT_PROGRESS` environment variable to set its value.
|
> You can also use the `BUILDKIT_PROGRESS` environment variable to set its value.
|
||||||
|
|||||||
@@ -9,46 +9,46 @@ Start a build
|
|||||||
|
|
||||||
### Options
|
### Options
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
| Name | Type | Default | Description |
|
||||||
|:--------------------|:--------------|:----------|:----------------------------------------------------------------------------------------------------|
|
|:--------------------|:--------------|:----------|:-------------------------------------------------------------------------------------------------------------|
|
||||||
| `--add-host` | `stringSlice` | | Add a custom host-to-IP mapping (format: `host:ip`) |
|
| `--add-host` | `stringSlice` | | Add a custom host-to-IP mapping (format: `host:ip`) |
|
||||||
| `--allow` | `stringSlice` | | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
|
| `--allow` | `stringArray` | | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
|
||||||
| `--annotation` | `stringArray` | | Add annotation to the image |
|
| `--annotation` | `stringArray` | | Add annotation to the image |
|
||||||
| `--attest` | `stringArray` | | Attestation parameters (format: `type=sbom,generator=image`) |
|
| `--attest` | `stringArray` | | Attestation parameters (format: `type=sbom,generator=image`) |
|
||||||
| `--build-arg` | `stringArray` | | Set build-time variables |
|
| `--build-arg` | `stringArray` | | Set build-time variables |
|
||||||
| `--build-context` | `stringArray` | | Additional build contexts (e.g., name=path) |
|
| `--build-context` | `stringArray` | | Additional build contexts (e.g., name=path) |
|
||||||
| `--builder` | `string` | | Override the configured builder instance |
|
| `--builder` | `string` | | Override the configured builder instance |
|
||||||
| `--cache-from` | `stringArray` | | External cache sources (e.g., `user/app:cache`, `type=local,src=path/to/dir`) |
|
| `--cache-from` | `stringArray` | | External cache sources (e.g., `user/app:cache`, `type=local,src=path/to/dir`) |
|
||||||
| `--cache-to` | `stringArray` | | Cache export destinations (e.g., `user/app:cache`, `type=local,dest=path/to/dir`) |
|
| `--cache-to` | `stringArray` | | Cache export destinations (e.g., `user/app:cache`, `type=local,dest=path/to/dir`) |
|
||||||
| `--call` | `string` | `build` | Set method for evaluating build (`check`, `outline`, `targets`) |
|
| `--call` | `string` | `build` | Set method for evaluating build (`check`, `outline`, `targets`) |
|
||||||
| `--cgroup-parent` | `string` | | Set the parent cgroup for the `RUN` instructions during build |
|
| `--cgroup-parent` | `string` | | Set the parent cgroup for the `RUN` instructions during build |
|
||||||
| `--check` | `bool` | | Shorthand for `--call=check` |
|
| `--check` | `bool` | | Shorthand for `--call=check` |
|
||||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||||
| `--detach` | `bool` | | Detach buildx server (supported only on linux) (EXPERIMENTAL) |
|
| `--detach` | `bool` | | Detach buildx server (supported only on linux) (EXPERIMENTAL) |
|
||||||
| `-f`, `--file` | `string` | | Name of the Dockerfile (default: `PATH/Dockerfile`) |
|
| `-f`, `--file` | `string` | | Name of the Dockerfile (default: `PATH/Dockerfile`) |
|
||||||
| `--iidfile` | `string` | | Write the image ID to a file |
|
| `--iidfile` | `string` | | Write the image ID to a file |
|
||||||
| `--label` | `stringArray` | | Set metadata for an image |
|
| `--label` | `stringArray` | | Set metadata for an image |
|
||||||
| `--load` | `bool` | | Shorthand for `--output=type=docker` |
|
| `--load` | `bool` | | Shorthand for `--output=type=docker` |
|
||||||
| `--metadata-file` | `string` | | Write build result metadata to a file |
|
| `--metadata-file` | `string` | | Write build result metadata to a file |
|
||||||
| `--network` | `string` | `default` | Set the networking mode for the `RUN` instructions during build |
|
| `--network` | `string` | `default` | Set the networking mode for the `RUN` instructions during build |
|
||||||
| `--no-cache` | `bool` | | Do not use cache when building the image |
|
| `--no-cache` | `bool` | | Do not use cache when building the image |
|
||||||
| `--no-cache-filter` | `stringArray` | | Do not cache specified stages |
|
| `--no-cache-filter` | `stringArray` | | Do not cache specified stages |
|
||||||
| `-o`, `--output` | `stringArray` | | Output destination (format: `type=local,dest=path`) |
|
| `-o`, `--output` | `stringArray` | | Output destination (format: `type=local,dest=path`) |
|
||||||
| `--platform` | `stringArray` | | Set target platform for build |
|
| `--platform` | `stringArray` | | Set target platform for build |
|
||||||
| `--progress` | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use plain to show container output |
|
| `--progress` | `string` | `auto` | Set type of progress output (`auto`, `quiet`, `plain`, `tty`, `rawjson`). Use plain to show container output |
|
||||||
| `--provenance` | `string` | | Shorthand for `--attest=type=provenance` |
|
| `--provenance` | `string` | | Shorthand for `--attest=type=provenance` |
|
||||||
| `--pull` | `bool` | | Always attempt to pull all referenced images |
|
| `--pull` | `bool` | | Always attempt to pull all referenced images |
|
||||||
| `--push` | `bool` | | Shorthand for `--output=type=registry` |
|
| `--push` | `bool` | | Shorthand for `--output=type=registry` |
|
||||||
| `-q`, `--quiet` | `bool` | | Suppress the build output and print image ID on success |
|
| `-q`, `--quiet` | `bool` | | Suppress the build output and print image ID on success |
|
||||||
| `--root` | `string` | | Specify root directory of server to connect (EXPERIMENTAL) |
|
| `--root` | `string` | | Specify root directory of server to connect (EXPERIMENTAL) |
|
||||||
| `--sbom` | `string` | | Shorthand for `--attest=type=sbom` |
|
| `--sbom` | `string` | | Shorthand for `--attest=type=sbom` |
|
||||||
| `--secret` | `stringArray` | | Secret to expose to the build (format: `id=mysecret[,src=/local/secret]`) |
|
| `--secret` | `stringArray` | | Secret to expose to the build (format: `id=mysecret[,src=/local/secret]`) |
|
||||||
| `--server-config` | `string` | | Specify buildx server config file (used only when launching new server) (EXPERIMENTAL) |
|
| `--server-config` | `string` | | Specify buildx server config file (used only when launching new server) (EXPERIMENTAL) |
|
||||||
| `--shm-size` | `bytes` | `0` | Shared memory size for build containers |
|
| `--shm-size` | `bytes` | `0` | Shared memory size for build containers |
|
||||||
| `--ssh` | `stringArray` | | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
|
| `--ssh` | `stringArray` | | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
|
||||||
| `-t`, `--tag` | `stringArray` | | Name and optionally a tag (format: `name:tag`) |
|
| `-t`, `--tag` | `stringArray` | | Name and optionally a tag (format: `name:tag`) |
|
||||||
| `--target` | `string` | | Set the target build stage to build |
|
| `--target` | `string` | | Set the target build stage to build |
|
||||||
| `--ulimit` | `ulimit` | | Ulimit options |
|
| `--ulimit` | `ulimit` | | Ulimit options |
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
|
|||||||
28
docs/reference/buildx_history.md
Normal file
28
docs/reference/buildx_history.md
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
# docker buildx history
|
||||||
|
|
||||||
|
<!---MARKER_GEN_START-->
|
||||||
|
Commands to work on build records
|
||||||
|
|
||||||
|
### Subcommands
|
||||||
|
|
||||||
|
| Name | Description |
|
||||||
|
|:---------------------------------------|:-----------------------------------------------|
|
||||||
|
| [`import`](buildx_history_import.md) | Import a build into Docker Desktop |
|
||||||
|
| [`inspect`](buildx_history_inspect.md) | Inspect a build |
|
||||||
|
| [`logs`](buildx_history_logs.md) | Print the logs of a build |
|
||||||
|
| [`ls`](buildx_history_ls.md) | List build records |
|
||||||
|
| [`open`](buildx_history_open.md) | Open a build in Docker Desktop |
|
||||||
|
| [`rm`](buildx_history_rm.md) | Remove build records |
|
||||||
|
| [`trace`](buildx_history_trace.md) | Show the OpenTelemetry trace of a build record |
|
||||||
|
|
||||||
|
|
||||||
|
### Options
|
||||||
|
|
||||||
|
| Name | Type | Default | Description |
|
||||||
|
|:----------------|:---------|:--------|:-----------------------------------------|
|
||||||
|
| `--builder` | `string` | | Override the configured builder instance |
|
||||||
|
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||||
|
|
||||||
|
|
||||||
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
16
docs/reference/buildx_history_import.md
Normal file
16
docs/reference/buildx_history_import.md
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
# docker buildx history import
|
||||||
|
|
||||||
|
<!---MARKER_GEN_START-->
|
||||||
|
Import a build into Docker Desktop
|
||||||
|
|
||||||
|
### Options
|
||||||
|
|
||||||
|
| Name | Type | Default | Description |
|
||||||
|
|:----------------|:--------------|:--------|:-----------------------------------------|
|
||||||
|
| `--builder` | `string` | | Override the configured builder instance |
|
||||||
|
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||||
|
| `-f`, `--file` | `stringArray` | | Import from a file path |
|
||||||
|
|
||||||
|
|
||||||
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
117
docs/reference/buildx_history_inspect.md
Normal file
117
docs/reference/buildx_history_inspect.md
Normal file
@@ -0,0 +1,117 @@
|
|||||||
|
# docker buildx history inspect
|
||||||
|
|
||||||
|
<!---MARKER_GEN_START-->
|
||||||
|
Inspect a build
|
||||||
|
|
||||||
|
### Subcommands
|
||||||
|
|
||||||
|
| Name | Description |
|
||||||
|
|:-----------------------------------------------------|:---------------------------|
|
||||||
|
| [`attachment`](buildx_history_inspect_attachment.md) | Inspect a build attachment |
|
||||||
|
|
||||||
|
|
||||||
|
### Options
|
||||||
|
|
||||||
|
| Name | Type | Default | Description |
|
||||||
|
|:----------------------|:---------|:---------|:-----------------------------------------|
|
||||||
|
| `--builder` | `string` | | Override the configured builder instance |
|
||||||
|
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||||
|
| [`--format`](#format) | `string` | `pretty` | Format the output |
|
||||||
|
|
||||||
|
|
||||||
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
### <a name="format"></a> Format the output (--format)
|
||||||
|
|
||||||
|
The formatting options (`--format`) pretty-prints the output to `pretty` (default),
|
||||||
|
`json` or using a Go template.
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx history inspect
|
||||||
|
Name: buildx (binaries)
|
||||||
|
Context: .
|
||||||
|
Dockerfile: Dockerfile
|
||||||
|
VCS Repository: https://github.com/crazy-max/buildx.git
|
||||||
|
VCS Revision: f15eaa1ee324ffbbab29605600d27a84cab86361
|
||||||
|
Target: binaries
|
||||||
|
Platforms: linux/amd64
|
||||||
|
Keep Git Dir: true
|
||||||
|
|
||||||
|
Started: 2025-02-07 11:56:24
|
||||||
|
Duration: 1m 1s
|
||||||
|
Build Steps: 16/16 (25% cached)
|
||||||
|
|
||||||
|
Image Resolve Mode: local
|
||||||
|
|
||||||
|
Materials:
|
||||||
|
URI DIGEST
|
||||||
|
pkg:docker/docker/dockerfile@1 sha256:93bfd3b68c109427185cd78b4779fc82b484b0b7618e36d0f104d4d801e66d25
|
||||||
|
pkg:docker/golang@1.23-alpine3.21?platform=linux%2Famd64 sha256:2c49857f2295e89b23b28386e57e018a86620a8fede5003900f2d138ba9c4037
|
||||||
|
pkg:docker/tonistiigi/xx@1.6.1?platform=linux%2Famd64 sha256:923441d7c25f1e2eb5789f82d987693c47b8ed987c4ab3b075d6ed2b5d6779a3
|
||||||
|
|
||||||
|
Attachments:
|
||||||
|
DIGEST PLATFORM TYPE
|
||||||
|
sha256:217329d2af959d4f02e3a96dcbe62bf100cab1feb8006a047ddfe51a5397f7e3 https://slsa.dev/provenance/v0.2
|
||||||
|
|
||||||
|
Print build logs: docker buildx history logs g9808bwrjrlkbhdamxklx660b
|
||||||
|
```
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx history inspect --format json
|
||||||
|
{
|
||||||
|
"Name": "buildx (binaries)",
|
||||||
|
"Ref": "5w7vkqfi0rf59hw4hnmn627r9",
|
||||||
|
"Context": ".",
|
||||||
|
"Dockerfile": "Dockerfile",
|
||||||
|
"VCSRepository": "https://github.com/crazy-max/buildx.git",
|
||||||
|
"VCSRevision": "f15eaa1ee324ffbbab29605600d27a84cab86361",
|
||||||
|
"Target": "binaries",
|
||||||
|
"Platform": [
|
||||||
|
"linux/amd64"
|
||||||
|
],
|
||||||
|
"KeepGitDir": true,
|
||||||
|
"StartedAt": "2025-02-07T12:01:05.75807272+01:00",
|
||||||
|
"CompletedAt": "2025-02-07T12:02:07.991778875+01:00",
|
||||||
|
"Duration": 62233706155,
|
||||||
|
"Status": "completed",
|
||||||
|
"NumCompletedSteps": 16,
|
||||||
|
"NumTotalSteps": 16,
|
||||||
|
"NumCachedSteps": 4,
|
||||||
|
"Config": {
|
||||||
|
"ImageResolveMode": "local"
|
||||||
|
},
|
||||||
|
"Materials": [
|
||||||
|
{
|
||||||
|
"URI": "pkg:docker/docker/dockerfile@1",
|
||||||
|
"Digests": [
|
||||||
|
"sha256:93bfd3b68c109427185cd78b4779fc82b484b0b7618e36d0f104d4d801e66d25"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"URI": "pkg:docker/golang@1.23-alpine3.21?platform=linux%2Famd64",
|
||||||
|
"Digests": [
|
||||||
|
"sha256:2c49857f2295e89b23b28386e57e018a86620a8fede5003900f2d138ba9c4037"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"URI": "pkg:docker/tonistiigi/xx@1.6.1?platform=linux%2Famd64",
|
||||||
|
"Digests": [
|
||||||
|
"sha256:923441d7c25f1e2eb5789f82d987693c47b8ed987c4ab3b075d6ed2b5d6779a3"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"Attachments": [
|
||||||
|
{
|
||||||
|
"Digest": "sha256:450fdd2e6b868fecd69e9891c2c404ba461aa38a47663b4805edeb8d2baf80b1",
|
||||||
|
"Type": "https://slsa.dev/provenance/v0.2"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx history inspect --format "{{.Name}}: {{.VCSRepository}} ({{.VCSRevision}})"
|
||||||
|
buildx (binaries): https://github.com/crazy-max/buildx.git (f15eaa1ee324ffbbab29605600d27a84cab86361)
|
||||||
|
```
|
||||||
17
docs/reference/buildx_history_inspect_attachment.md
Normal file
17
docs/reference/buildx_history_inspect_attachment.md
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
# docker buildx history inspect attachment
|
||||||
|
|
||||||
|
<!---MARKER_GEN_START-->
|
||||||
|
Inspect a build attachment
|
||||||
|
|
||||||
|
### Options
|
||||||
|
|
||||||
|
| Name | Type | Default | Description |
|
||||||
|
|:----------------|:---------|:--------|:-----------------------------------------|
|
||||||
|
| `--builder` | `string` | | Override the configured builder instance |
|
||||||
|
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||||
|
| `--platform` | `string` | | Platform of attachment |
|
||||||
|
| `--type` | `string` | | Type of attachment |
|
||||||
|
|
||||||
|
|
||||||
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
16
docs/reference/buildx_history_logs.md
Normal file
16
docs/reference/buildx_history_logs.md
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
# docker buildx history logs
|
||||||
|
|
||||||
|
<!---MARKER_GEN_START-->
|
||||||
|
Print the logs of a build
|
||||||
|
|
||||||
|
### Options
|
||||||
|
|
||||||
|
| Name | Type | Default | Description |
|
||||||
|
|:----------------|:---------|:--------|:--------------------------------------------------|
|
||||||
|
| `--builder` | `string` | | Override the configured builder instance |
|
||||||
|
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||||
|
| `--progress` | `string` | `plain` | Set type of progress output (plain, rawjson, tty) |
|
||||||
|
|
||||||
|
|
||||||
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
17
docs/reference/buildx_history_ls.md
Normal file
17
docs/reference/buildx_history_ls.md
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
# docker buildx history ls
|
||||||
|
|
||||||
|
<!---MARKER_GEN_START-->
|
||||||
|
List build records
|
||||||
|
|
||||||
|
### Options
|
||||||
|
|
||||||
|
| Name | Type | Default | Description |
|
||||||
|
|:----------------|:---------|:--------|:-----------------------------------------|
|
||||||
|
| `--builder` | `string` | | Override the configured builder instance |
|
||||||
|
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||||
|
| `--format` | `string` | `table` | Format the output |
|
||||||
|
| `--no-trunc` | `bool` | | Don't truncate output |
|
||||||
|
|
||||||
|
|
||||||
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
15
docs/reference/buildx_history_open.md
Normal file
15
docs/reference/buildx_history_open.md
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
# docker buildx history open
|
||||||
|
|
||||||
|
<!---MARKER_GEN_START-->
|
||||||
|
Open a build in Docker Desktop
|
||||||
|
|
||||||
|
### Options
|
||||||
|
|
||||||
|
| Name | Type | Default | Description |
|
||||||
|
|:----------------|:---------|:--------|:-----------------------------------------|
|
||||||
|
| `--builder` | `string` | | Override the configured builder instance |
|
||||||
|
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||||
|
|
||||||
|
|
||||||
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
16
docs/reference/buildx_history_rm.md
Normal file
16
docs/reference/buildx_history_rm.md
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
# docker buildx history rm
|
||||||
|
|
||||||
|
<!---MARKER_GEN_START-->
|
||||||
|
Remove build records
|
||||||
|
|
||||||
|
### Options
|
||||||
|
|
||||||
|
| Name | Type | Default | Description |
|
||||||
|
|:----------------|:---------|:--------|:-----------------------------------------|
|
||||||
|
| `--all` | `bool` | | Remove all build records |
|
||||||
|
| `--builder` | `string` | | Override the configured builder instance |
|
||||||
|
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||||
|
|
||||||
|
|
||||||
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
17
docs/reference/buildx_history_trace.md
Normal file
17
docs/reference/buildx_history_trace.md
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
# docker buildx history trace
|
||||||
|
|
||||||
|
<!---MARKER_GEN_START-->
|
||||||
|
Show the OpenTelemetry trace of a build record
|
||||||
|
|
||||||
|
### Options
|
||||||
|
|
||||||
|
| Name | Type | Default | Description |
|
||||||
|
|:----------------|:---------|:--------------|:-----------------------------------------|
|
||||||
|
| `--addr` | `string` | `127.0.0.1:0` | Address to bind the UI server |
|
||||||
|
| `--builder` | `string` | | Override the configured builder instance |
|
||||||
|
| `--compare` | `string` | | Compare with another build reference |
|
||||||
|
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||||
|
|
||||||
|
|
||||||
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
@@ -23,10 +23,10 @@ import (
|
|||||||
"github.com/docker/docker/api/types/mount"
|
"github.com/docker/docker/api/types/mount"
|
||||||
"github.com/docker/docker/api/types/network"
|
"github.com/docker/docker/api/types/network"
|
||||||
"github.com/docker/docker/api/types/system"
|
"github.com/docker/docker/api/types/system"
|
||||||
dockerclient "github.com/docker/docker/client"
|
|
||||||
"github.com/docker/docker/errdefs"
|
"github.com/docker/docker/errdefs"
|
||||||
dockerarchive "github.com/docker/docker/pkg/archive"
|
dockerarchive "github.com/docker/docker/pkg/archive"
|
||||||
"github.com/docker/docker/pkg/idtools"
|
"github.com/docker/docker/pkg/idtools"
|
||||||
|
"github.com/docker/docker/pkg/jsonmessage"
|
||||||
"github.com/docker/docker/pkg/stdcopy"
|
"github.com/docker/docker/pkg/stdcopy"
|
||||||
"github.com/moby/buildkit/client"
|
"github.com/moby/buildkit/client"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
@@ -56,6 +56,7 @@ type Driver struct {
|
|||||||
restartPolicy container.RestartPolicy
|
restartPolicy container.RestartPolicy
|
||||||
env []string
|
env []string
|
||||||
defaultLoad bool
|
defaultLoad bool
|
||||||
|
gpus []container.DeviceRequest
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Driver) IsMobyDriver() bool {
|
func (d *Driver) IsMobyDriver() bool {
|
||||||
@@ -70,7 +71,7 @@ func (d *Driver) Bootstrap(ctx context.Context, l progress.Logger) error {
|
|||||||
return progress.Wrap("[internal] booting buildkit", l, func(sub progress.SubLogger) error {
|
return progress.Wrap("[internal] booting buildkit", l, func(sub progress.SubLogger) error {
|
||||||
_, err := d.DockerAPI.ContainerInspect(ctx, d.Name)
|
_, err := d.DockerAPI.ContainerInspect(ctx, d.Name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if dockerclient.IsErrNotFound(err) {
|
if errdefs.IsNotFound(err) {
|
||||||
return d.create(ctx, sub)
|
return d.create(ctx, sub)
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
@@ -95,19 +96,20 @@ func (d *Driver) create(ctx context.Context, l progress.SubLogger) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
rc, err := d.DockerAPI.ImageCreate(ctx, imageName, image.CreateOptions{
|
resp, err := d.DockerAPI.ImageCreate(ctx, imageName, image.CreateOptions{
|
||||||
RegistryAuth: ra,
|
RegistryAuth: ra,
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
_, err = io.Copy(io.Discard, rc)
|
defer resp.Close()
|
||||||
return err
|
return jsonmessage.DisplayJSONMessagesStream(resp, io.Discard, 0, false, nil)
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
// image pulling failed, check if it exists in local image store.
|
// image pulling failed, check if it exists in local image store.
|
||||||
// if not, return pulling error. otherwise log it.
|
// if not, return pulling error. otherwise log it.
|
||||||
_, _, errInspect := d.DockerAPI.ImageInspectWithRaw(ctx, imageName)
|
_, errInspect := d.DockerAPI.ImageInspect(ctx, imageName)
|
||||||
if errInspect != nil {
|
found := errInspect == nil
|
||||||
|
if !found {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
l.Wrap("pulling failed, using local image "+imageName, func() error { return nil })
|
l.Wrap("pulling failed, using local image "+imageName, func() error { return nil })
|
||||||
@@ -157,6 +159,9 @@ func (d *Driver) create(ctx context.Context, l progress.SubLogger) error {
|
|||||||
if d.cpusetMems != "" {
|
if d.cpusetMems != "" {
|
||||||
hc.Resources.CpusetMems = d.cpusetMems
|
hc.Resources.CpusetMems = d.cpusetMems
|
||||||
}
|
}
|
||||||
|
if len(d.gpus) > 0 && d.hasGPUCapability(ctx, cfg.Image, d.gpus) {
|
||||||
|
hc.Resources.DeviceRequests = d.gpus
|
||||||
|
}
|
||||||
if info, err := d.DockerAPI.Info(ctx); err == nil {
|
if info, err := d.DockerAPI.Info(ctx); err == nil {
|
||||||
if info.CgroupDriver == "cgroupfs" {
|
if info.CgroupDriver == "cgroupfs" {
|
||||||
// Place all buildkit containers inside this cgroup by default so limits can be attached
|
// Place all buildkit containers inside this cgroup by default so limits can be attached
|
||||||
@@ -177,7 +182,6 @@ func (d *Driver) create(ctx context.Context, l progress.SubLogger) error {
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
_, err := d.DockerAPI.ContainerCreate(ctx, cfg, hc, &network.NetworkingConfig{}, nil, d.Name)
|
_, err := d.DockerAPI.ContainerCreate(ctx, cfg, hc, &network.NetworkingConfig{}, nil, d.Name)
|
||||||
if err != nil && !errdefs.IsConflict(err) {
|
if err != nil && !errdefs.IsConflict(err) {
|
||||||
@@ -213,7 +217,7 @@ func (d *Driver) wait(ctx context.Context, l progress.SubLogger) error {
|
|||||||
}
|
}
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
return ctx.Err()
|
return context.Cause(ctx)
|
||||||
case <-time.After(time.Duration(try*120) * time.Millisecond):
|
case <-time.After(time.Duration(try*120) * time.Millisecond):
|
||||||
try++
|
try++
|
||||||
continue
|
continue
|
||||||
@@ -307,7 +311,7 @@ func (d *Driver) start(ctx context.Context) error {
|
|||||||
func (d *Driver) Info(ctx context.Context) (*driver.Info, error) {
|
func (d *Driver) Info(ctx context.Context) (*driver.Info, error) {
|
||||||
ctn, err := d.DockerAPI.ContainerInspect(ctx, d.Name)
|
ctn, err := d.DockerAPI.ContainerInspect(ctx, d.Name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if dockerclient.IsErrNotFound(err) {
|
if errdefs.IsNotFound(err) {
|
||||||
return &driver.Info{
|
return &driver.Info{
|
||||||
Status: driver.Inactive,
|
Status: driver.Inactive,
|
||||||
}, nil
|
}, nil
|
||||||
@@ -420,6 +424,7 @@ func (d *Driver) Features(ctx context.Context) map[driver.Feature]bool {
|
|||||||
driver.DockerExporter: true,
|
driver.DockerExporter: true,
|
||||||
driver.CacheExport: true,
|
driver.CacheExport: true,
|
||||||
driver.MultiPlatform: true,
|
driver.MultiPlatform: true,
|
||||||
|
driver.DirectPush: true,
|
||||||
driver.DefaultLoad: d.defaultLoad,
|
driver.DefaultLoad: d.defaultLoad,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -428,6 +433,31 @@ func (d *Driver) HostGatewayIP(ctx context.Context) (net.IP, error) {
|
|||||||
return nil, errors.New("host-gateway is not supported by the docker-container driver")
|
return nil, errors.New("host-gateway is not supported by the docker-container driver")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// hasGPUCapability checks if docker daemon has GPU capability. We need to run
|
||||||
|
// a dummy container with GPU device to check if the daemon has this capability
|
||||||
|
// because there is no API to check it yet.
|
||||||
|
func (d *Driver) hasGPUCapability(ctx context.Context, image string, gpus []container.DeviceRequest) bool {
|
||||||
|
cfg := &container.Config{
|
||||||
|
Image: image,
|
||||||
|
Entrypoint: []string{"/bin/true"},
|
||||||
|
}
|
||||||
|
hc := &container.HostConfig{
|
||||||
|
NetworkMode: container.NetworkMode(container.IPCModeNone),
|
||||||
|
AutoRemove: true,
|
||||||
|
Resources: container.Resources{
|
||||||
|
DeviceRequests: gpus,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
resp, err := d.DockerAPI.ContainerCreate(ctx, cfg, hc, &network.NetworkingConfig{}, nil, "")
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if err := d.DockerAPI.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
func demuxConn(c net.Conn) net.Conn {
|
func demuxConn(c net.Conn) net.Conn {
|
||||||
pr, pw := io.Pipe()
|
pr, pw := io.Pipe()
|
||||||
// TODO: rewrite parser with Reader() to avoid goroutine switch
|
// TODO: rewrite parser with Reader() to avoid goroutine switch
|
||||||
|
|||||||
@@ -51,6 +51,12 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver
|
|||||||
InitConfig: cfg,
|
InitConfig: cfg,
|
||||||
restartPolicy: rp,
|
restartPolicy: rp,
|
||||||
}
|
}
|
||||||
|
var gpus dockeropts.GpuOpts
|
||||||
|
if err := gpus.Set("all"); err == nil {
|
||||||
|
if v := gpus.Value(); len(v) > 0 {
|
||||||
|
d.gpus = v
|
||||||
|
}
|
||||||
|
}
|
||||||
for k, v := range cfg.DriverOpts {
|
for k, v := range cfg.DriverOpts {
|
||||||
switch {
|
switch {
|
||||||
case k == "network":
|
case k == "network":
|
||||||
|
|||||||
@@ -29,7 +29,7 @@ func (d *Driver) Bootstrap(ctx context.Context, l progress.Logger) error {
|
|||||||
func (d *Driver) Info(ctx context.Context) (*driver.Info, error) {
|
func (d *Driver) Info(ctx context.Context) (*driver.Info, error) {
|
||||||
_, err := d.DockerAPI.ServerVersion(ctx)
|
_, err := d.DockerAPI.ServerVersion(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrapf(driver.ErrNotConnecting{}, err.Error())
|
return nil, errors.Wrap(driver.ErrNotConnecting{}, err.Error())
|
||||||
}
|
}
|
||||||
return &driver.Info{
|
return &driver.Info{
|
||||||
Status: driver.Running,
|
Status: driver.Running,
|
||||||
@@ -39,7 +39,7 @@ func (d *Driver) Info(ctx context.Context) (*driver.Info, error) {
|
|||||||
func (d *Driver) Version(ctx context.Context) (string, error) {
|
func (d *Driver) Version(ctx context.Context) (string, error) {
|
||||||
v, err := d.DockerAPI.ServerVersion(ctx)
|
v, err := d.DockerAPI.ServerVersion(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", errors.Wrapf(driver.ErrNotConnecting{}, err.Error())
|
return "", errors.Wrap(driver.ErrNotConnecting{}, err.Error())
|
||||||
}
|
}
|
||||||
if bkversion, _ := resolveBuildKitVersion(v.Version); bkversion != "" {
|
if bkversion, _ := resolveBuildKitVersion(v.Version); bkversion != "" {
|
||||||
return bkversion, nil
|
return bkversion, nil
|
||||||
@@ -93,6 +93,7 @@ func (d *Driver) Features(ctx context.Context) map[driver.Feature]bool {
|
|||||||
driver.DockerExporter: useContainerdSnapshotter,
|
driver.DockerExporter: useContainerdSnapshotter,
|
||||||
driver.CacheExport: useContainerdSnapshotter,
|
driver.CacheExport: useContainerdSnapshotter,
|
||||||
driver.MultiPlatform: useContainerdSnapshotter,
|
driver.MultiPlatform: useContainerdSnapshotter,
|
||||||
|
driver.DirectPush: useContainerdSnapshotter,
|
||||||
driver.DefaultLoad: true,
|
driver.DefaultLoad: true,
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|||||||
@@ -176,11 +176,6 @@ func resolveBuildKitVersion(ver string) (string, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
//if _, errs := c.Validate(mobyVersion); len(errs) > 0 {
|
|
||||||
// for _, err := range errs {
|
|
||||||
// fmt.Printf("%s: %v\n", m.MobyVersionConstraint, err)
|
|
||||||
// }
|
|
||||||
//}
|
|
||||||
if !c.Check(mobyVersion) {
|
if !c.Check(mobyVersion) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,5 +7,6 @@ const DockerExporter Feature = "Docker exporter"
|
|||||||
|
|
||||||
const CacheExport Feature = "Cache export"
|
const CacheExport Feature = "Cache export"
|
||||||
const MultiPlatform Feature = "Multi-platform build"
|
const MultiPlatform Feature = "Multi-platform build"
|
||||||
|
const DirectPush Feature = "Direct push"
|
||||||
|
|
||||||
const DefaultLoad Feature = "Automatically load images to the Docker Engine image store"
|
const DefaultLoad Feature = "Automatically load images to the Docker Engine image store"
|
||||||
|
|||||||
@@ -35,10 +35,10 @@ func testEndpoint(server, defaultNamespace string, ca, cert, key []byte, skipTLS
|
|||||||
}
|
}
|
||||||
|
|
||||||
var testStoreCfg = store.NewConfig(
|
var testStoreCfg = store.NewConfig(
|
||||||
func() interface{} {
|
func() any {
|
||||||
return &map[string]interface{}{}
|
return &map[string]any{}
|
||||||
},
|
},
|
||||||
store.EndpointTypeGetter(KubernetesEndpoint, func() interface{} { return &EndpointMeta{} }),
|
store.EndpointTypeGetter(KubernetesEndpoint, func() any { return &EndpointMeta{} }),
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestSaveLoadContexts(t *testing.T) {
|
func TestSaveLoadContexts(t *testing.T) {
|
||||||
@@ -191,13 +191,13 @@ func checkClientConfig(t *testing.T, ep Endpoint, server, namespace string, ca,
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, proxyURLString, proxyURL.String())
|
assert.Equal(t, proxyURLString, proxyURL.String())
|
||||||
} else {
|
} else {
|
||||||
assert.True(t, cfg.Proxy == nil, "expected proxy to be nil, but is not nil instead")
|
assert.Nil(t, cfg.Proxy, "expected proxy to be nil, but is not nil instead")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func save(s store.Writer, ep Endpoint, name string) error {
|
func save(s store.Writer, ep Endpoint, name string) error {
|
||||||
meta := store.Metadata{
|
meta := store.Metadata{
|
||||||
Endpoints: map[string]interface{}{
|
Endpoints: map[string]any{
|
||||||
KubernetesEndpoint: ep.EndpointMeta,
|
KubernetesEndpoint: ep.EndpointMeta,
|
||||||
},
|
},
|
||||||
Name: name,
|
Name: name,
|
||||||
@@ -224,7 +224,7 @@ func TestSaveLoadGKEConfig(t *testing.T) {
|
|||||||
persistedMetadata, err := store.GetMetadata("gke-context")
|
persistedMetadata, err := store.GetMetadata("gke-context")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
persistedEPMeta := EndpointFromContext(persistedMetadata)
|
persistedEPMeta := EndpointFromContext(persistedMetadata)
|
||||||
assert.True(t, persistedEPMeta != nil)
|
assert.NotNil(t, persistedEPMeta)
|
||||||
persistedEP, err := persistedEPMeta.WithTLSData(store, "gke-context")
|
persistedEP, err := persistedEPMeta.WithTLSData(store, "gke-context")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
persistedCfg := persistedEP.KubernetesConfig()
|
persistedCfg := persistedEP.KubernetesConfig()
|
||||||
@@ -249,7 +249,7 @@ func TestSaveLoadEKSConfig(t *testing.T) {
|
|||||||
persistedMetadata, err := store.GetMetadata("eks-context")
|
persistedMetadata, err := store.GetMetadata("eks-context")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
persistedEPMeta := EndpointFromContext(persistedMetadata)
|
persistedEPMeta := EndpointFromContext(persistedMetadata)
|
||||||
assert.True(t, persistedEPMeta != nil)
|
assert.NotNil(t, persistedEPMeta)
|
||||||
persistedEP, err := persistedEPMeta.WithTLSData(store, "eks-context")
|
persistedEP, err := persistedEPMeta.WithTLSData(store, "eks-context")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
persistedCfg := persistedEP.KubernetesConfig()
|
persistedCfg := persistedEP.KubernetesConfig()
|
||||||
@@ -274,14 +274,14 @@ func TestSaveLoadK3SConfig(t *testing.T) {
|
|||||||
persistedMetadata, err := store.GetMetadata("k3s-context")
|
persistedMetadata, err := store.GetMetadata("k3s-context")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
persistedEPMeta := EndpointFromContext(persistedMetadata)
|
persistedEPMeta := EndpointFromContext(persistedMetadata)
|
||||||
assert.True(t, persistedEPMeta != nil)
|
assert.NotNil(t, persistedEPMeta)
|
||||||
persistedEP, err := persistedEPMeta.WithTLSData(store, "k3s-context")
|
persistedEP, err := persistedEPMeta.WithTLSData(store, "k3s-context")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
persistedCfg := persistedEP.KubernetesConfig()
|
persistedCfg := persistedEP.KubernetesConfig()
|
||||||
actualCfg, err := persistedCfg.ClientConfig()
|
actualCfg, err := persistedCfg.ClientConfig()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.True(t, len(actualCfg.Username) > 0)
|
assert.Greater(t, len(actualCfg.Username), 0)
|
||||||
assert.True(t, len(actualCfg.Password) > 0)
|
assert.Greater(t, len(actualCfg.Password), 0)
|
||||||
assert.Equal(t, expectedCfg.Username, actualCfg.Username)
|
assert.Equal(t, expectedCfg.Username, actualCfg.Username)
|
||||||
assert.Equal(t, expectedCfg.Password, actualCfg.Password)
|
assert.Equal(t, expectedCfg.Password, actualCfg.Password)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -43,7 +43,7 @@ type Endpoint struct {
|
|||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
command.RegisterDefaultStoreEndpoints(
|
command.RegisterDefaultStoreEndpoints(
|
||||||
store.EndpointTypeGetter(KubernetesEndpoint, func() interface{} { return &EndpointMeta{} }),
|
store.EndpointTypeGetter(KubernetesEndpoint, func() any { return &EndpointMeta{} }),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -96,7 +96,7 @@ func (c *Endpoint) KubernetesConfig() clientcmd.ClientConfig {
|
|||||||
|
|
||||||
// ResolveDefault returns endpoint metadata for the default Kubernetes
|
// ResolveDefault returns endpoint metadata for the default Kubernetes
|
||||||
// endpoint, which is derived from the env-based kubeconfig.
|
// endpoint, which is derived from the env-based kubeconfig.
|
||||||
func (c *EndpointMeta) ResolveDefault() (interface{}, *store.EndpointTLSData, error) {
|
func (c *EndpointMeta) ResolveDefault() (any, *store.EndpointTLSData, error) {
|
||||||
kubeconfig := os.Getenv("KUBECONFIG")
|
kubeconfig := os.Getenv("KUBECONFIG")
|
||||||
if kubeconfig == "" {
|
if kubeconfig == "" {
|
||||||
kubeconfig = filepath.Join(homedir.Get(), ".kube/config")
|
kubeconfig = filepath.Join(homedir.Get(), ".kube/config")
|
||||||
|
|||||||
@@ -112,7 +112,7 @@ func (d *Driver) wait(ctx context.Context) error {
|
|||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
return ctx.Err()
|
return context.Cause(ctx)
|
||||||
case <-timeoutChan:
|
case <-timeoutChan:
|
||||||
return err
|
return err
|
||||||
case <-ticker.C:
|
case <-ticker.C:
|
||||||
@@ -238,6 +238,7 @@ func (d *Driver) Features(_ context.Context) map[driver.Feature]bool {
|
|||||||
driver.DockerExporter: d.DockerAPI != nil,
|
driver.DockerExporter: d.DockerAPI != nil,
|
||||||
driver.CacheExport: true,
|
driver.CacheExport: true,
|
||||||
driver.MultiPlatform: true, // Untested (needs multiple Driver instances)
|
driver.MultiPlatform: true, // Untested (needs multiple Driver instances)
|
||||||
|
driver.DirectPush: true,
|
||||||
driver.DefaultLoad: d.defaultLoad,
|
driver.DefaultLoad: d.defaultLoad,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user