Mirror of https://gitea.com/Lydanne/buildx.git (synced 2025-09-06 19:09:08 +08:00)

Compare commits: v0.17.0-rc ... v0.23.0-rc (498 commits)
Commits in this range:

f58f0221e2 9696b50d1e 15495efa86 bde47313d4 d69301d57b ee77cdb175
8fb1157b5f a34cdff84e 77139daa4b 10e3892a63 d80ece5bb3 1f44971fc9
a91db7ccc9 98c3abb756 3b824a0e39 b0156cd631 29614f9734 f1b895196c
900502b139 49bd7e4edc 8f9c25e8b0 7659798f80 7b8bf9f801 8efc528b84
8593e0397b 0c0e8eefdf e114dd09a5 d25e260d2e 86e4e77ac1 534d9fc276
e0c67bfc79 53e576b306 d3aef6642c 824cef1b92 a8b0fa8965 45dfb84361
13ef01196d 646df6d4a0 d46c1d8141 c682742de0 391acba718 db4b96e62c
882ef0db91 967fc2a696 212d598ab1 bf95aa3dfa 18ccba0720 f5196f1167
ef99381eab a41c9fa649 00fdcd38ab 97f1d47464 337578242d 503a8925d2
0d708c0bc2 3a7523a117 5dc1a3308d eb78253dfd 5f8b78a113 67d3ed34e4
b88423be50 c1e2ae5636 23afb70e40 812b42b329 d5d3d3d502 e19c729d3e
aefa49c4fa 7d927ee604 058c098c8c 7b7dbe88b1 cadf4a5893 6cd9fef556
963b9ca30d 4636c8051a e23695d50d 6eff9b2d51 fcbfc85f42 9a204c44c3
4c6eba5acd fea7459880 e2d52a8465 48a591b1e1 128acdb471 411d3f8cea
7925a96726 b06bddfee6 fe17ebda89 4ed1e07f16 f49593ce2c 4e91fe6507
921b576f3a 548c80ab5a f3a4740d5f 89917dc696 f7276201ac beb9f515c0
4f7d145c0e ccdf63c644 9a6b8754b1 e75ac22ba6 62f5cc7c80 6272ae1afa
accfbf6e24 af2d8fe555 18f4275a92 221a608b3c cc0391eba5 aef388bf7a
80c16bc28c 75160643e1 ad18ffc018 80c3832c94 7762ab2c38 b973de2dd3
352ce7e875 cdfc1ed750 d0d3433b12 b04d39494f 52f503e806 79a978484d
f7992033bf 73f61aa338 faa573f484 0a4a1babd1 461bd9e5d1 d6fdf83f45
ef4e9fea83 0c296fe857 ef73c64d2c 1784f84561 6a6fa4f422 2dc0350ffe
b85fc5c484 2389d457a4 3f82aadc6e 79e3f12305 1dc5f0751b 7ba4da0800
a64e628774 1c4b1a376c e1f690abfc 03569c2188 350d3f0f4b dc27815236
1089ff7341 7433d37183 f9a76355b5 cfeea34b2d ba2d3692a6 853b593a4d
efb300e613 cee7b344da 67dbde6970 295653dabb f5802119c5 40b9ac1ec5
f11496448a c8c9c72ca6 9fe8139022 b3e8c62635 b8e9c28315 3ae9970da5
1d219100fc 464f9278d1 7216086b8c b195b80ddf 70a5e266d1 689bea7963
5176c38115 ec440c4574 0a4eb7ec76 f710c93157 d1a0a1497c c880ecd513
d557da1935 417af36abc e236b86297 633e8a0881 5e1ea62f92 4b90b84995
abc85c38f8 ccca7c795a 04aab6958c 9d640f0e33 b76fdcaf8d d693e18c04
b066ee1110 cf8bf9e104 3bd54b19aa 934841f329 b2ababc7b6 0ccdb7e248
cacb4fb9b3 df80bd72c6 bb4bef2f04 a11507344a 17af006857 11c84973ef
cc4a291f6a aa1fbc0421 b2bbb337e4 012df71b63 a26bb271ab 3e0682f039
3aed658dc4 b4a0dee723 6904512813 d41e335466 0954dcb5fd 38f64bf709
c1d3955fbe d0b63e60e2 e141c8fa71 2ee156236b 1335264c9d e74185aa6d
0224773102 8c27b5c545 f7594d484b f118749cdc 0d92ad713c a18ff4d5ef
b035a04aaa 6220e0aae8 d9abc78e8f 3313026961 06912aa24c cde0e9814d
2e6e146087 af3cbe6cec 1ef9e67cbb 75204426bd b73f58a90b 6f5486e718
3fa0c3d122 b0b902de41 77d632e0c5 6a12543db3 4027b60fa0 dda8df3b06
d54a110b3c 44fa243d58 630066bfc5 026ac2313c 45fc5ed3b3 1eb167a767
45d2ec69f1 793ec7f3b2 6cb62dddf2 66ecb53fa7 26026810fe d3830e0a6e
8c2759f6ae 8a472c6c9d b98653d8fe 807d15ff9d ac636fd2d8 769d22fb30
e36535e137 ada44e82ea 16edf5d4aa 11c85b2369 41215835cf a41fc81796
5f057bdee7 883806524a 38b71998f5 07db2be2f0 f3f5e760b3 e762d3dbca
4ecbb018f2 a8f4699c5e 7cf12fce98 07190d20da c79368c199 f47d12e692
0fc204915a 3a0eeeacd5 e6ce3917d3 e085ed8c5c b83c3e239e a90d5794ee
c571b9d730 af53930206 c4a2db8f0c 206bd6c3a2 5c169dd878 875e717361
72c3d4a237 ce46297960 e8389c8a02 804ee66f13 5c5bc510ac 0dfc4a1019
1e992b295c 4f81bcb5c8 3771fe2034 5dd4ae0335 567361d494 21b1be1667
876e003685 a53ed0a354 737da6959d 6befa70cc8 2d051bde96 63985b591b
695200c81a 828c1dbf98 f321d4ac95 0d13bf6606 3e3242cfdd f9e2d07b30
c281e18892 98d4cb1eb3 70f2fb6442 fdac6d5fe7 d4eca07af8 95e77da0fa
6810a7c69c dd596d6542 c6e403ad7f d6d713aac6 f148976e6e 8f70196de1
e196855bed 71c7889719 a3418e0178 6a1cf78879 ec1f712328 5ce6597c07
9c75071793 d612139b19 42f7898c53 3148c098a2 f95d574f94 60822781be
4c83475703 17eff25fe5 9c8ffb77d6 13a426fca6 1a039115bc 07d58782b8
3ccbb88e6a a34c641bc4 f10be074b4 9f429965c0 f3929447d7 615f4f6759
9a7b028bab 1af4f05ba4 4b5d78db9b d2c512a95b 5937ba0e00 21fb026aa3
bc45641086 96689e5d05 50a8f11f0f 11cf38bd97 300d56b3ff e04da86aca
9f1fc99018 26bbddb5d6 58fd190c31 e7a53fb829 c0fd64f4f8 0c629335ac
f216b71ad2 debe8c0187 a69d857b8a a6ef9db84d 9c27be752c 82a65d4f9b
8647f408ac e51cdcac50 55a544d976 3b943bd4ba 502bb51a3b 48977780ad
e540bb03a4 919c52395d 7f01c63be7 076d2f19d5 3c3150b8d3 b03d8c52e1
e67ccb080b dab02c347e 6caa151e98 be6d8326a8 7855f8324b 850e5330ad
b7ea25eb59 8cdeac54ab 752c70a06c 83dd969dc1 a5bb117ff0 735b7f68fe
bcac44f658 d46595eed8 62407927fa c7b0a84c6a 1aac809c63 9b0575b589
9f3a578149 14b31d8b77 e26911f403 cd8d61a9d7 3a56161d03 0fd935b0ca
704b2cc52d 6b2dc8ce56 a585faf3d2 181348397c ad371e428e f35dae3726
6fcc6853d9 202c390fca ca502cc9a5 2bdf451b68 658ed584c7 886ae21e93
cf7a9aa084 eb15c667b9 1060328a96 746eadd16e f89f861999 08a973a148
cc286e2ef5 8056a3dc7c 9f0ebd2643 680cdf1179 8d32cabc22 239930c998
8d7f69883f 1de332530f 65c4756473 d3ff70ace0 14de641bec 1ce3e6a221
b1a13bb740 64c5139ab6 d353f5f6ba 4507a492da 9fc6f39d71 f6a27a664b
48153169d8 d7de22c61f 7c91f3d0dd 820f5e77ed 1db8f6789f b35a0f4718
8e47387d02 fdda92f304 d078a3047d f102ad73a8 671bd1b54d f8657e8798
61d9f1d981 9eb0318ee6 4528269102 8d3d32e376 c60afbb25b 9bfa8603f6
30e60628bf df0270d0cc 056cf8a7ca 15c596a091 e950b2e478 4da753da79
3f81293fd4 120578091f 604b723007 528181c759 cd5381900c bba2bb4b89
8fd27b8c23 6dcc8d8b84 9fb8b04b64 6ba5802958 f039670961 4ec12e7e68
66ed7d6162 617d59d70b 40f444f4b8 8201d301d5 40ef3446f5 7213b2a814
9cfa25ab40 6db3444a25 15e930b691 abc5eaed88 f1b92e9e6c ad9a5196b3
db117855da ecfe98df6f 479177eaf9 194f523fe1 29d367bdd4 d6e030eda7
.github/CONTRIBUTING.md (vendored): 83 lines changed

@@ -188,6 +188,89 @@ To generate new vendored files with go modules run:
 $ make vendor
 ```
 
+### Generate profiling data
+
+You can configure Buildx to generate [`pprof`](https://github.com/google/pprof)
+memory and CPU profiles to analyze and optimize your builds. These profiles are
+useful for identifying performance bottlenecks, detecting memory
+inefficiencies, and ensuring the program (Buildx) runs efficiently.
+
+The following environment variables control whether Buildx generates profiling
+data for builds:
+
+```console
+$ export BUILDX_CPU_PROFILE=buildx_cpu.prof
+$ export BUILDX_MEM_PROFILE=buildx_mem.prof
+```
+
+When set, Buildx emits profiling samples for the builds to the location
+specified by the environment variable.
+
+To analyze and visualize profiling samples, you need `pprof` from the Go
+toolchain, and (optionally) GraphViz for visualization in a graphical format.
+
+To inspect profiling data with `pprof`:
+
+1. Build a local binary of Buildx from source.
+
+   ```console
+   $ docker buildx bake
+   ```
+
+   The binary gets exported to `./bin/build/buildx`.
+
+2. Run a build with the environment variables set to generate profiling data.
+
+   ```console
+   $ export BUILDX_CPU_PROFILE=buildx_cpu.prof
+   $ export BUILDX_MEM_PROFILE=buildx_mem.prof
+   $ ./bin/build/buildx bake
+   ```
+
+   This creates `buildx_cpu.prof` and `buildx_mem.prof` for the build.
+
+3. Start `pprof` and specify the filename of the profile that you want to
+   analyze.
+
+   ```console
+   $ go tool pprof buildx_cpu.prof
+   ```
+
+   This opens the `pprof` interactive console. From here, you can inspect the
+   profiling sample using various commands. For example, use the `top 10`
+   command to view the top 10 most time-consuming entries.
+
+   ```plaintext
+   (pprof) top 10
+   Showing nodes accounting for 3.04s, 91.02% of 3.34s total
+   Dropped 123 nodes (cum <= 0.02s)
+   Showing top 10 nodes out of 159
+         flat  flat%   sum%        cum   cum%
+        1.14s 34.13% 34.13%      1.14s 34.13%  syscall.syscall
+        0.91s 27.25% 61.38%      0.91s 27.25%  runtime.kevent
+        0.35s 10.48% 71.86%      0.35s 10.48%  runtime.pthread_cond_wait
+        0.22s  6.59% 78.44%      0.22s  6.59%  runtime.pthread_cond_signal
+        0.15s  4.49% 82.93%      0.15s  4.49%  runtime.usleep
+        0.10s  2.99% 85.93%      0.10s  2.99%  runtime.memclrNoHeapPointers
+        0.10s  2.99% 88.92%      0.10s  2.99%  runtime.memmove
+        0.03s   0.9% 89.82%      0.03s   0.9%  runtime.madvise
+        0.02s   0.6% 90.42%      0.02s   0.6%  runtime.(*mspan).typePointersOfUnchecked
+        0.02s   0.6% 91.02%      0.02s   0.6%  runtime.pcvalue
+   ```
+
+To view the call graph in a GUI, run `go tool pprof -http=:8081 <sample>`.
+
+> [!NOTE]
+> Requires [GraphViz](https://www.graphviz.org/) to be installed.
+
+```console
+$ go tool pprof -http=:8081 buildx_cpu.prof
+Serving web UI on http://127.0.0.1:8081
+http://127.0.0.1:8081
+```
+
+For more information about using `pprof` and how to interpret the call graph,
+refer to the [`pprof` README](https://github.com/google/pprof/blob/main/doc/README.md).
+
 ### Conventions
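As an aside for contributors, the snippet below is a minimal, self-contained sketch of how a Go CLI typically wires this kind of environment-variable-driven `pprof` profiling. Only the `BUILDX_CPU_PROFILE` and `BUILDX_MEM_PROFILE` variable names come from the section above; everything else (the `doWork` placeholder and the overall structure) is illustrative and is not buildx's actual implementation.

```go
// Sketch: env-var-driven CPU and heap profiling with runtime/pprof.
package main

import (
	"fmt"
	"os"
	"runtime"
	"runtime/pprof"
)

func main() {
	// Start a CPU profile if the env var names an output file.
	if path := os.Getenv("BUILDX_CPU_PROFILE"); path != "" {
		f, err := os.Create(path)
		if err != nil {
			fmt.Fprintln(os.Stderr, "cpu profile:", err)
			os.Exit(1)
		}
		defer f.Close()
		if err := pprof.StartCPUProfile(f); err != nil {
			fmt.Fprintln(os.Stderr, "cpu profile:", err)
			os.Exit(1)
		}
		defer pprof.StopCPUProfile()
	}

	doWork() // stand-in for the actual build

	// Write a heap profile on the way out if requested.
	if path := os.Getenv("BUILDX_MEM_PROFILE"); path != "" {
		f, err := os.Create(path)
		if err != nil {
			fmt.Fprintln(os.Stderr, "mem profile:", err)
			os.Exit(1)
		}
		defer f.Close()
		runtime.GC() // materialize up-to-date allocation statistics
		if err := pprof.WriteHeapProfile(f); err != nil {
			fmt.Fprintln(os.Stderr, "mem profile:", err)
			os.Exit(1)
		}
	}
}

// doWork burns a little CPU so the profile has something to show.
func doWork() {
	sum := 0
	for i := 0; i < 100_000_000; i++ {
		sum += i
	}
	fmt.Println("work done:", sum)
}
```

Run it with the same environment variables as above, and the resulting files open with `go tool pprof` exactly as described.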
.github/SECURITY.md (vendored): 50 lines changed

@@ -1,12 +1,44 @@
-# Reporting security issues
+# Security Policy
 
-The project maintainers take security seriously. If you discover a security
-issue, please bring it to their attention right away!
+The maintainers of Docker Buildx take security seriously. If you discover
+a security issue, please bring it to their attention right away!
 
-**Please _DO NOT_ file a public issue**, instead send your report privately to
-[security@docker.com](mailto:security@docker.com).
+## Reporting a Vulnerability
 
-Security reports are greatly appreciated, and we will publicly thank you for it.
-We also like to send gifts—if you're into schwag, make sure to let
-us know. We currently do not offer a paid security bounty program, but are not
-ruling it out in the future.
+Please **DO NOT** file a public issue, instead send your report privately
+to [security@docker.com](mailto:security@docker.com).
+
+Reporter(s) can expect a response within 72 hours, acknowledging the issue was
+received.
+
+## Review Process
+
+After receiving the report, an initial triage and technical analysis is
+performed to confirm the report and determine its scope. We may request
+additional information in this stage of the process.
+
+Once a reviewer has confirmed the relevance of the report, a draft security
+advisory will be created on GitHub. The draft advisory will be used to discuss
+the issue with maintainers, the reporter(s), and where applicable, other
+affected parties under embargo.
+
+If the vulnerability is accepted, a timeline for developing a patch, public
+disclosure, and patch release will be determined. If there is an embargo period
+on public disclosure before the patch release, the reporter(s) are expected to
+participate in the discussion of the timeline and abide by agreed upon dates
+for public disclosure.
+
+## Accreditation
+
+Security reports are greatly appreciated and we will publicly thank you,
+although we will keep your name confidential if you request it. We also like to
+send gifts - if you're into swag, make sure to let us know. We do not currently
+offer a paid security bounty program at this time.
+
+## Supported Versions
+
+Once a new feature release is cut, support for the previous feature release is
+discontinued. An exception may be made for urgent security releases that occur
+shortly after a new feature release. Buildx does not offer LTS (Long-Term Support)
+releases. Refer to the [Support Policy](https://github.com/docker/buildx/blob/master/PROJECT.md#support-policy)
+for further details.
.github/labeler.yml (vendored): 5 lines changed

@@ -96,6 +96,11 @@ area/hack:
   - changed-files:
       - any-glob-to-any-file: 'hack/**'
 
+# Add 'area/history' label to changes in history command
+area/history:
+  - changed-files:
+      - any-glob-to-any-file: 'commands/history/**'
+
 # Add 'area/tests' label to changes in test files
 area/tests:
   - changed-files:
.github/workflows/build.yml (vendored): 172 lines changed

@@ -1,5 +1,14 @@
 name: build
 
+# Default to 'contents: read', which grants actions to read commits.
+#
+# If any permission is set, any permission not included in the list is
+# implicitly set to "none".
+#
+# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
+permissions:
+  contents: read
+
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true
@@ -19,15 +28,15 @@ on:
       - 'docs/**'
 
 env:
-  BUILDX_VERSION: "latest"
-  BUILDKIT_IMAGE: "moby/buildkit:latest"
+  SETUP_BUILDX_VERSION: "edge"
+  SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
   SCOUT_VERSION: "1.11.0"
   REPO_SLUG: "docker/buildx-bin"
   DESTDIR: "./bin"
   TEST_CACHE_SCOPE: "test"
   TESTFLAGS: "-v --parallel=6 --timeout=30m"
   GOTESTSUM_FORMAT: "standard-verbose"
-  GO_VERSION: "1.22"
+  GO_VERSION: "1.23"
   GOTESTSUM_VERSION: "v1.9.0" # same as one in Dockerfile
 
 jobs:
@@ -45,9 +54,9 @@ jobs:
           - master
           - latest
           - buildx-stable-1
-          - v0.14.1
-          - v0.13.2
-          - v0.12.5
+          - v0.20.2
+          - v0.19.0
+          - v0.18.2
         worker:
           - docker-container
          - remote
@@ -67,6 +76,26 @@ jobs:
           - worker: docker+containerd # same as docker, but with containerd snapshotter
             pkg: ./tests
             mode: experimental
+          - worker: "docker@27.5"
+            pkg: ./tests
+          - worker: "docker+containerd@27.5" # same as docker, but with containerd snapshotter
+            pkg: ./tests
+          - worker: "docker@27.5"
+            pkg: ./tests
+            mode: experimental
+          - worker: "docker+containerd@27.5" # same as docker, but with containerd snapshotter
+            pkg: ./tests
+            mode: experimental
+          - worker: "docker@26.1"
+            pkg: ./tests
+          - worker: "docker+containerd@26.1" # same as docker, but with containerd snapshotter
+            pkg: ./tests
+          - worker: "docker@26.1"
+            pkg: ./tests
+            mode: experimental
+          - worker: "docker+containerd@26.1" # same as docker, but with containerd snapshotter
+            pkg: ./tests
+            mode: experimental
     steps:
       -
         name: Prepare
@@ -77,7 +106,7 @@ jobs:
           fi
           testFlags="--run=//worker=$(echo "${{ matrix.worker }}" | sed 's/\+/\\+/g')$"
           case "${{ matrix.worker }}" in
-            docker | docker+containerd)
+            docker | docker+containerd | docker@* | docker+containerd@*)
              echo "TESTFLAGS=${{ env.TESTFLAGS_DOCKER }} $testFlags" >> $GITHUB_ENV
              ;;
            *)
@@ -102,13 +131,14 @@ jobs:
         name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
         with:
-          version: ${{ env.BUILDX_VERSION }}
-          driver-opts: image=${{ env.BUILDKIT_IMAGE }}
+          version: ${{ env.SETUP_BUILDX_VERSION }}
+          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
           buildkitd-flags: --debug
       -
         name: Build test image
-        uses: docker/bake-action@v5
+        uses: docker/bake-action@v6
         with:
+          source: .
           targets: integration-test
           set: |
             *.output=type=docker,name=${{ env.TEST_IMAGE_ID }}
@@ -122,7 +152,7 @@ jobs:
       -
         name: Send to Codecov
         if: always()
-        uses: codecov/codecov-action@v4
+        uses: codecov/codecov-action@v5
         with:
           directory: ./bin/testreports
           flags: integration
@@ -149,11 +179,16 @@ jobs:
       matrix:
         os:
           - ubuntu-24.04
-          - macos-12
+          - macos-14
           - windows-2022
     env:
       SKIP_INTEGRATION_TESTS: 1
     steps:
+      -
+        name: Setup Git config
+        run: |
+          git config --global core.autocrlf false
+          git config --global core.eol lf
       -
         name: Checkout
         uses: actions/checkout@v4
@@ -194,7 +229,7 @@ jobs:
       -
         name: Send to Codecov
         if: always()
-        uses: codecov/codecov-action@v4
+        uses: codecov/codecov-action@v5
         with:
           directory: ${{ env.TESTREPORTS_DIR }}
           env_vars: RUNNER_OS
@@ -215,25 +250,88 @@ jobs:
           name: test-reports-${{ env.TESTREPORTS_NAME }}
           path: ${{ env.TESTREPORTS_BASEDIR }}
 
-  govulncheck:
-    runs-on: ubuntu-24.04
-    permissions:
-      # required to write sarif report
-      security-events: write
+  test-bsd-unit:
+    runs-on: ubuntu-22.04
+    continue-on-error: true
+    strategy:
+      fail-fast: false
+      matrix:
+        os:
+          - freebsd
+          - netbsd
+          - openbsd
     steps:
+      -
+        name: Prepare
+        run: |
+          echo "VAGRANT_FILE=hack/Vagrantfile.${{ matrix.os }}" >> $GITHUB_ENV
+
+          # Sets semver Go version to be able to download tarball during vagrant setup
+          goVersion=$(curl --silent "https://go.dev/dl/?mode=json&include=all" | jq -r '.[].files[].version' | uniq | sed -e 's/go//' | sort -V | grep $GO_VERSION | tail -1)
+          echo "GO_VERSION=$goVersion" >> $GITHUB_ENV
       -
         name: Checkout
         uses: actions/checkout@v4
+      -
+        name: Cache Vagrant boxes
+        uses: actions/cache@v4
+        with:
+          path: ~/.vagrant.d/boxes
+          key: ${{ runner.os }}-vagrant-${{ matrix.os }}-${{ hashFiles(env.VAGRANT_FILE) }}
+          restore-keys: |
+            ${{ runner.os }}-vagrant-${{ matrix.os }}-
+      -
+        name: Install vagrant
+        run: |
+          set -x
+          wget -O - https://apt.releases.hashicorp.com/gpg | sudo gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg
+          echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list
+          sudo apt-get update
+          sudo apt-get install -y libvirt-dev libvirt-daemon libvirt-daemon-system vagrant vagrant-libvirt ruby-libvirt
+          sudo systemctl enable --now libvirtd
+          sudo chmod a+rw /var/run/libvirt/libvirt-sock
+          vagrant plugin install vagrant-libvirt
+          vagrant --version
+      -
+        name: Set up vagrant
+        run: |
+          ln -sf ${{ env.VAGRANT_FILE }} Vagrantfile
+          vagrant up --no-tty
+      -
+        name: Test
+        run: |
+          vagrant ssh -- "cd /vagrant; SKIP_INTEGRATION_TESTS=1 go test -mod=vendor -coverprofile=coverage.txt -covermode=atomic ${{ env.TESTFLAGS }} ./..."
+          vagrant ssh -c "sudo cat /vagrant/coverage.txt" > coverage.txt
+      -
+        name: Upload coverage
+        if: always()
+        uses: codecov/codecov-action@v5
+        with:
+          files: ./coverage.txt
+          env_vars: RUNNER_OS
+          flags: unit,${{ matrix.os }}
+          token: ${{ secrets.CODECOV_TOKEN }}
+        env:
+          RUNNER_OS: ${{ matrix.os }}
+
+  govulncheck:
+    runs-on: ubuntu-24.04
+    permissions:
+      # same as global permission
+      contents: read
+      # required to write sarif report
+      security-events: write
+    steps:
       -
         name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
         with:
-          version: ${{ env.BUILDX_VERSION }}
-          driver-opts: image=${{ env.BUILDKIT_IMAGE }}
+          version: ${{ env.SETUP_BUILDX_VERSION }}
+          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
           buildkitd-flags: --debug
       -
         name: Run
-        uses: docker/bake-action@v5
+        uses: docker/bake-action@v6
         with:
           targets: govulncheck
         env:
@@ -287,8 +385,8 @@ jobs:
         name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
         with:
-          version: ${{ env.BUILDX_VERSION }}
-          driver-opts: image=${{ env.BUILDKIT_IMAGE }}
+          version: ${{ env.SETUP_BUILDX_VERSION }}
+          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
           buildkitd-flags: --debug
       -
         name: Build
@@ -314,8 +412,14 @@ jobs:
     if: ${{ github.event_name != 'pull_request' && github.repository == 'docker/buildx' }}
     steps:
       -
-        name: Checkout
-        uses: actions/checkout@v4
+        name: Free disk space
+        uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be # v1.3.1
+        with:
+          android: true
+          dotnet: true
+          haskell: true
+          large-packages: true
+          swap-storage: true
       -
         name: Set up QEMU
         uses: docker/setup-qemu-action@v3
@@ -323,8 +427,8 @@ jobs:
         name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
         with:
-          version: ${{ env.BUILDX_VERSION }}
-          driver-opts: image=${{ env.BUILDKIT_IMAGE }}
+          version: ${{ env.SETUP_BUILDX_VERSION }}
+          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
           buildkitd-flags: --debug
       -
         name: Docker meta
@@ -347,11 +451,11 @@ jobs:
           password: ${{ secrets.DOCKERPUBLICBOT_WRITE_PAT }}
       -
         name: Build and push image
-        uses: docker/bake-action@v5
+        uses: docker/bake-action@v6
         with:
           files: |
             ./docker-bake.hcl
-            ${{ steps.meta.outputs.bake-file }}
+            cwd://${{ steps.meta.outputs.bake-file }}
           targets: image-cross
           push: ${{ github.event_name != 'pull_request' }}
           sbom: true
@@ -363,14 +467,13 @@ jobs:
     runs-on: ubuntu-24.04
     if: ${{ github.ref == 'refs/heads/master' && github.repository == 'docker/buildx' }}
     permissions:
+      # same as global permission
+      contents: read
       # required to write sarif report
       security-events: write
     needs:
       - bin-image
     steps:
-      -
-        name: Checkout
-        uses: actions/checkout@v4
       -
         name: Login to DockerHub
         uses: docker/login-action@v3
@@ -393,6 +496,9 @@ jobs:
 
   release:
     runs-on: ubuntu-24.04
+    permissions:
+      # required to create GitHub release
+      contents: write
     needs:
       - test-integration
       - test-unit
@@ -422,7 +528,7 @@ jobs:
       -
         name: GitHub Release
         if: startsWith(github.ref, 'refs/tags/v')
-        uses: softprops/action-gh-release@c062e08bd532815e2082a85e87e3ef29c3e6d191 # v2.0.8
+        uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # v2.2.1
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
.github/workflows/codeql.yml (vendored): 20 lines changed

@@ -1,5 +1,14 @@
 name: codeql
 
+# Default to 'contents: read', which grants actions to read commits.
+#
+# If any permission is set, any permission not included in the list is
+# implicitly set to "none".
+#
+# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
+permissions:
+  contents: read
+
 on:
   push:
     branches:
@@ -7,17 +16,16 @@ on:
       - 'v[0-9]*'
   pull_request:
 
-permissions:
-  actions: read
-  contents: read
-  security-events: write
-
 env:
-  GO_VERSION: "1.22"
+  GO_VERSION: "1.23"
 
 jobs:
   codeql:
     runs-on: ubuntu-24.04
+    permissions:
+      contents: read
+      actions: read
+      security-events: write
     steps:
       -
         name: Checkout
.github/workflows/docs-release.yml (vendored): 24 lines changed

@@ -1,5 +1,14 @@
 name: docs-release
 
+# Default to 'contents: read', which grants actions to read commits.
+#
+# If any permission is set, any permission not included in the list is
+# implicitly set to "none".
+#
+# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
+permissions:
+  contents: read
+
 on:
   workflow_dispatch:
     inputs:
@@ -10,10 +19,17 @@ on:
     types:
       - released
 
+env:
+  SETUP_BUILDX_VERSION: "edge"
+  SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
+
 jobs:
   open-pr:
     runs-on: ubuntu-24.04
     if: ${{ (github.event.release.prerelease != true || github.event.inputs.tag != '') && github.repository == 'docker/buildx' }}
+    permissions:
+      contents: write
+      pull-requests: write
     steps:
       -
         name: Checkout docs repo
@@ -34,9 +50,13 @@ jobs:
       -
         name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
+        with:
+          version: ${{ env.SETUP_BUILDX_VERSION }}
+          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
+          buildkitd-flags: --debug
       -
         name: Generate yaml
-        uses: docker/bake-action@v5
+        uses: docker/bake-action@v6
         with:
           source: ${{ github.server_url }}/${{ github.repository }}.git#${{ env.RELEASE_NAME }}
           targets: update-docs
@@ -57,7 +77,7 @@ jobs:
           VENDOR_MODULE: github.com/docker/buildx@${{ env.RELEASE_NAME }}
       -
         name: Create PR on docs repo
-        uses: peter-evans/create-pull-request@4320041ed380b20e97d388d56a7fb4f9b8c20e79 # v7.0.0
+        uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
         with:
           token: ${{ secrets.GHPAT_DOCS_DISPATCH }}
           push-to-fork: docker-tools-robot/docker.github.io
.github/workflows/docs-upstream.yml (vendored): 24 lines changed

@@ -3,6 +3,15 @@
 # https://github.com/docker/docker.github.io/blob/98c7c9535063ae4cd2cd0a31478a21d16d2f07a3/docker-bake.hcl#L34-L36
 name: docs-upstream
 
+# Default to 'contents: read', which grants actions to read commits.
+#
+# If any permission is set, any permission not included in the list is
+# implicitly set to "none".
+#
+# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
+permissions:
+  contents: read
+
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true
@@ -20,21 +29,24 @@ on:
       - '.github/workflows/docs-upstream.yml'
       - 'docs/**'
 
+env:
+  SETUP_BUILDX_VERSION: "edge"
+  SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
+
 jobs:
   docs-yaml:
     runs-on: ubuntu-24.04
     steps:
-      -
-        name: Checkout
-        uses: actions/checkout@v4
       -
         name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
         with:
-          version: latest
+          version: ${{ env.SETUP_BUILDX_VERSION }}
+          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
+          buildkitd-flags: --debug
       -
         name: Build reference YAML docs
-        uses: docker/bake-action@v5
+        uses: docker/bake-action@v6
         with:
           targets: update-docs
           provenance: false
@@ -53,7 +65,7 @@ jobs:
           retention-days: 1
 
   validate:
-    uses: docker/docs/.github/workflows/validate-upstream.yml@6b73b05acb21edf7995cc5b3c6672d8e314cee7a # pin for artifact v4 support: https://github.com/docker/docs/pull/19220
+    uses: docker/docs/.github/workflows/validate-upstream.yml@main
     needs:
       - docs-yaml
     with:
.github/workflows/e2e.yml (vendored): 100 lines changed

@@ -1,5 +1,14 @@
 name: e2e
 
+# Default to 'contents: read', which grants actions to read commits.
+#
+# If any permission is set, any permission not included in the list is
+# implicitly set to "none".
+#
+# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
+permissions:
+  contents: read
+
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true
@@ -17,23 +26,25 @@ on:
       - 'docs/**'
 
 env:
+  SETUP_BUILDX_VERSION: "edge"
+  SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
   DESTDIR: "./bin"
-  K3S_VERSION: "v1.21.2-k3s1"
+  K3S_VERSION: "v1.32.2+k3s1"
 
 jobs:
   build:
     runs-on: ubuntu-24.04
     steps:
-      - name: Checkout
-        uses: actions/checkout@v4
       -
         name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
         with:
-          version: latest
+          version: ${{ env.SETUP_BUILDX_VERSION }}
+          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
+          buildkitd-flags: --debug
       -
         name: Build
-        uses: docker/bake-action@v5
+        uses: docker/bake-action@v6
         with:
           targets: binaries
           set: |
@@ -54,7 +65,7 @@ jobs:
           retention-days: 7
 
   driver:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-24.04
     needs:
       - build
     strategy:
@@ -142,7 +153,7 @@ jobs:
       -
         name: Install k3s
         if: matrix.driver == 'kubernetes'
-        uses: crazy-max/.github/.github/actions/install-k3s@fa6141aedf23596fb8bdcceab9cce8dadaa31bd9
+        uses: crazy-max/.github/.github/actions/install-k3s@7730d1434364d4b9aded32735b078a7ace5ea79a
         with:
           version: ${{ env.K3S_VERSION }}
       -
@@ -166,3 +177,78 @@ jobs:
       DRIVER_OPT: ${{ matrix.driver-opt }}
       ENDPOINT: ${{ matrix.endpoint }}
       PLATFORMS: ${{ matrix.platforms }}
+
+  bake:
+    runs-on: ubuntu-24.04
+    needs:
+      - build
+    env:
+      DOCKER_BUILD_CHECKS_ANNOTATIONS: false
+      DOCKER_BUILD_SUMMARY: false
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          -
+            # https://github.com/docker/bake-action/blob/v5.11.0/.github/workflows/ci.yml#L227-L237
+            source: "https://github.com/docker/bake-action.git#v5.11.0:test/go"
+            overrides: |
+              *.output=/tmp/bake-build
+          -
+            # https://github.com/tonistiigi/xx/blob/2fc85604e7280bfb3f626569bd4c5413c43eb4af/.github/workflows/ld.yml#L90-L98
+            source: "https://github.com/tonistiigi/xx.git#2fc85604e7280bfb3f626569bd4c5413c43eb4af"
+            targets: |
+              ld64-static-tgz
+            overrides: |
+              ld64-static-tgz.output=type=local,dest=./dist
+              ld64-static-tgz.platform=linux/amd64
+              ld64-static-tgz.cache-from=type=gha,scope=xx-ld64-static-tgz
+              ld64-static-tgz.cache-to=type=gha,scope=xx-ld64-static-tgz
+          -
+            # https://github.com/moby/buildkit-bench/blob/54c194011c4fc99a94aa75d4b3d4f3ffd4c4ce27/docker-bake.hcl#L154-L160
+            source: "https://github.com/moby/buildkit-bench.git#54c194011c4fc99a94aa75d4b3d4f3ffd4c4ce27"
+            targets: |
+              tests-buildkit
+            envs: |
+              BUILDKIT_REFS=v0.18.2
+    steps:
+      -
+        name: Checkout
+        uses: actions/checkout@v4
+      -
+        name: Expose GitHub Runtime
+        uses: crazy-max/ghaction-github-runtime@v3
+      -
+        name: Environment variables
+        if: matrix.envs != ''
+        run: |
+          for l in "${{ matrix.envs }}"; do
+            echo "${l?}" >> $GITHUB_ENV
+          done
+      -
+        name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
+      -
+        name: Install buildx
+        uses: actions/download-artifact@v4
+        with:
+          name: binary
+          path: /home/runner/.docker/cli-plugins
+      -
+        name: Fix perms and check
+        run: |
+          chmod +x /home/runner/.docker/cli-plugins/docker-buildx
+          docker buildx version
+      -
+        name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+        with:
+          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
+          buildkitd-flags: --debug
+      -
+        name: Build
+        uses: docker/bake-action@v6
+        with:
+          source: ${{ matrix.source }}
+          targets: ${{ matrix.targets }}
+          set: ${{ matrix.overrides }}
.github/workflows/labeler.yml (vendored): 17 lines changed

@@ -1,5 +1,14 @@
 name: labeler
 
+# Default to 'contents: read', which grants actions to read commits.
+#
+# If any permission is set, any permission not included in the list is
+# implicitly set to "none".
+#
+# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
+permissions:
+  contents: read
+
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true
@@ -9,10 +18,12 @@ on:
 
 jobs:
   labeler:
-    permissions:
-      contents: read
-      pull-requests: write
     runs-on: ubuntu-latest
+    permissions:
+      # same as global permission
+      contents: read
+      # required for writing labels
+      pull-requests: write
     steps:
       -
         name: Run
.github/workflows/validate.yml (vendored): 22 lines changed

@@ -1,5 +1,14 @@
 name: validate
 
+# Default to 'contents: read', which grants actions to read commits.
+#
+# If any permission is set, any permission not included in the list is
+# implicitly set to "none".
+#
+# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
+permissions:
+  contents: read
+
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true
@@ -16,6 +25,10 @@ on:
     paths-ignore:
       - '.github/releases.json'
 
+env:
+  SETUP_BUILDX_VERSION: "edge"
+  SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
+
 jobs:
   prepare:
     runs-on: ubuntu-24.04
@@ -81,17 +94,16 @@ jobs:
           if [ "$GITHUB_REPOSITORY" = "docker/buildx" ]; then
             echo "GOLANGCI_LINT_MULTIPLATFORM=1" >> $GITHUB_ENV
           fi
-      -
-        name: Checkout
-        uses: actions/checkout@v4
       -
         name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
         with:
-          version: latest
+          version: ${{ env.SETUP_BUILDX_VERSION }}
+          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
+          buildkitd-flags: --debug
       -
         name: Validate
-        uses: docker/bake-action@v5
+        uses: docker/bake-action@v6
        with:
          targets: ${{ matrix.target }}
          set: |
.golangci.yml

@@ -1,26 +1,52 @@
 run:
   timeout: 30m
 
   modules-download-mode: vendor
 
 linters:
   enable:
-    - gofmt
-    - govet
+    - bodyclose
     - depguard
+    - forbidigo
+    - gocritic
+    - gofmt
     - goimports
+    - gosec
+    - gosimple
+    - govet
     - ineffassign
+    - makezero
     - misspell
-    - unused
+    - noctx
+    - nolintlint
     - revive
     - staticcheck
+    - testifylint
     - typecheck
-    - nolintlint
-    - gosec
-    - forbidigo
+    - unused
+    - whitespace
   disable-all: true
 
 linters-settings:
+  gocritic:
+    disabled-checks:
+      - "ifElseChain"
+      - "assignOp"
+      - "appendAssign"
+      - "singleCaseSwitch"
+      - "exitAfterDefer" # FIXME
+  importas:
+    alias:
+      # Enforce alias to prevent it accidentally being used instead of
+      # buildkit errdefs package (or vice-versa).
+      - pkg: "github.com/containerd/errdefs"
+        alias: "cerrdefs"
+      # Use a consistent alias to prevent confusion with "github.com/moby/buildkit/client"
+      - pkg: "github.com/docker/docker/client"
+        alias: "dockerclient"
+      - pkg: "github.com/opencontainers/image-spec/specs-go/v1"
+        alias: "ocispecs"
+      - pkg: "github.com/opencontainers/go-digest"
+        alias: "digest"
   govet:
     enable:
       - nilness
@@ -43,14 +69,27 @@ linters-settings:
       desc: The io/ioutil package has been deprecated.
   forbidigo:
     forbid:
+      - '^context\.WithCancel(# use context\.WithCancelCause instead)?$'
+      - '^context\.WithDeadline(# use context\.WithDeadline instead)?$'
+      - '^context\.WithTimeout(# use context\.WithTimeoutCause instead)?$'
+      - '^ctx\.Err(# use context\.Cause instead)?$'
       - '^fmt\.Errorf(# use errors\.Errorf instead)?$'
       - '^platforms\.DefaultString(# use platforms\.Format(platforms\.DefaultSpec()) instead\.)?$'
   gosec:
     excludes:
       - G204 # Audit use of command execution
      - G402 # TLS MinVersion too low
+      - G115 # integer overflow conversion (TODO: verify these)
     config:
       G306: "0644"
+  testifylint:
+    disable:
+      # disable rules that reduce the test condition
+      - "empty"
+      - "bool-compare"
+      - "len"
+      - "negative-positive"
 
 issues:
   exclude-files:
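The new `forbidigo` entries above steer code away from `context.WithCancel` and `ctx.Err` toward the cause-aware context APIs added in Go 1.20, while `importas` pins aliases such as `cerrdefs` and `ocispecs`. Below is a minimal sketch of what the cause-aware style buys; it is illustrative only, not code from the buildx repository.

```go
// Sketch: context.WithCancelCause / context.Cause vs. plain WithCancel.
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func main() {
	// WithCancelCause lets the canceller record *why* it cancelled.
	ctx, cancel := context.WithCancelCause(context.Background())

	go func() {
		time.Sleep(10 * time.Millisecond)
		cancel(errors.New("build interrupted by user"))
	}()

	<-ctx.Done()

	// ctx.Err() only ever reports context.Canceled here;
	// context.Cause(ctx) surfaces the richer error passed to cancel().
	fmt.Println("err:  ", ctx.Err())
	fmt.Println("cause:", context.Cause(ctx))
}
```

With plain `context.WithCancel`, the second print could only ever show `context canceled`; preserving the cause across cancellation boundaries is what makes the stricter lint rules worthwhile.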
.mailmap: 14 lines changed

@@ -1,11 +1,25 @@
 # This file lists all individuals having contributed content to the repository.
 # For how it is generated, see hack/dockerfiles/authors.Dockerfile.
 
+Batuhan Apaydın <batuhan.apaydin@trendyol.com>
+Batuhan Apaydın <batuhan.apaydin@trendyol.com> <developerguy2@gmail.com>
 CrazyMax <github@crazymax.dev>
 CrazyMax <github@crazymax.dev> <1951866+crazy-max@users.noreply.github.com>
 CrazyMax <github@crazymax.dev> <crazy-max@users.noreply.github.com>
+David Karlsson <david.karlsson@docker.com>
+David Karlsson <david.karlsson@docker.com> <35727626+dvdksn@users.noreply.github.com>
+jaihwan104 <jaihwan104@woowahan.com>
+jaihwan104 <jaihwan104@woowahan.com> <42341126+jaihwan104@users.noreply.github.com>
+Kenyon Ralph <kenyon@kenyonralph.com>
+Kenyon Ralph <kenyon@kenyonralph.com> <quic_kralph@quicinc.com>
 Sebastiaan van Stijn <github@gone.nl>
 Sebastiaan van Stijn <github@gone.nl> <thaJeztah@users.noreply.github.com>
+Shaun Thompson <shaun.thompson@docker.com>
+Shaun Thompson <shaun.thompson@docker.com> <shaun.b.thompson@gmail.com>
+Silvin Lubecki <silvin.lubecki@docker.com>
+Silvin Lubecki <silvin.lubecki@docker.com> <31478878+silvin-lubecki@users.noreply.github.com>
+Talon Bowler <talon.bowler@docker.com>
+Talon Bowler <talon.bowler@docker.com> <nolat301@gmail.com>
 Tibor Vass <tibor@docker.com>
 Tibor Vass <tibor@docker.com> <tiborvass@users.noreply.github.com>
 Tõnis Tiigi <tonistiigi@gmail.com>
AUTHORS: 69 lines changed

@@ -1,45 +1,112 @@
 # This file lists all individuals having contributed content to the repository.
 # For how it is generated, see hack/dockerfiles/authors.Dockerfile.
 
+accetto <34798830+accetto@users.noreply.github.com>
 Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
+Aleksa Sarai <cyphar@cyphar.com>
 Alex Couture-Beil <alex@earthly.dev>
 Andrew Haines <andrew.haines@zencargo.com>
+Andy Caldwell <andrew.caldwell@metaswitch.com>
 Andy MacKinlay <admackin@users.noreply.github.com>
 Anthony Poschen <zanven42@gmail.com>
+Arnold Sobanski <arnold@l4g.dev>
 Artur Klauser <Artur.Klauser@computer.org>
-Batuhan Apaydın <developerguy2@gmail.com>
+Avi Deitcher <avi@deitcher.net>
+Batuhan Apaydın <batuhan.apaydin@trendyol.com>
+Ben Peachey <potherca@gmail.com>
+Bertrand Paquet <bertrand.paquet@gmail.com>
 Bin Du <bindu@microsoft.com>
 Brandon Philips <brandon@ifup.org>
 Brian Goff <cpuguy83@gmail.com>
+Bryce Lampe <bryce@pulumi.com>
+Cameron Adams <pnzreba@gmail.com>
+Christian Dupuis <cd@atomist.com>
+Cory Snider <csnider@mirantis.com>
 CrazyMax <github@crazymax.dev>
+David Gageot <david.gageot@docker.com>
+David Karlsson <david.karlsson@docker.com>
+David Scott <dave@recoil.org>
 dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
 Devin Bayer <dev@doubly.so>
 Djordje Lukic <djordje.lukic@docker.com>
+Dmitry Makovey <dmakovey@gitlab.com>
 Dmytro Makovey <dmytro.makovey@docker.com>
 Donghui Wang <977675308@qq.com>
+Doug Borg <dougborg@apple.com>
+Edgar Lee <edgarl@netflix.com>
+Eli Treuherz <et@arenko.group>
+Eliott Wiener <eliottwiener@gmail.com>
+Elran Shefer <elran.shefer@velocity.tech>
 faust <faustin@fala.red>
 Felipe Santos <felipecassiors@gmail.com>
+Felix de Souza <fdesouza@palantir.com>
 Fernando Miguel <github@FernandoMiguel.net>
 gfrancesco <gfrancesco@users.noreply.github.com>
 gracenoah <gracenoahgh@gmail.com>
+Guillaume Lours <705411+glours@users.noreply.github.com>
+guoguangwu <guoguangwu@magic-shield.com>
 Hollow Man <hollowman@hollowman.ml>
+Ian King'ori <kingorim.ian@gmail.com>
+idnandre <andre@idntimes.com>
 Ilya Dmitrichenko <errordeveloper@gmail.com>
+Isaac Gaskin <isaac.gaskin@circle.com>
 Jack Laxson <jackjrabbit@gmail.com>
+jaihwan104 <jaihwan104@woowahan.com>
 Jean-Yves Gastaud <jygastaud@gmail.com>
+Jhan S. Álvarez <51450231+yastanotheruser@users.noreply.github.com>
+Jonathan A. Sternberg <jonathan.sternberg@docker.com>
+Jonathan Piché <jpiche@coveo.com>
+Justin Chadwell <me@jedevc.com>
+Kenyon Ralph <kenyon@kenyonralph.com>
 khs1994 <khs1994@khs1994.com>
+Kijima Daigo <norimaking777@gmail.com>
+Kohei Tokunaga <ktokunaga.mail@gmail.com>
 Kotaro Adachi <k33asby@gmail.com>
+Kushagra Mansingh <12158241+kushmansingh@users.noreply.github.com>
 l00397676 <lujingxiao@huawei.com>
+Laura Brehm <laurabrehm@hey.com>
+Laurent Goderre <laurent.goderre@docker.com>
+Mark Hildreth <113933455+markhildreth-gravity@users.noreply.github.com>
+Mayeul Blanzat <mayeul.blanzat@datadoghq.com>
 Michal Augustyn <michal.augustyn@mail.com>
+Milas Bowman <milas.bowman@docker.com>
+Mitsuru Kariya <mitsuru.kariya@nttdata.com>
+Moleus <fafufuburr@gmail.com>
+Nick Santos <nick.santos@docker.com>
+Nick Sieger <nick@nicksieger.com>
+Nicolas De Loof <nicolas.deloof@gmail.com>
+Niklas Gehlen <niklas@namespacelabs.com>
 Patrick Van Stee <patrick@vanstee.me>
+Paweł Gronowski <pawel.gronowski@docker.com>
+Phong Tran <tran.pho@northeastern.edu>
+Qasim Sarfraz <qasimsarfraz@microsoft.com>
+Rob Murray <rob.murray@docker.com>
+robertlestak <robert.lestak@umusic.com>
 Saul Shanabrook <s.shanabrook@gmail.com>
+Sean P. Kane <spkane00@gmail.com>
 Sebastiaan van Stijn <github@gone.nl>
+Shaun Thompson <shaun.thompson@docker.com>
 SHIMA Tatsuya <ts1s1andn@gmail.com>
 Silvin Lubecki <silvin.lubecki@docker.com>
+Simon A. Eugster <simon.eu@gmail.com>
 Solomon Hykes <sh.github.6811@hykes.org>
+Sumner Warren <sumner.warren@gmail.com>
 Sune Keller <absukl@almbrand.dk>
+Talon Bowler <talon.bowler@docker.com>
+Tianon Gravi <admwiggin@gmail.com>
 Tibor Vass <tibor@docker.com>
+Tim Smith <tismith@rvohealth.com>
+Timofey Kirillov <timofey.kirillov@flant.com>
+Tyler Smith <tylerlwsmith@gmail.com>
 Tõnis Tiigi <tonistiigi@gmail.com>
 Ulysses Souza <ulyssessouza@gmail.com>
+Usual Coder <34403413+Usual-Coder@users.noreply.github.com>
 Wang Jinglei <morlay.null@gmail.com>
+Wei <daviseago@gmail.com>
+Wojciech M <wmiedzybrodzki@outlook.com>
 Xiang Dai <764524258@qq.com>
+Zachary Povey <zachary.povey@autotrader.co.uk>
 zelahi <elahi.zuhayr@gmail.com>
+Zero <tobewhatwewant@gmail.com>
+zhyon404 <zhyong4@gmail.com>
||||||
|
Zsolt <zsolt.szeberenyi@figured.com>
|
||||||
|
Dockerfile (36 changes)

@@ -1,20 +1,27 @@
 # syntax=docker/dockerfile:1

-ARG GO_VERSION=1.22
-ARG XX_VERSION=1.4.0
+ARG GO_VERSION=1.23
+ARG ALPINE_VERSION=3.21
+ARG XX_VERSION=1.6.1

 # for testing
-ARG DOCKER_VERSION=27.1.1
+ARG DOCKER_VERSION=28.0.0
+ARG DOCKER_VERSION_ALT_27=27.5.1
+ARG DOCKER_VERSION_ALT_26=26.1.3
 ARG DOCKER_CLI_VERSION=${DOCKER_VERSION}
-ARG GOTESTSUM_VERSION=v1.9.0
-ARG REGISTRY_VERSION=2.8.0
-ARG BUILDKIT_VERSION=v0.14.1
-ARG UNDOCK_VERSION=0.7.0
+ARG GOTESTSUM_VERSION=v1.12.0
+ARG REGISTRY_VERSION=2.8.3
+ARG BUILDKIT_VERSION=v0.20.2
+ARG UNDOCK_VERSION=0.9.0

 FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
-FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS golatest
+FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS golatest
 FROM moby/moby-bin:$DOCKER_VERSION AS docker-engine
 FROM dockereng/cli-bin:$DOCKER_CLI_VERSION AS docker-cli
+FROM moby/moby-bin:$DOCKER_VERSION_ALT_27 AS docker-engine-alt27
+FROM moby/moby-bin:$DOCKER_VERSION_ALT_26 AS docker-engine-alt26
+FROM dockereng/cli-bin:$DOCKER_VERSION_ALT_27 AS docker-cli-alt27
+FROM dockereng/cli-bin:$DOCKER_VERSION_ALT_26 AS docker-cli-alt26
 FROM registry:$REGISTRY_VERSION AS registry
 FROM moby/buildkit:$BUILDKIT_VERSION AS buildkit
 FROM crazymax/undock:$UNDOCK_VERSION AS undock

@@ -77,6 +84,7 @@ RUN --mount=type=bind,target=. \
   set -e
   xx-go --wrap
   DESTDIR=/usr/bin VERSION=$(cat /buildx-version/version) REVISION=$(cat /buildx-version/revision) GO_EXTRA_LDFLAGS="-s -w" ./hack/build
+  file /usr/bin/docker-buildx
   xx-verify --static /usr/bin/docker-buildx
 EOT

@@ -95,7 +103,10 @@ FROM scratch AS binaries-unix
 COPY --link --from=buildx-build /usr/bin/docker-buildx /buildx

 FROM binaries-unix AS binaries-darwin
+FROM binaries-unix AS binaries-freebsd
 FROM binaries-unix AS binaries-linux
+FROM binaries-unix AS binaries-netbsd
+FROM binaries-unix AS binaries-openbsd

 FROM scratch AS binaries-windows
 COPY --link --from=buildx-build /usr/bin/docker-buildx /buildx.exe

@@ -120,16 +131,21 @@ COPY --link --from=gotestsum /out /usr/bin/
 COPY --link --from=registry /bin/registry /usr/bin/
 COPY --link --from=docker-engine / /usr/bin/
 COPY --link --from=docker-cli / /usr/bin/
+COPY --link --from=docker-engine-alt27 / /opt/docker-alt-27/
+COPY --link --from=docker-engine-alt26 / /opt/docker-alt-26/
+COPY --link --from=docker-cli-alt27 / /opt/docker-alt-27/
+COPY --link --from=docker-cli-alt26 / /opt/docker-alt-26/
 COPY --link --from=buildkit /usr/bin/buildkitd /usr/bin/
 COPY --link --from=buildkit /usr/bin/buildctl /usr/bin/
 COPY --link --from=undock /usr/local/bin/undock /usr/bin/
 COPY --link --from=binaries /buildx /usr/bin/
+ENV TEST_DOCKER_EXTRA="docker@27.5=/opt/docker-alt-27,docker@26.1=/opt/docker-alt-26"

 FROM integration-test-base AS integration-test
 COPY . .

 # Release
-FROM --platform=$BUILDPLATFORM alpine AS releaser
+FROM --platform=$BUILDPLATFORM alpine:${ALPINE_VERSION} AS releaser
 WORKDIR /work
 ARG TARGETPLATFORM
 RUN --mount=from=binaries \

@@ -144,7 +160,7 @@ COPY --from=releaser /out/ /

 # Shell
 FROM docker:$DOCKER_VERSION AS dockerd-release
-FROM alpine AS shell
+FROM alpine:${ALPINE_VERSION} AS shell
 RUN apk add --no-cache iptables tmux git vim less openssh
 RUN mkdir -p /usr/local/lib/docker/cli-plugins && ln -s /usr/local/bin/buildx /usr/local/lib/docker/cli-plugins/docker-buildx
 COPY ./hack/demo-env/entrypoint.sh /usr/local/bin
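The new `TEST_DOCKER_EXTRA` variable encodes the alternate Docker installs as comma-separated `name=path` pairs, with the name carrying an `@version` suffix. As a rough sketch of how a test harness could split such a value, here is a minimal Go example; `extraEngine` and `parseTestDockerExtra` are hypothetical names for illustration, not buildx's actual implementation.

```go
package main

import (
	"fmt"
	"strings"
)

// extraEngine is a hypothetical record for one alternate Docker install,
// e.g. "docker@27.5=/opt/docker-alt-27".
type extraEngine struct {
	Name    string // e.g. "docker@27.5"
	BinPath string // e.g. "/opt/docker-alt-27"
}

// parseTestDockerExtra splits a TEST_DOCKER_EXTRA-style value into its
// name=path pairs and rejects malformed entries.
func parseTestDockerExtra(v string) ([]extraEngine, error) {
	var out []extraEngine
	for _, entry := range strings.Split(v, ",") {
		entry = strings.TrimSpace(entry)
		if entry == "" {
			continue
		}
		name, path, ok := strings.Cut(entry, "=")
		if !ok {
			return nil, fmt.Errorf("invalid entry %q: want name=path", entry)
		}
		out = append(out, extraEngine{Name: name, BinPath: path})
	}
	return out, nil
}

func main() {
	engines, err := parseTestDockerExtra("docker@27.5=/opt/docker-alt-27,docker@26.1=/opt/docker-alt-26")
	if err != nil {
		panic(err)
	}
	for _, e := range engines {
		// A real integration test would exec the binaries under BinPath;
		// here we only demonstrate the parsing.
		fmt.Printf("%s -> %s\n", e.Name, e.BinPath)
	}
}
```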
PROJECT.md (new file, 453 lines)

@@ -0,0 +1,453 @@
# Project processing guide <!-- omit from toc -->

- [Project scope](#project-scope)
- [Labels](#labels)
  - [Global](#global)
  - [`area/`](#area)
  - [`exp/`](#exp)
  - [`impact/`](#impact)
  - [`kind/`](#kind)
  - [`needs/`](#needs)
  - [`priority/`](#priority)
  - [`status/`](#status)
- [Types of releases](#types-of-releases)
  - [Feature releases](#feature-releases)
    - [Release Candidates](#release-candidates)
    - [Support Policy](#support-policy)
    - [Contributing to Releases](#contributing-to-releases)
  - [Patch releases](#patch-releases)
- [Milestones](#milestones)
- [Triage process](#triage-process)
  - [Verify essential information](#verify-essential-information)
  - [Classify the issue](#classify-the-issue)
- [Prioritization guidelines for `kind/bug`](#prioritization-guidelines-for-kindbug)
- [Issue lifecycle](#issue-lifecycle)
  - [Examples](#examples)
    - [Submitting a bug](#submitting-a-bug)
- [Pull request review process](#pull-request-review-process)
- [Handling stalled issues and pull requests](#handling-stalled-issues-and-pull-requests)
- [Moving to a discussion](#moving-to-a-discussion)
- [Workflow automation](#workflow-automation)
  - [Exempting an issue/PR from stale bot processing](#exempting-an-issuepr-from-stale-bot-processing)
- [Updating dependencies](#updating-dependencies)

---

## Project scope

**Docker Buildx** is a Docker CLI plugin designed to extend build capabilities using BuildKit. It provides advanced features for building container images, supporting multiple builder instances, multi-node builds, and high-level build constructs. Buildx enhances the Docker build process, making it more efficient and flexible, and is compatible with both Docker and Kubernetes environments. Key features include:

- **Familiar user experience:** Buildx offers a user experience similar to legacy `docker build`, ensuring a smooth transition from legacy commands
- **Full BuildKit capabilities:** Leverage the full feature set of [`moby/buildkit`](https://github.com/moby/buildkit) when using the container driver
- **Multiple builder instances:** Supports the use of multiple builder instances, allowing concurrent builds and effective management and monitoring of these builders
- **Multi-node builds:** Use multiple nodes to build cross-platform images
- **Compose integration:** Build complex, multi-service files as defined in Compose
- **High-level build constructs via `bake`:** Introduces high-level build constructs for more complex build workflows
- **In-container driver support:** Supports in-container drivers for both Docker and Kubernetes environments to provide isolation and security

## Labels

Below are common groups, labels, and their intended usage to support issues, pull requests, and discussion processing.

### Global

General attributes that can apply to nearly any issue or pull request.

| Label              | Applies to  | Description                                                               |
| ------------------ | ----------- | ------------------------------------------------------------------------- |
| `bot`              | Issues, PRs | Created by a bot                                                          |
| `good first issue` | Issues      | Suitable for first-time contributors                                      |
| `help wanted`      | Issues, PRs | Assistance requested                                                      |
| `lgtm`             | PRs         | “Looks good to me” approval                                               |
| `stale`            | Issues, PRs | The issue/PR has not had activity for a while                             |
| `rotten`           | Issues, PRs | The issue/PR has not had activity since being marked stale and was closed |
| `frozen`           | Issues, PRs | The issue/PR should be skipped by the stale bot                           |
| `dco/no`           | PRs         | The PR is missing a developer certificate of origin sign-off              |

### `area/`

Area or component of the project affected. Please note that the table below may not be inclusive of all current options.

| Label                          | Applies to | Description                |
| ------------------------------ | ---------- | -------------------------- |
| `area/bake`                    | Any        | `bake`                     |
| `area/bake/compose`            | Any        | `bake/compose`             |
| `area/build`                   | Any        | `build`                    |
| `area/builder`                 | Any        | `builder`                  |
| `area/buildkit`                | Any        | Relates to `moby/buildkit` |
| `area/cache`                   | Any        | `cache`                    |
| `area/checks`                  | Any        | `checks`                   |
| `area/ci`                      | Any        | Project CI                 |
| `area/cli`                     | Any        | `cli`                      |
| `area/controller`              | Any        | `controller`               |
| `area/debug`                   | Any        | `debug`                    |
| `area/dependencies`            | Any        | Project dependencies       |
| `area/dockerfile`              | Any        | `dockerfile`               |
| `area/docs`                    | Any        | `docs`                     |
| `area/driver`                  | Any        | `driver`                   |
| `area/driver/docker`           | Any        | `driver/docker`            |
| `area/driver/docker-container` | Any        | `driver/docker-container`  |
| `area/driver/kubernetes`       | Any        | `driver/kubernetes`        |
| `area/driver/remote`           | Any        | `driver/remote`            |
| `area/feature-parity`          | Any        | `feature-parity`           |
| `area/github-actions`          | Any        | `github-actions`           |
| `area/hack`                    | Any        | Project hack/support       |
| `area/imagetools`              | Any        | `imagetools`               |
| `area/metrics`                 | Any        | `metrics`                  |
| `area/moby`                    | Any        | Relates to `moby/moby`     |
| `area/project`                 | Any        | Project support            |
| `area/qemu`                    | Any        | `qemu`                     |
| `area/tests`                   | Any        | Project testing            |
| `area/windows`                 | Any        | `windows`                  |

### `exp/`

Estimated experience level needed to complete the item.

| Label              | Applies to | Description                                                                      |
| ------------------ | ---------- | -------------------------------------------------------------------------------- |
| `exp/beginner`     | Issue      | Suitable for contributors new to the project or technology stack                 |
| `exp/intermediate` | Issue      | Requires some familiarity with the project and technology                        |
| `exp/expert`       | Issue      | Requires deep understanding and advanced skills with the project and technology  |

### `impact/`

Potential impact areas of the issue or pull request.

| Label                | Applies to | Description                                        |
| -------------------- | ---------- | -------------------------------------------------- |
| `impact/breaking`    | PR         | Change is API-breaking                             |
| `impact/changelog`   | PR         | When complete, the item should be in the changelog |
| `impact/deprecation` | PR         | Change is a deprecation of a feature               |

### `kind/`

The type of issue, pull request, or discussion.

| Label              | Applies to        | Description                                             |
| ------------------ | ----------------- | ------------------------------------------------------- |
| `kind/bug`         | Issue, PR         | Confirmed bug                                           |
| `kind/chore`       | Issue, PR         | Project support tasks                                   |
| `kind/docs`        | Issue, PR         | Additions or modifications to the documentation         |
| `kind/duplicate`   | Any               | Duplicate of another item                               |
| `kind/enhancement` | Any               | Enhancement of an existing feature                      |
| `kind/feature`     | Any               | A brand new feature                                     |
| `kind/maybe-bug`   | Issue, PR         | Unconfirmed bug, turns into `kind/bug` when confirmed   |
| `kind/proposal`    | Issue, Discussion | A proposed major change                                 |
| `kind/refactor`    | Issue, PR         | Refactor of existing code                               |
| `kind/support`     | Any               | A question, discussion, or other user support item      |
| `kind/tests`       | Issue, PR         | Additions or modifications to the project testing suite |

### `needs/`

Actions or missing requirements needed by the issue or pull request.

| Label                       | Applies to | Description                                           |
| --------------------------- | ---------- | ----------------------------------------------------- |
| `needs/assignee`            | Issue, PR  | Needs an assignee                                     |
| `needs/code-review`         | PR         | Needs review of code                                  |
| `needs/design-review`       | Issue, PR  | Needs review of design                                |
| `needs/docs-review`         | Issue, PR  | Needs review by the documentation team                |
| `needs/docs-update`         | Issue, PR  | Needs an update to the docs                           |
| `needs/follow-on-work`      | Issue, PR  | Needs follow-on work/PR                               |
| `needs/issue`               | PR         | Needs an issue                                        |
| `needs/maintainer-decision` | Issue, PR  | Needs maintainer discussion/decision before advancing |
| `needs/milestone`           | Issue, PR  | Needs milestone assignment                            |
| `needs/more-info`           | Any        | Needs more information from the author                |
| `needs/more-investigation`  | Issue, PR  | Needs further investigation                           |
| `needs/priority`            | Issue, PR  | Needs priority assignment                             |
| `needs/pull-request`        | Issue      | Needs a pull request                                  |
| `needs/rebase`              | PR         | Needs rebase to target branch                         |
| `needs/reproduction`        | Issue, PR  | Needs reproduction steps                              |

### `priority/`

Level of urgency of a `kind/bug` issue or pull request.

| Label         | Applies to | Description                                                              |
| ------------- | ---------- | ------------------------------------------------------------------------ |
| `priority/P0` | Issue, PR  | Urgent: Security, critical bugs, blocking issues.                        |
| `priority/P1` | Issue, PR  | Important: This is a top priority and a must-have for the next release.  |
| `priority/P2` | Issue, PR  | Normal: Default priority                                                 |

### `status/`

Current lifecycle state of the issue or pull request.

| Label                 | Applies to | Description                                                             |
| --------------------- | ---------- | ----------------------------------------------------------------------- |
| `status/accepted`     | Issue, PR  | The issue has been reviewed and accepted for implementation             |
| `status/active`       | PR         | The PR is actively being worked on by a maintainer or community member  |
| `status/blocked`      | Issue, PR  | The issue/PR is blocked from advancing to another status                |
| `status/do-not-merge` | PR         | Should not be merged pending further review or changes                  |
| `status/transfer`     | Any        | Transferred to another project                                          |
| `status/triage`       | Any        | The item needs to be sorted by maintainers                              |
| `status/wontfix`      | Issue, PR  | The issue/PR will not be fixed or addressed as described                |

## Types of releases

This project has feature releases, patch releases, and security releases.

### Feature releases

Feature releases are made from the development branch, after which a release branch is cut for future patch releases; patch releases may also occur during the code freeze period.

#### Release Candidates

Users can expect 2-3 release candidate (RC) test releases prior to a feature release. The first RC is typically released about one to two weeks before the final release.

#### Support Policy

Once a new feature release is cut, support for the previous feature release is discontinued. An exception may be made for urgent security releases that occur shortly after a new feature release. Buildx does not offer LTS (Long-Term Support) releases.

#### Contributing to Releases

Anyone can request that an issue or PR be included in the next feature or patch release milestone, provided it meets the necessary requirements.

### Patch releases

Patch releases should only include the most critical patches. Stability is vital, so everyone should always use the latest patch release.

If a fix is needed but does not qualify for a patch release because of its code size or other criteria that make it too unpredictable, we will prioritize cutting a new feature release sooner rather than making an exception for backporting.

The following PRs are included in patch releases:

- `priority/P0` fixes
- `priority/P1` fixes, assuming maintainers don’t object because of the patch size
- `priority/P2` fixes, only if both of the following hold:
  - proposed by a maintainer
  - the patch is trivial and self-contained
- Documentation-only patches
- Vendored dependency updates, only if:
  - they fix a qualifying bug or security issue in Buildx
  - the patch is small; otherwise, vendor a forked version of the dependency containing only the required patches

New features do not qualify for a patch release.

## Milestones

Milestones are used to help identify which releases a contribution will be in.

- The `v0.next` milestone collects unblocked items planned for the next 2-3 feature releases but not yet assigned to a specific version milestone.
- The `v0.backlog` milestone gathers all triaged items considered for the long term (beyond the next 3 feature releases) or currently unfit for a future release due to certain conditions. These items may be blocked and need to be unblocked before progressing.

## Triage process

Triage provides an important way to contribute to an open-source project; this process also applies to pull requests submitted without an accompanying issue. Triage helps ensure work items are resolved quickly by:

- Ensuring the issue's intent and purpose are described precisely. This is necessary because it can be difficult for an issue to explain how an end user experiences a problem and what actions they took to arrive at the problem.
- Giving a contributor the information they need before they commit to resolving an issue.
- Lowering the issue count by preventing duplicate issues.
- Streamlining the development process by preventing duplicate discussions.

If you don't have time to code, consider helping with triage. The community will thank you for saving them time by spending some of yours. The same basic process should be applied upon receipt of a new issue:

1. Verify essential information
2. Classify the issue
3. Prioritize the issue

### Verify essential information

Before advancing the triage process, ensure the issue contains all necessary information to be properly understood and assessed. The required information may vary by issue type, but typically includes the system environment, version numbers, reproduction steps, expected outcomes, and actual results.

- **Exercising Judgment**: Use your best judgment to assess the issue description’s completeness.
- **Communicating Needs**: If the information provided is insufficient, kindly request additional details from the author. Explain that this information is crucial for clarity and resolution of the issue, and apply the `needs/more-info` label to indicate a response from the author is required.

### Classify the issue

An issue will typically have multiple labels. These are used to help communicate key information about context, requirements, and status. At a minimum, a properly classified issue should have:

- (Required) One or more [`area/*`](#area) labels
- (Required) One [`kind/*`](#kind) label to indicate the type of issue
- (Required if `kind/bug`) A [`priority/*`](#priority) label

When a decision is assigned, the following labels should be present:

- (Required) One [`status/*`](#status) label to indicate lifecycle status

Additional labels can provide more clarity:

- Zero or more [`needs/*`](#needs) labels to indicate missing items
- Zero or more [`impact/*`](#impact) labels
- One [`exp/*`](#exp) label

## Prioritization guidelines for `kind/bug`

When an issue or pull request of `kind/bug` is correctly categorized and attached to a milestone, the labels indicate the urgency with which it should be completed.

**priority/P0**

Fixing this item is the highest priority. A patch release will follow as soon as a patch is available and verified. This level is used exclusively for bugs.

Examples:

- Regression in a critical code path
- Panic in a critical code path
- Corruption in a critical code path or elsewhere in the system
- A leaked zero-day critical security vulnerability

**priority/P1**

Items with this label should be fixed with high priority and almost always included in a patch release. Unless waiting for another issue, patch releases should happen within a week. This level is not used for features or enhancements.

Examples:

- Any regression or panic
- Measurable performance regression
- A major bug in a new feature in the latest release
- Incompatibility with an upgraded external dependency

**priority/P2**

This is the default priority and is implied in the absence of a `priority/` label. Bugs with this priority should be included in the next feature release but may land in a patch release if they are ready and unlikely to impact other functionality adversely. Non-bug issues with this priority should also be included in the next feature release if they are available and ready.

Examples:

- Confirmed bugs
- Bugs in non-default configurations
- Most enhancements

## Issue lifecycle

```mermaid
flowchart LR
    create([New issue]) --> triage
    subgraph triage[Triage Loop]
        review[Review]
    end
    subgraph decision[Decision]
        accept[Accept]
        close[Close]
    end
    triage -- if accepted --> accept[Assign status, milestone]
    triage -- if rejected --> close[Assign status, close issue]
```

### Examples

#### Submitting a bug

To help illustrate the issue lifecycle, let’s walk through submitting an issue as a potential bug in CI that enters a feedback loop and is eventually accepted as P2 priority and placed on the backlog.

```mermaid
flowchart LR
    new([New issue])

    subgraph triage[Triage]
        direction LR
        create["Action: Submit issue via Bug form\nLabels: kind/maybe-bug, status/triage"]
        style create text-align:left

        subgraph review[Review]
            direction TB
            classify["Action: Maintainer reviews issue, requests more info\nLabels: kind/maybe-bug, status/triage, needs/more-info, area/*"]
            style classify text-align:left
            update["Action: Author updates issue\nLabels: kind/maybe-bug, status/triage, needs/more-info, area/*"]
            style update text-align:left
            classify --> update
            update --> classify
        end

        create --> review
    end

    subgraph decision[Decision]
        accept["Action: Maintainer reviews updates, accepts, assigns milestone\nLabels: kind/bug, priority/P2, status/accepted, area/*, impact/*"]
        style accept text-align: left
    end

    new --> triage
    triage --> decision
```

## Pull request review process

A thorough and timely review process for pull requests (PRs) is crucial for maintaining the integrity and quality of the project while fostering a collaborative environment.

- **Labeling**: Most labels should be inherited from a linked issue. If no issue is linked, an extended review process may be required.
- **Continuous Integration**: With few exceptions, it is crucial that all Continuous Integration (CI) workflows pass successfully.
- **Draft Status**: Incomplete or long-running PRs should be placed in "Draft" status. They may revert to "Draft" status upon initial review if significant rework is required.

```mermaid
flowchart LR
    triage([Triage])
    draft[Draft PR]
    review[PR Review]
    closed{{Close PR}}
    merge{{Merge PR}}

    subgraph feedback1[Feedback Loop]
        draft
    end
    subgraph feedback2[Feedback Loop]
        review
    end

    triage --> draft
    draft --> review
    review --> closed
    review --> draft
    review --> merge
```

## Handling stalled issues and pull requests

Unfortunately, some issues or pull requests can remain inactive for extended periods. To mitigate this, automation is employed to prompt both the author and maintainers, ensuring that all contributions receive appropriate attention.

**For Authors:**

- **Closure of Inactive Items**: If your issue or PR becomes irrelevant or is no longer needed, please close it to help keep the project clean.
- **Prompt Responses**: If additional information is requested, please respond promptly to facilitate progress.

**For Maintainers:**

- **Timely Responses**: Endeavor to address issues and PRs within a reasonable timeframe to keep the community actively engaged.
- **Engagement with Stale Issues**: If an issue becomes stale due to maintainer inaction, re-engage with the author to reassess and revitalize the discussion.

**Stale and Rotten Policy:**

- An issue or PR will be labeled as **`stale`** after 14 calendar days of inactivity. If it remains inactive for another 30 days, it will be labeled as **`rotten`** and closed.
- Authors whose issues or PRs have been closed are welcome to re-open them or create new ones and link to the original.

**Skipping Stale Processing:**

- To prevent an issue or PR from being marked as stale, label it as **`frozen`**.

**Exceptions to Stale Processing:**

- Issues or PRs marked as **`frozen`**.
- Issues or PRs assigned to a milestone.

## Moving to a discussion

Sometimes, an issue or pull request may not be the appropriate medium for what is essentially a discussion. In such cases, the issue or PR will either be converted to a discussion or a new discussion will be created. The original item will then be labeled appropriately (**`kind/discussion`** or **`kind/question`**) and closed.

If you believe this conversion was made in error, please express your concerns in the new discussion thread. If necessary, a reversal to the original issue or PR format can be facilitated.

## Workflow automation

To help expedite common operations, avoid errors, and reduce toil, the project uses some workflow automation. This can include:

- Stale issue or pull request processing
- Auto-labeling actions
- Auto-response actions
- Label carry-over from issue to pull request

### Exempting an issue/PR from stale bot processing

The stale item handling is configured in the [repository](link-to-config-file). To exempt an issue or PR from stale processing, you can:

- Add the item to a milestone
- Add the `frozen` label to the item

## Updating dependencies

- **Runtime Dependencies**: Use the latest stable release available when the first Release Candidate (RC) of a new feature release is cut. For patch releases, update to the latest corresponding patch release of the dependency.
- **Other Dependencies**: Updating to the latest patch release is always permitted in the development branch. Updates to a new feature release require justification, unless the dependency is outdated. Prefer tagged versions of dependencies unless a specific untagged commit is needed. Go modules should specify the lowest compatible version; there is no requirement to update all dependencies to their latest versions before cutting a new Buildx feature release.
- **Patch Releases**: Vendored dependency updates are considered for patch releases only in the qualifying cases described above.
- **Security Considerations**: A security scanner report indicating a non-exploitable issue via Buildx does not justify backports.
bake/bake.go (619 changes)

File diff suppressed because it is too large.
bake/compose.go (111 changes)

@@ -5,13 +5,14 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
-	"sort"
+	"slices"
 	"strings"

 	"github.com/compose-spec/compose-go/v2/consts"
 	"github.com/compose-spec/compose-go/v2/dotenv"
 	"github.com/compose-spec/compose-go/v2/loader"
 	composetypes "github.com/compose-spec/compose-go/v2/types"
+	"github.com/docker/buildx/util/buildflags"
 	dockeropts "github.com/docker/cli/opts"
 	"github.com/docker/go-units"
 	"github.com/pkg/errors"
@@ -91,6 +92,9 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
 		if s.Build.AdditionalContexts != nil {
 			additionalContexts = map[string]string{}
 			for k, v := range s.Build.AdditionalContexts {
+				if strings.HasPrefix(v, "service:") {
+					v = strings.Replace(v, "service:", "target:", 1)
+				}
 				additionalContexts[k] = v
 			}
 		}
@@ -102,6 +106,12 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
 			shmSize = &shmSizeStr
 		}

+		var networkModeP *string
+		if s.Build.Network != "" {
+			networkMode := s.Build.Network
+			networkModeP = &networkMode
+		}
+
 		var ulimits []string
 		if s.Build.Ulimits != nil {
 			for n, u := range s.Build.Ulimits {
@@ -113,14 +123,16 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
 			}
 		}

-		var ssh []string
+		var ssh []*buildflags.SSH
 		for _, bkey := range s.Build.SSH {
 			sshkey := composeToBuildkitSSH(bkey)
 			ssh = append(ssh, sshkey)
 		}
-		sort.Strings(ssh)
+		slices.SortFunc(ssh, func(a, b *buildflags.SSH) int {
+			return a.Less(b)
+		})

-		var secrets []string
+		var secrets []*buildflags.Secret
 		for _, bs := range s.Build.Secrets {
 			secret, err := composeToBuildkitSecret(bs, cfg.Secrets[bs.Source])
 			if err != nil {
@@ -136,6 +148,16 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
 			labels[k] = &v
 		}

+		cacheFrom, err := buildflags.ParseCacheEntry(s.Build.CacheFrom)
+		if err != nil {
+			return nil, err
+		}
+
+		cacheTo, err := buildflags.ParseCacheEntry(s.Build.CacheTo)
+		if err != nil {
+			return nil, err
+		}
+
 		g.Targets = append(g.Targets, targetName)
 		t := &Target{
 			Name: targetName,
@@ -152,9 +174,10 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
 				val, ok := cfg.Environment[val]
 				return val, ok
 			})),
-			CacheFrom:   s.Build.CacheFrom,
-			CacheTo:     s.Build.CacheTo,
-			NetworkMode: &s.Build.Network,
+			CacheFrom:   cacheFrom,
+			CacheTo:     cacheTo,
+			NetworkMode: networkModeP,
+			Platforms:   s.Build.Platforms,
 			SSH:         ssh,
 			Secrets:     secrets,
 			ShmSize:     shmSize,
@@ -173,7 +196,6 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
 			c.Targets = append(c.Targets, t)
 		}
 		c.Groups = append(c.Groups, g)
-
 	}

 	return &c, nil
@@ -196,7 +218,7 @@ func validateComposeFile(dt []byte, fn string) (bool, error) {
 }

 func validateCompose(dt []byte, envs map[string]string) error {
-	_, err := loader.Load(composetypes.ConfigDetails{
+	_, err := loader.LoadWithContext(context.Background(), composetypes.ConfigDetails{
 		ConfigFiles: []composetypes.ConfigFile{
 			{
 				Content: dt,
@@ -292,10 +314,12 @@ type xbake struct {
 	// https://github.com/docker/docs/blob/main/content/build/bake/compose-file.md#extension-field-with-x-bake
 }

-type stringMap map[string]string
-type stringArray []string
+type (
+	stringMap   map[string]string
+	stringArray []string
+)

-func (sa *stringArray) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (sa *stringArray) UnmarshalYAML(unmarshal func(any) error) error {
 	var multi []string
 	err := unmarshal(&multi)
 	if err != nil {
@@ -312,7 +336,7 @@ func (sa *stringArray) UnmarshalYAML(unmarshal func(interface{}) error) error {

 // composeExtTarget converts Compose build extension x-bake to bake Target
 // https://github.com/compose-spec/compose-spec/blob/master/spec.md#extension
-func (t *Target) composeExtTarget(exts map[string]interface{}) error {
+func (t *Target) composeExtTarget(exts map[string]any) error {
 	var xb xbake

 	ext, ok := exts["x-bake"]
@@ -329,23 +353,45 @@ func (t *Target) composeExtTarget(exts map[string]interface{}) error {
 		t.Tags = dedupSlice(append(t.Tags, xb.Tags...))
 	}
 	if len(xb.CacheFrom) > 0 {
-		t.CacheFrom = dedupSlice(append(t.CacheFrom, xb.CacheFrom...))
+		cacheFrom, err := buildflags.ParseCacheEntry(xb.CacheFrom)
+		if err != nil {
+			return err
+		}
+		t.CacheFrom = t.CacheFrom.Merge(cacheFrom)
 	}
 	if len(xb.CacheTo) > 0 {
-		t.CacheTo = dedupSlice(append(t.CacheTo, xb.CacheTo...))
+		cacheTo, err := buildflags.ParseCacheEntry(xb.CacheTo)
+		if err != nil {
+			return err
+		}
+		t.CacheTo = t.CacheTo.Merge(cacheTo)
 	}
 	if len(xb.Secrets) > 0 {
-		t.Secrets = dedupSlice(append(t.Secrets, xb.Secrets...))
+		secrets, err := parseArrValue[buildflags.Secret](xb.Secrets)
+		if err != nil {
+			return err
+		}
+		t.Secrets = t.Secrets.Merge(secrets)
 	}
 	if len(xb.SSH) > 0 {
-		t.SSH = dedupSlice(append(t.SSH, xb.SSH...))
-		sort.Strings(t.SSH)
+		ssh, err := parseArrValue[buildflags.SSH](xb.SSH)
+		if err != nil {
+			return err
+		}
+		t.SSH = t.SSH.Merge(ssh)
+		slices.SortFunc(t.SSH, func(a, b *buildflags.SSH) int {
+			return a.Less(b)
+		})
 	}
 	if len(xb.Platforms) > 0 {
 		t.Platforms = dedupSlice(append(t.Platforms, xb.Platforms...))
 	}
 	if len(xb.Outputs) > 0 {
-		t.Outputs = dedupSlice(append(t.Outputs, xb.Outputs...))
+		outputs, err := parseArrValue[buildflags.ExportEntry](xb.Outputs)
+		if err != nil {
+			return err
+		}
+		t.Outputs = t.Outputs.Merge(outputs)
 	}
 	if xb.Pull != nil {
 		t.Pull = xb.Pull
@@ -365,35 +411,30 @@ func (t *Target) composeExtTarget(exts map[string]interface{}) error {

 // composeToBuildkitSecret converts secret from compose format to buildkit's
 // csv format.
-func composeToBuildkitSecret(inp composetypes.ServiceSecretConfig, psecret composetypes.SecretConfig) (string, error) {
+func composeToBuildkitSecret(inp composetypes.ServiceSecretConfig, psecret composetypes.SecretConfig) (*buildflags.Secret, error) {
 	if psecret.External {
-		return "", errors.Errorf("unsupported external secret %s", psecret.Name)
+		return nil, errors.Errorf("unsupported external secret %s", psecret.Name)
 	}

-	var bkattrs []string
+	secret := &buildflags.Secret{}
 	if inp.Source != "" {
-		bkattrs = append(bkattrs, "id="+inp.Source)
+		secret.ID = inp.Source
 	}
 	if psecret.File != "" {
-		bkattrs = append(bkattrs, "src="+psecret.File)
+		secret.FilePath = psecret.File
 	}
 	if psecret.Environment != "" {
-		bkattrs = append(bkattrs, "env="+psecret.Environment)
+		secret.Env = psecret.Environment
 	}
-	return strings.Join(bkattrs, ","), nil
+	return secret, nil
 }

 // composeToBuildkitSSH converts secret from compose format to buildkit's
 // csv format.
-func composeToBuildkitSSH(sshKey composetypes.SSHKey) string {
-	var bkattrs []string
-
-	bkattrs = append(bkattrs, sshKey.ID)
-
+func composeToBuildkitSSH(sshKey composetypes.SSHKey) *buildflags.SSH {
+	bkssh := &buildflags.SSH{ID: sshKey.ID}
 	if sshKey.Path != "" {
-		bkattrs = append(bkattrs, sshKey.Path)
+		bkssh.Paths = []string{sshKey.Path}
 	}
-
-	return strings.Join(bkattrs, "=")
+	return bkssh
 }
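The change above replaces hand-built CSV strings with typed `buildflags.Secret` and `buildflags.SSH` values, which render back to the familiar CSV form through their string representation and sort through a `Less` comparison. The following self-contained sketch mirrors that pattern with simplified stand-in types; the real `buildflags` types have more fields and methods, so treat this as an illustration of the approach, not the library code.

```go
package main

import (
	"fmt"
	"slices"
	"strings"
)

// SSH mirrors the shape used in the diff: an ID plus optional key paths.
type SSH struct {
	ID    string
	Paths []string
}

// String renders the value in buildkit's CSV-ish "id=path" form,
// matching what the old code built by joining raw strings.
func (s *SSH) String() string {
	if len(s.Paths) == 0 {
		return s.ID
	}
	return s.ID + "=" + strings.Join(s.Paths, ",")
}

// Less orders two values for slices.SortFunc, standing in for the
// buildflags.SSH.Less method the diff calls.
func (s *SSH) Less(other *SSH) int {
	return strings.Compare(s.String(), other.String())
}

func main() {
	ssh := []*SSH{
		{ID: "key", Paths: []string{"/path/to/key"}},
		{ID: "default"},
	}
	// Structured values sort via a comparison function instead of
	// sort.Strings over pre-joined CSV strings, as in the diff above.
	slices.SortFunc(ssh, func(a, b *SSH) int { return a.Less(b) })
	for _, s := range ssh {
		fmt.Println(s) // prints "default", then "key=/path/to/key"
	}
}
```

Keeping the values structured until the last moment is what lets the test expectations below compare a canonical, sorted string form.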
|||||||
@@ -12,7 +12,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestParseCompose(t *testing.T) {
|
func TestParseCompose(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
db:
|
db:
|
||||||
build: ./db
|
build: ./db
|
||||||
@@ -33,7 +33,7 @@ services:
|
|||||||
cache_to:
|
cache_to:
|
||||||
- type=local,dest=path/to/cache
|
- type=local,dest=path/to/cache
|
||||||
ssh:
|
ssh:
|
||||||
- key=path/to/key
|
- key=/path/to/key
|
||||||
- default
|
- default
|
||||||
secrets:
|
secrets:
|
||||||
- token
|
- token
|
||||||
@@ -74,14 +74,14 @@ secrets:
|
|||||||
require.Equal(t, "Dockerfile-alternate", *c.Targets[1].Dockerfile)
|
require.Equal(t, "Dockerfile-alternate", *c.Targets[1].Dockerfile)
|
||||||
require.Equal(t, 1, len(c.Targets[1].Args))
|
require.Equal(t, 1, len(c.Targets[1].Args))
|
||||||
require.Equal(t, ptrstr("123"), c.Targets[1].Args["buildno"])
|
require.Equal(t, ptrstr("123"), c.Targets[1].Args["buildno"])
|
||||||
require.Equal(t, []string{"type=local,src=path/to/cache"}, c.Targets[1].CacheFrom)
|
require.Equal(t, []string{"type=local,src=path/to/cache"}, stringify(c.Targets[1].CacheFrom))
|
||||||
require.Equal(t, []string{"type=local,dest=path/to/cache"}, c.Targets[1].CacheTo)
|
require.Equal(t, []string{"type=local,dest=path/to/cache"}, stringify(c.Targets[1].CacheTo))
|
||||||
require.Equal(t, "none", *c.Targets[1].NetworkMode)
|
require.Equal(t, "none", *c.Targets[1].NetworkMode)
|
||||||
require.Equal(t, []string{"default", "key=path/to/key"}, c.Targets[1].SSH)
|
require.Equal(t, []string{"default", "key=/path/to/key"}, stringify(c.Targets[1].SSH))
|
||||||
require.Equal(t, []string{
|
require.Equal(t, []string{
|
||||||
"id=token,env=ENV_TOKEN",
|
|
||||||
"id=aws,src=/root/.aws/credentials",
|
"id=aws,src=/root/.aws/credentials",
|
||||||
}, c.Targets[1].Secrets)
|
"id=token,env=ENV_TOKEN",
|
||||||
|
}, stringify(c.Targets[1].Secrets))
|
||||||
|
|
||||||
require.Equal(t, "webapp2", c.Targets[2].Name)
|
require.Equal(t, "webapp2", c.Targets[2].Name)
|
||||||
require.Equal(t, "dir", *c.Targets[2].Context)
|
require.Equal(t, "dir", *c.Targets[2].Context)
|
||||||
@@ -89,7 +89,7 @@ secrets:
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestNoBuildOutOfTreeService(t *testing.T) {
|
func TestNoBuildOutOfTreeService(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
external:
|
external:
|
||||||
image: "verycooldb:1337"
|
image: "verycooldb:1337"
|
||||||
@@ -103,7 +103,7 @@ services:
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestParseComposeTarget(t *testing.T) {
|
func TestParseComposeTarget(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
db:
|
db:
|
||||||
build:
|
build:
|
||||||
@@ -129,7 +129,7 @@ services:
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestComposeBuildWithoutContext(t *testing.T) {
|
func TestComposeBuildWithoutContext(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
db:
|
db:
|
||||||
build:
|
build:
|
||||||
@@ -153,7 +153,7 @@ services:
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestBuildArgEnvCompose(t *testing.T) {
|
func TestBuildArgEnvCompose(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
version: "3.8"
|
version: "3.8"
|
||||||
services:
|
services:
|
||||||
example:
|
example:
|
||||||
@@ -179,7 +179,7 @@ services:
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestInconsistentComposeFile(t *testing.T) {
|
func TestInconsistentComposeFile(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
webapp:
|
webapp:
|
||||||
entrypoint: echo 1
|
entrypoint: echo 1
|
||||||
@@ -190,7 +190,7 @@ services:
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestAdvancedNetwork(t *testing.T) {
|
func TestAdvancedNetwork(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
db:
|
db:
|
||||||
networks:
|
networks:
|
||||||
@@ -215,7 +215,7 @@ networks:
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestTags(t *testing.T) {
|
func TestTags(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
example:
|
example:
|
||||||
image: example
|
image: example
|
||||||
@@ -233,7 +233,7 @@ services:
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestDependsOnList(t *testing.T) {
|
func TestDependsOnList(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
version: "3.8"
|
version: "3.8"
|
||||||
|
|
||||||
services:
|
services:
|
||||||
@@ -269,7 +269,7 @@ networks:
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestComposeExt(t *testing.T) {
|
func TestComposeExt(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
addon:
|
addon:
|
||||||
image: ct-addon:bar
|
image: ct-addon:bar
|
||||||
@@ -283,7 +283,7 @@ services:
|
|||||||
tags:
|
tags:
|
||||||
- ct-addon:baz
|
- ct-addon:baz
|
||||||
ssh:
|
ssh:
|
||||||
key: path/to/key
|
key: /path/to/key
|
||||||
args:
|
args:
|
||||||
CT_ECR: foo
|
CT_ECR: foo
|
||||||
CT_TAG: bar
|
CT_TAG: bar
|
||||||
@@ -336,23 +336,23 @@ services:
|
|||||||
require.Equal(t, map[string]*string{"CT_ECR": ptrstr("foo"), "CT_TAG": ptrstr("bar")}, c.Targets[0].Args)
|
require.Equal(t, map[string]*string{"CT_ECR": ptrstr("foo"), "CT_TAG": ptrstr("bar")}, c.Targets[0].Args)
|
||||||
require.Equal(t, []string{"ct-addon:baz", "ct-addon:foo", "ct-addon:alp"}, c.Targets[0].Tags)
|
require.Equal(t, []string{"ct-addon:baz", "ct-addon:foo", "ct-addon:alp"}, c.Targets[0].Tags)
|
||||||
require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[0].Platforms)
|
require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[0].Platforms)
|
||||||
require.Equal(t, []string{"user/app:cache", "type=local,src=path/to/cache"}, c.Targets[0].CacheFrom)
|
require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheFrom))
|
||||||
require.Equal(t, []string{"user/app:cache", "type=local,dest=path/to/cache"}, c.Targets[0].CacheTo)
|
require.Equal(t, []string{"type=local,dest=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheTo))
|
||||||
require.Equal(t, []string{"default", "key=path/to/key", "other=path/to/otherkey"}, c.Targets[0].SSH)
|
require.Equal(t, []string{"default", "key=/path/to/key", "other=path/to/otherkey"}, stringify(c.Targets[0].SSH))
|
||||||
require.Equal(t, newBool(true), c.Targets[0].Pull)
|
require.Equal(t, newBool(true), c.Targets[0].Pull)
|
||||||
require.Equal(t, map[string]string{"alpine": "docker-image://alpine:3.13"}, c.Targets[0].Contexts)
|
require.Equal(t, map[string]string{"alpine": "docker-image://alpine:3.13"}, c.Targets[0].Contexts)
|
||||||
require.Equal(t, []string{"ct-fake-aws:bar"}, c.Targets[1].Tags)
|
require.Equal(t, []string{"ct-fake-aws:bar"}, c.Targets[1].Tags)
|
||||||
require.Equal(t, []string{"id=mysecret,src=/local/secret", "id=mysecret2,src=/local/secret2"}, c.Targets[1].Secrets)
|
require.Equal(t, []string{"id=mysecret,src=/local/secret", "id=mysecret2,src=/local/secret2"}, stringify(c.Targets[1].Secrets))
|
||||||
require.Equal(t, []string{"default"}, c.Targets[1].SSH)
|
require.Equal(t, []string{"default"}, stringify(c.Targets[1].SSH))
|
||||||
require.Equal(t, []string{"linux/arm64"}, c.Targets[1].Platforms)
|
require.Equal(t, []string{"linux/arm64"}, c.Targets[1].Platforms)
|
||||||
require.Equal(t, []string{"type=docker"}, c.Targets[1].Outputs)
|
require.Equal(t, []string{"type=docker"}, stringify(c.Targets[1].Outputs))
|
||||||
require.Equal(t, newBool(true), c.Targets[1].NoCache)
|
require.Equal(t, newBool(true), c.Targets[1].NoCache)
|
||||||
require.Equal(t, ptrstr("128MiB"), c.Targets[1].ShmSize)
|
require.Equal(t, ptrstr("128MiB"), c.Targets[1].ShmSize)
|
||||||
require.Equal(t, []string{"nofile=1024:1024"}, c.Targets[1].Ulimits)
|
require.Equal(t, []string{"nofile=1024:1024"}, c.Targets[1].Ulimits)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestComposeExtDedup(t *testing.T) {
|
func TestComposeExtDedup(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
webapp:
|
webapp:
|
||||||
image: app:bar
|
image: app:bar
|
||||||
@@ -383,9 +383,9 @@ services:
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, []string{"ct-addon:foo", "ct-addon:baz"}, c.Targets[0].Tags)
|
require.Equal(t, []string{"ct-addon:foo", "ct-addon:baz"}, c.Targets[0].Tags)
|
||||||
require.Equal(t, []string{"user/app:cache", "type=local,src=path/to/cache"}, c.Targets[0].CacheFrom)
|
require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheFrom))
|
||||||
require.Equal(t, []string{"user/app:cache", "type=local,dest=path/to/cache"}, c.Targets[0].CacheTo)
|
require.Equal(t, []string{"type=local,dest=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheTo))
|
||||||
require.Equal(t, []string{"default", "key=path/to/key"}, c.Targets[0].SSH)
|
require.Equal(t, []string{"default", "key=path/to/key"}, stringify(c.Targets[0].SSH))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestEnv(t *testing.T) {
|
func TestEnv(t *testing.T) {
|
||||||
@@ -396,7 +396,7 @@ func TestEnv(t *testing.T) {
|
|||||||
_, err = envf.WriteString("FOO=bsdf -csdf\n")
|
_, err = envf.WriteString("FOO=bsdf -csdf\n")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
scratch:
|
scratch:
|
||||||
build:
|
build:
|
||||||
@@ -424,7 +424,7 @@ func TestDotEnv(t *testing.T) {
 	err := os.WriteFile(filepath.Join(tmpdir, ".env"), []byte("FOO=bar"), 0644)
 	require.NoError(t, err)
 
-	var dt = []byte(`
+	dt := []byte(`
 services:
   scratch:
     build:
@@ -443,7 +443,7 @@ services:
 }
 
 func TestPorts(t *testing.T) {
-	var dt = []byte(`
+	dt := []byte(`
 services:
   foo:
     build:
@@ -463,6 +463,21 @@ services:
 	require.NoError(t, err)
 }
 
+func TestPlatforms(t *testing.T) {
+	dt := []byte(`
+services:
+  foo:
+    build:
+      context: .
+      platforms:
+        - linux/amd64
+        - linux/arm64
+`)
+	c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
+	require.NoError(t, err)
+	require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[0].Platforms)
+}
+
 func newBool(val bool) *bool {
 	b := val
 	return &b
@@ -664,7 +679,7 @@ target "default" {
 }
 
 func TestComposeNullArgs(t *testing.T) {
-	var dt = []byte(`
+	dt := []byte(`
 services:
   scratch:
     build:
@@ -680,7 +695,7 @@ services:
 }
 
 func TestDependsOn(t *testing.T) {
-	var dt = []byte(`
+	dt := []byte(`
 services:
   foo:
     build:
@@ -711,7 +726,7 @@ services:
 `), 0644)
 	require.NoError(t, err)
 
-	var dt = []byte(`
+	dt := []byte(`
 include:
   - compose-foo.yml
 
@@ -740,7 +755,7 @@ services:
 }
 
 func TestDevelop(t *testing.T) {
-	var dt = []byte(`
+	dt := []byte(`
 services:
   scratch:
     build:
@@ -759,7 +774,7 @@ services:
 }
 
 func TestCgroup(t *testing.T) {
-	var dt = []byte(`
+	dt := []byte(`
 services:
   scratch:
     build:
@@ -772,7 +787,7 @@ services:
 }
 
 func TestProjectName(t *testing.T) {
-	var dt = []byte(`
+	dt := []byte(`
 services:
   scratch:
     build:
@@ -798,6 +813,37 @@ services:
 	})
 }
 
+func TestServiceContext(t *testing.T) {
+	dt := []byte(`
+services:
+  base:
+    build:
+      dockerfile: baseapp.Dockerfile
+    command: ./entrypoint.sh
+  webapp:
+    build:
+      context: ./dir
+      additional_contexts:
+        base: service:base
+`)
+
+	c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
+	require.NoError(t, err)
+
+	require.Equal(t, 1, len(c.Groups))
+	require.Equal(t, "default", c.Groups[0].Name)
+	sort.Strings(c.Groups[0].Targets)
+	require.Equal(t, []string{"base", "webapp"}, c.Groups[0].Targets)
+
+	require.Equal(t, 2, len(c.Targets))
+	sort.Slice(c.Targets, func(i, j int) bool {
+		return c.Targets[i].Name < c.Targets[j].Name
+	})
+
+	require.Equal(t, "webapp", c.Targets[1].Name)
+	require.Equal(t, map[string]string{"base": "target:base"}, c.Targets[1].Contexts)
+}
+
 // chdir changes the current working directory to the named directory,
 // and then restores the original working directory at the end of the test.
 func chdir(t *testing.T, dir string) {
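The additional_contexts entry above uses compose's service:<name> form, while the assertion expects bake's target:<name> form. A hypothetical sketch of that translation as the test implies it (the names svc and tgt are illustrative, not taken from this diff):

	// service:base (compose) becomes target:base (bake), so the webapp
	// target builds against the base target's result, not a named image.
	for k, v := range svc.Build.AdditionalContexts {
		if strings.HasPrefix(v, "service:") {
			v = "target:" + strings.TrimPrefix(v, "service:")
		}
		tgt.Contexts[k] = v
	}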
@@ -2,17 +2,25 @@ package bake
 
 import (
 	"bufio"
+	"cmp"
 	"context"
 	"fmt"
 	"io"
+	"io/fs"
 	"os"
+	"path/filepath"
 	"slices"
+	"strconv"
 	"strings"
+	"syscall"
 
 	"github.com/containerd/console"
 	"github.com/docker/buildx/build"
+	"github.com/docker/buildx/util/osutil"
 	"github.com/moby/buildkit/util/entitlements"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"github.com/tonistiigi/go-csvvalue"
 )
 
 type EntitlementKey string
@@ -20,6 +28,7 @@ type EntitlementKey string
 const (
 	EntitlementKeyNetworkHost      EntitlementKey = "network.host"
 	EntitlementKeySecurityInsecure EntitlementKey = "security.insecure"
+	EntitlementKeyDevice           EntitlementKey = "device"
 	EntitlementKeyFSRead           EntitlementKey = "fs.read"
 	EntitlementKeyFSWrite          EntitlementKey = "fs.write"
 	EntitlementKeyFS               EntitlementKey = "fs"
@@ -32,6 +41,7 @@ const (
 type EntitlementConf struct {
 	NetworkHost      bool
 	SecurityInsecure bool
+	Devices          *EntitlementsDevicesConf
 	FSRead           []string
 	FSWrite          []string
 	ImagePush        []string
@@ -39,6 +49,11 @@ type EntitlementConf struct {
 	SSH bool
 }
 
+type EntitlementsDevicesConf struct {
+	All     bool
+	Devices map[string]struct{}
+}
+
 func ParseEntitlements(in []string) (EntitlementConf, error) {
 	var conf EntitlementConf
 	for _, e := range in {
@@ -52,6 +67,22 @@ func ParseEntitlements(in []string) (EntitlementConf, error) {
 		default:
 			k, v, _ := strings.Cut(e, "=")
 			switch k {
+			case string(EntitlementKeyDevice):
+				if v == "" {
+					conf.Devices = &EntitlementsDevicesConf{All: true}
+					continue
+				}
+				fields, err := csvvalue.Fields(v, nil)
+				if err != nil {
+					return EntitlementConf{}, errors.Wrapf(err, "failed to parse device entitlement %q", v)
+				}
+				if conf.Devices == nil {
+					conf.Devices = &EntitlementsDevicesConf{}
+				}
+				if conf.Devices.Devices == nil {
+					conf.Devices.Devices = make(map[string]struct{}, 0)
+				}
+				conf.Devices.Devices[fields[0]] = struct{}{}
 			case string(EntitlementKeyFSRead):
 				conf.FSRead = append(conf.FSRead, v)
 			case string(EntitlementKeyFSWrite):
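Hedged example of the two device forms the case above accepts, read directly off the code:

	// "device" alone grants all CDI devices (conf.Devices.All == true);
	// "device=<name>" records a single named device.
	conf, err := ParseEntitlements([]string{"device=vendor1.com/device=foo"})
	// err == nil; conf.Devices.Devices == map[string]struct{}{"vendor1.com/device=foo": {}}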
@@ -67,10 +98,8 @@ func ParseEntitlements(in []string) (EntitlementConf, error) {
 				conf.ImagePush = append(conf.ImagePush, v)
 				conf.ImageLoad = append(conf.ImageLoad, v)
 			default:
-				return conf, errors.Errorf("uknown entitlement key %q", k)
+				return conf, errors.Errorf("unknown entitlement key %q", k)
 			}
-
-			// TODO: dedupe slices and parent paths
 		}
 	}
 	return conf, nil
@@ -90,21 +119,99 @@ func (c EntitlementConf) Validate(m map[string]build.Options) (EntitlementConf,
 
 func (c EntitlementConf) check(bo build.Options, expected *EntitlementConf) error {
 	for _, e := range bo.Allow {
+		k, rest, _ := strings.Cut(e, "=")
+		switch k {
+		case entitlements.EntitlementDevice.String():
+			if rest == "" {
+				if c.Devices == nil || !c.Devices.All {
+					expected.Devices = &EntitlementsDevicesConf{All: true}
+				}
+				continue
+			}
+			fields, err := csvvalue.Fields(rest, nil)
+			if err != nil {
+				return errors.Wrapf(err, "failed to parse device entitlement %q", rest)
+			}
+			if expected.Devices == nil {
+				expected.Devices = &EntitlementsDevicesConf{}
+			}
+			if expected.Devices.Devices == nil {
+				expected.Devices.Devices = make(map[string]struct{}, 0)
+			}
+			expected.Devices.Devices[fields[0]] = struct{}{}
+		}
+
 		switch e {
-		case entitlements.EntitlementNetworkHost:
+		case entitlements.EntitlementNetworkHost.String():
 			if !c.NetworkHost {
 				expected.NetworkHost = true
 			}
-		case entitlements.EntitlementSecurityInsecure:
+		case entitlements.EntitlementSecurityInsecure.String():
 			if !c.SecurityInsecure {
 				expected.SecurityInsecure = true
 			}
 		}
 	}
+
+	rwPaths := map[string]struct{}{}
+	roPaths := map[string]struct{}{}
+
+	for _, p := range collectLocalPaths(bo.Inputs) {
+		roPaths[p] = struct{}{}
+	}
+
+	for _, p := range bo.ExportsLocalPathsTemporary {
+		rwPaths[p] = struct{}{}
+	}
+
+	for _, ce := range bo.CacheTo {
+		if ce.Type == "local" {
+			if dest, ok := ce.Attrs["dest"]; ok {
+				rwPaths[dest] = struct{}{}
+			}
+		}
+	}
+
+	for _, ci := range bo.CacheFrom {
+		if ci.Type == "local" {
+			if src, ok := ci.Attrs["src"]; ok {
+				roPaths[src] = struct{}{}
+			}
+		}
+	}
+
+	for _, secret := range bo.SecretSpecs {
+		if secret.FilePath != "" {
+			roPaths[secret.FilePath] = struct{}{}
+		}
+	}
+
+	for _, ssh := range bo.SSHSpecs {
+		for _, p := range ssh.Paths {
+			roPaths[p] = struct{}{}
+		}
+		if len(ssh.Paths) == 0 {
+			if !c.SSH {
+				expected.SSH = true
+			}
+		}
+	}
+
+	var err error
+	expected.FSRead, err = findMissingPaths(c.FSRead, roPaths)
+	if err != nil {
+		return err
+	}
+
+	expected.FSWrite, err = findMissingPaths(c.FSWrite, rwPaths)
+	if err != nil {
+		return err
+	}
+
 	return nil
 }
 
-func (c EntitlementConf) Prompt(ctx context.Context, out io.Writer) error {
+func (c EntitlementConf) Prompt(ctx context.Context, isRemote bool, out io.Writer) error {
 	var term bool
 	if _, err := console.ConsoleFromFile(os.Stdin); err == nil {
 		term = true
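Reading the new check body above: build inputs, cache-from sources, secret files, and SSH key paths are collected as read-only, local exports and cache-to destinations as read-write, and whatever the granted FSRead/FSWrite sets do not cover comes back in expected. A hedged sketch of the resulting behavior:

	// Illustrative only: a build exporting to ./out with no fs.write grant.
	bo := build.Options{ExportsLocalPathsTemporary: []string{"./out"}}
	var missing EntitlementConf
	_ = EntitlementConf{}.check(bo, &missing)
	// missing.FSWrite now holds the resolved ./out path.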
@@ -113,35 +220,93 @@ func (c EntitlementConf) Prompt(ctx context.Context, out io.Writer) error {
 	var msgs []string
 	var flags []string
 
+	// these warnings are currently disabled to give users time to update
+	var msgsFS []string
+	var flagsFS []string
+
 	if c.NetworkHost {
 		msgs = append(msgs, " - Running build containers that can access host network")
-		flags = append(flags, "network.host")
+		flags = append(flags, string(EntitlementKeyNetworkHost))
 	}
 	if c.SecurityInsecure {
 		msgs = append(msgs, " - Running privileged containers that can make system changes")
-		flags = append(flags, "security.insecure")
+		flags = append(flags, string(EntitlementKeySecurityInsecure))
 	}
 
+	if c.Devices != nil {
+		if c.Devices.All {
+			msgs = append(msgs, " - Access to CDI devices")
+			flags = append(flags, string(EntitlementKeyDevice))
+		} else {
+			for d := range c.Devices.Devices {
+				msgs = append(msgs, fmt.Sprintf(" - Access to device %s", d))
+				flags = append(flags, string(EntitlementKeyDevice)+"="+d)
+			}
+		}
+	}
+
+	if c.SSH {
+		msgsFS = append(msgsFS, " - Forwarding default SSH agent socket")
+		flagsFS = append(flagsFS, string(EntitlementKeySSH))
+	}
+
+	roPaths, rwPaths, commonPaths := groupSamePaths(c.FSRead, c.FSWrite)
+	wd, err := os.Getwd()
+	if err != nil {
+		return errors.Wrap(err, "failed to get current working directory")
+	}
+	wd, err = filepath.EvalSymlinks(wd)
+	if err != nil {
+		return errors.Wrap(err, "failed to evaluate working directory")
+	}
+	roPaths = toRelativePaths(roPaths, wd)
+	rwPaths = toRelativePaths(rwPaths, wd)
+	commonPaths = toRelativePaths(commonPaths, wd)
+
+	if len(commonPaths) > 0 {
+		for _, p := range commonPaths {
+			msgsFS = append(msgsFS, fmt.Sprintf(" - Read and write access to path %s", p))
+			flagsFS = append(flagsFS, string(EntitlementKeyFS)+"="+p)
+		}
+	}
+
+	if len(roPaths) > 0 {
+		for _, p := range roPaths {
+			msgsFS = append(msgsFS, fmt.Sprintf(" - Read access to path %s", p))
+			flagsFS = append(flagsFS, string(EntitlementKeyFSRead)+"="+p)
+		}
+	}
+
+	if len(rwPaths) > 0 {
+		for _, p := range rwPaths {
+			msgsFS = append(msgsFS, fmt.Sprintf(" - Write access to path %s", p))
+			flagsFS = append(flagsFS, string(EntitlementKeyFSWrite)+"="+p)
+		}
+	}
+
-	if len(msgs) == 0 {
+	if len(msgs) == 0 && len(msgsFS) == 0 {
 		return nil
 	}
 
 	fmt.Fprintf(out, "Your build is requesting privileges for following possibly insecure capabilities:\n\n")
-	for _, m := range msgs {
+	for _, m := range slices.Concat(msgs, msgsFS) {
 		fmt.Fprintf(out, "%s\n", m)
 	}
 
 	for i, f := range flags {
 		flags[i] = "--allow=" + f
 	}
+	for i, f := range flagsFS {
+		flagsFS[i] = "--allow=" + f
+	}
 
 	if term {
-		fmt.Fprintf(out, "\nIn order to not see this message in the future pass %q to grant requested privileges.\n", strings.Join(flags, " "))
+		fmt.Fprintf(out, "\nIn order to not see this message in the future pass %q to grant requested privileges.\n", strings.Join(slices.Concat(flags, flagsFS), " "))
 	} else {
-		fmt.Fprintf(out, "\nPass %q to grant requested privileges.\n", strings.Join(flags, " "))
+		fmt.Fprintf(out, "\nPass %q to grant requested privileges.\n", strings.Join(slices.Concat(flags, flagsFS), " "))
 	}
 
-	args := append([]string(nil), os.Args...)
+	args := slices.Clone(os.Args)
 	if v, ok := os.LookupEnv("DOCKER_CLI_PLUGIN_ORIGINAL_CLI_COMMAND"); ok && v != "" {
 		args[0] = v
 	}
@@ -149,7 +314,33 @@ func (c EntitlementConf) Prompt(ctx context.Context, out io.Writer) error {
 
 	if idx != -1 {
 		fmt.Fprintf(out, "\nYour full command with requested privileges:\n\n")
-		fmt.Fprintf(out, "%s %s %s\n\n", strings.Join(args[:idx+1], " "), strings.Join(flags, " "), strings.Join(args[idx+1:], " "))
+		fmt.Fprintf(out, "%s %s %s\n\n", strings.Join(args[:idx+1], " "), strings.Join(slices.Concat(flags, flagsFS), " "), strings.Join(args[idx+1:], " "))
+	}
+
+	fsEntitlementsEnabled := true
+	if isRemote {
+		if v, ok := os.LookupEnv("BAKE_ALLOW_REMOTE_FS_ACCESS"); ok {
+			vv, err := strconv.ParseBool(v)
+			if err != nil {
+				return errors.Wrapf(err, "failed to parse BAKE_ALLOW_REMOTE_FS_ACCESS value %q", v)
+			}
+			fsEntitlementsEnabled = !vv
+		}
+	}
+	v, fsEntitlementsSet := os.LookupEnv("BUILDX_BAKE_ENTITLEMENTS_FS")
+	if fsEntitlementsSet {
+		vv, err := strconv.ParseBool(v)
+		if err != nil {
+			return errors.Wrapf(err, "failed to parse BUILDX_BAKE_ENTITLEMENTS_FS value %q", v)
+		}
+		fsEntitlementsEnabled = vv
+	}
+
+	if !fsEntitlementsEnabled && len(msgs) == 0 {
+		return nil
+	}
+	if fsEntitlementsEnabled && !fsEntitlementsSet && len(msgsFS) != 0 {
+		fmt.Fprintf(out, "To disable filesystem entitlements checks, you can set BUILDX_BAKE_ENTITLEMENTS_FS=0 .\n\n")
 	}
 
 	if term {
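Hedged reading of the two toggles introduced above, taken from the code: BAKE_ALLOW_REMOTE_FS_ACCESS=1 disables the filesystem checks for remote bake definitions, while an explicit BUILDX_BAKE_ENTITLEMENTS_FS setting wins either way:

	// Sketch of the precedence as implemented above.
	os.Setenv("BAKE_ALLOW_REMOTE_FS_ACCESS", "1") // remote builds: skip fs checks...
	os.Setenv("BUILDX_BAKE_ENTITLEMENTS_FS", "1") // ...but this re-enables them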
@@ -173,3 +364,296 @@ func (c EntitlementConf) Prompt(ctx context.Context, out io.Writer) error {
 
 	return errors.Errorf("additional privileges requested")
 }
+
+func isParentOrEqualPath(p, parent string) bool {
+	if p == parent || parent == "/" {
+		return true
+	}
+	if strings.HasPrefix(p, filepath.Clean(parent+string(filepath.Separator))) {
+		return true
+	}
+	return false
+}
+
+func findMissingPaths(set []string, paths map[string]struct{}) ([]string, error) {
+	set, allowAny, err := evaluatePaths(set)
+	if err != nil {
+		return nil, err
+	} else if allowAny {
+		return nil, nil
+	}
+
+	paths, err = evaluateToExistingPaths(paths)
+	if err != nil {
+		return nil, err
+	}
+	paths, err = dedupPaths(paths)
+	if err != nil {
+		return nil, err
+	}
+
+	out := make([]string, 0, len(paths))
+loop0:
+	for p := range paths {
+		for _, c := range set {
+			if isParentOrEqualPath(p, c) {
+				continue loop0
+			}
+		}
+		out = append(out, p)
+	}
+	if len(out) == 0 {
+		return nil, nil
+	}
+
+	slices.Sort(out)
+
+	return out, nil
+}
+
+func dedupPaths(in map[string]struct{}) (map[string]struct{}, error) {
+	arr := make([]string, 0, len(in))
+	for p := range in {
+		arr = append(arr, filepath.Clean(p))
+	}
+
+	slices.SortFunc(arr, func(a, b string) int {
+		return cmp.Compare(len(a), len(b))
+	})
+
+	m := make(map[string]struct{}, len(arr))
+loop0:
+	for _, p := range arr {
+		for parent := range m {
+			if strings.HasPrefix(p, parent+string(filepath.Separator)) {
+				continue loop0
+			}
+		}
+		m[p] = struct{}{}
+	}
+	return m, nil
+}
+
+func toRelativePaths(in []string, wd string) []string {
+	out := make([]string, 0, len(in))
+	for _, p := range in {
+		rel, err := filepath.Rel(wd, p)
+		if err == nil {
+			// allow up to one level of ".." in the path
+			if !strings.HasPrefix(rel, ".."+string(filepath.Separator)+"..") {
+				out = append(out, rel)
+				continue
+			}
+		}
+		out = append(out, p)
+	}
+	return out
+}
+
+func groupSamePaths(in1, in2 []string) ([]string, []string, []string) {
+	if in1 == nil || in2 == nil {
+		return in1, in2, nil
+	}
+
+	slices.Sort(in1)
+	slices.Sort(in2)
+
+	common := []string{}
+	i, j := 0, 0
+
+	for i < len(in1) && j < len(in2) {
+		switch {
+		case in1[i] == in2[j]:
+			common = append(common, in1[i])
+			i++
+			j++
+		case in1[i] < in2[j]:
+			i++
+		default:
+			j++
+		}
+	}
+
+	in1 = removeCommonPaths(in1, common)
+	in2 = removeCommonPaths(in2, common)
+
+	return in1, in2, common
+}
+
+func removeCommonPaths(in, common []string) []string {
+	filtered := make([]string, 0, len(in))
+	commonIndex := 0
+	for _, path := range in {
+		if commonIndex < len(common) && path == common[commonIndex] {
+			commonIndex++
+			continue
+		}
+		filtered = append(filtered, path)
+	}
+	return filtered
+}
+
+func evaluatePaths(in []string) ([]string, bool, error) {
+	out := make([]string, 0, len(in))
+	allowAny := false
+	for _, p := range in {
+		if p == "*" {
+			allowAny = true
+			continue
+		}
+		v, err := filepath.Abs(p)
+		if err != nil {
+			logrus.Warnf("failed to evaluate entitlement path %q: %v", p, err)
+			continue
+		}
+		v, rest, err := evaluateToExistingPath(v)
+		if err != nil {
+			return nil, false, errors.Wrapf(err, "failed to evaluate path %q", p)
+		}
+		v, err = osutil.GetLongPathName(v)
+		if err != nil {
+			return nil, false, errors.Wrapf(err, "failed to evaluate path %q", p)
+		}
+		if rest != "" {
+			v = filepath.Join(v, rest)
+		}
+		out = append(out, v)
+	}
+	return out, allowAny, nil
+}
+
+func evaluateToExistingPaths(in map[string]struct{}) (map[string]struct{}, error) {
+	m := make(map[string]struct{}, len(in))
+	for p := range in {
+		v, _, err := evaluateToExistingPath(p)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to evaluate path %q", p)
+		}
+		v, err = osutil.GetLongPathName(v)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to evaluate path %q", p)
+		}
+		m[v] = struct{}{}
+	}
+	return m, nil
+}
+
+func evaluateToExistingPath(in string) (string, string, error) {
+	in, err := filepath.Abs(in)
+	if err != nil {
+		return "", "", err
+	}
+
+	volLen := volumeNameLen(in)
+	pathSeparator := string(os.PathSeparator)
+
+	if volLen < len(in) && os.IsPathSeparator(in[volLen]) {
+		volLen++
+	}
+	vol := in[:volLen]
+	dest := vol
+	linksWalked := 0
+	var end int
+	for start := volLen; start < len(in); start = end {
+		for start < len(in) && os.IsPathSeparator(in[start]) {
+			start++
+		}
+		end = start
+		for end < len(in) && !os.IsPathSeparator(in[end]) {
+			end++
+		}
+
+		if end == start {
+			break
+		} else if in[start:end] == "." {
+			continue
+		} else if in[start:end] == ".." {
+			var r int
+			for r = len(dest) - 1; r >= volLen; r-- {
+				if os.IsPathSeparator(dest[r]) {
+					break
+				}
+			}
+			if r < volLen || dest[r+1:] == ".." {
+				if len(dest) > volLen {
+					dest += pathSeparator
+				}
+				dest += ".."
+			} else {
+				dest = dest[:r]
+			}
+			continue
+		}
+
+		if len(dest) > volumeNameLen(dest) && !os.IsPathSeparator(dest[len(dest)-1]) {
+			dest += pathSeparator
+		}
+		dest += in[start:end]
+
+		fi, err := os.Lstat(dest)
+		if err != nil {
+			// If the component doesn't exist, return the last valid path
+			if os.IsNotExist(err) {
+				for r := len(dest) - 1; r >= volLen; r-- {
+					if os.IsPathSeparator(dest[r]) {
+						return dest[:r], in[start:], nil
+					}
+				}
+				return vol, in[start:], nil
+			}
+			return "", "", err
+		}
+
+		if fi.Mode()&fs.ModeSymlink == 0 {
+			if !fi.Mode().IsDir() && end < len(in) {
+				return "", "", syscall.ENOTDIR
+			}
+			continue
+		}
+
+		linksWalked++
+		if linksWalked > 255 {
+			return "", "", errors.New("too many symlinks")
+		}
+
+		link, err := os.Readlink(dest)
+		if err != nil {
+			return "", "", err
+		}
+
+		in = link + in[end:]
+
+		v := volumeNameLen(link)
+		if v > 0 {
+			if v < len(link) && os.IsPathSeparator(link[v]) {
+				v++
+			}
+			vol = link[:v]
+			dest = vol
+			end = len(vol)
+		} else if len(link) > 0 && os.IsPathSeparator(link[0]) {
+			dest = link[:1]
+			end = 1
+			vol = link[:1]
+			volLen = 1
+		} else {
+			var r int
+			for r = len(dest) - 1; r >= volLen; r-- {
+				if os.IsPathSeparator(dest[r]) {
+					break
+				}
+			}
+			if r < volLen {
+				dest = vol
+			} else {
+				dest = dest[:r]
+			}
+			end = 0
+		}
+	}
+	return filepath.Clean(dest), "", nil
+}
+
+func volumeNameLen(s string) int {
+	return len(filepath.VolumeName(s))
+}
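Illustrative behavior of the helpers above, with values mirroring TestDedupePaths in the new test file that follows:

	in := map[string]struct{}{"/a/b/c": {}, "/a/b/c/d": {}, "/a/b/../b/c": {}}
	out, _ := dedupPaths(in)
	// out == map[string]struct{}{"/a/b/c": {}}: child paths and unclean
	// duplicates collapse into the shortest covering path.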
486	bake/entitlements_test.go	(new file)
@@ -0,0 +1,486 @@
+package bake
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"slices"
+	"testing"
+
+	"github.com/docker/buildx/build"
+	"github.com/docker/buildx/controller/pb"
+	"github.com/docker/buildx/util/osutil"
+	"github.com/moby/buildkit/client/llb"
+	"github.com/moby/buildkit/util/entitlements"
+	"github.com/stretchr/testify/require"
+)
+
+func TestEvaluateToExistingPath(t *testing.T) {
+	tempDir, err := osutil.GetLongPathName(t.TempDir())
+	require.NoError(t, err)
+
+	// Setup temporary directory structure for testing
+	existingFile := filepath.Join(tempDir, "existing_file")
+	require.NoError(t, os.WriteFile(existingFile, []byte("test"), 0644))
+
+	existingDir := filepath.Join(tempDir, "existing_dir")
+	require.NoError(t, os.Mkdir(existingDir, 0755))
+
+	symlinkToFile := filepath.Join(tempDir, "symlink_to_file")
+	require.NoError(t, os.Symlink(existingFile, symlinkToFile))
+
+	symlinkToDir := filepath.Join(tempDir, "symlink_to_dir")
+	require.NoError(t, os.Symlink(existingDir, symlinkToDir))
+
+	nonexistentPath := filepath.Join(tempDir, "nonexistent", "path", "file.txt")
+
+	tests := []struct {
+		name      string
+		input     string
+		expected  string
+		expectErr bool
+	}{
+		{
+			name:      "Existing file",
+			input:     existingFile,
+			expected:  existingFile,
+			expectErr: false,
+		},
+		{
+			name:      "Existing directory",
+			input:     existingDir,
+			expected:  existingDir,
+			expectErr: false,
+		},
+		{
+			name:      "Symlink to file",
+			input:     symlinkToFile,
+			expected:  existingFile,
+			expectErr: false,
+		},
+		{
+			name:      "Symlink to directory",
+			input:     symlinkToDir,
+			expected:  existingDir,
+			expectErr: false,
+		},
+		{
+			name:      "Non-existent path",
+			input:     nonexistentPath,
+			expected:  tempDir,
+			expectErr: false,
+		},
+		{
+			name:      "Non-existent intermediate path",
+			input:     filepath.Join(tempDir, "nonexistent", "file.txt"),
+			expected:  tempDir,
+			expectErr: false,
+		},
+		{
+			name:  "Root path",
+			input: "/",
+			expected: func() string {
+				root, _ := filepath.Abs("/")
+				return root
+			}(),
+			expectErr: false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result, _, err := evaluateToExistingPath(tt.input)
+
+			if tt.expectErr {
+				require.Error(t, err)
+			} else {
+				require.NoError(t, err)
+				require.Equal(t, tt.expected, result)
+			}
+		})
+	}
+}
+
+func TestDedupePaths(t *testing.T) {
+	wd := osutil.GetWd()
+	tcases := []struct {
+		in  map[string]struct{}
+		out map[string]struct{}
+	}{
+		{
+			in: map[string]struct{}{
+				"/a/b/c": {},
+				"/a/b/d": {},
+				"/a/b/e": {},
+			},
+			out: map[string]struct{}{
+				"/a/b/c": {},
+				"/a/b/d": {},
+				"/a/b/e": {},
+			},
+		},
+		{
+			in: map[string]struct{}{
+				"/a/b/c":      {},
+				"/a/b/c/d":    {},
+				"/a/b/c/d/e":  {},
+				"/a/b/../b/c": {},
+			},
+			out: map[string]struct{}{
+				"/a/b/c": {},
+			},
+		},
+		{
+			in: map[string]struct{}{
+				filepath.Join(wd, "a/b/c"):    {},
+				filepath.Join(wd, "../aa"):    {},
+				filepath.Join(wd, "a/b"):      {},
+				filepath.Join(wd, "a/b/d"):    {},
+				filepath.Join(wd, "../aa/b"):  {},
+				filepath.Join(wd, "../../bb"): {},
+			},
+			out: map[string]struct{}{
+				"a/b":                         {},
+				"../aa":                       {},
+				filepath.Join(wd, "../../bb"): {},
+			},
+		},
+	}
+
+	for i, tc := range tcases {
+		t.Run(fmt.Sprintf("case%d", i), func(t *testing.T) {
+			out, err := dedupPaths(tc.in)
+			if err != nil {
+				require.NoError(t, err)
+			}
+			// convert to relative paths as that is shown to user
+			arr := make([]string, 0, len(out))
+			for k := range out {
+				arr = append(arr, k)
+			}
+			require.NoError(t, err)
+			arr = toRelativePaths(arr, wd)
+			m := make(map[string]struct{})
+			for _, v := range arr {
+				m[filepath.ToSlash(v)] = struct{}{}
+			}
+			o := make(map[string]struct{}, len(tc.out))
+			for k := range tc.out {
+				o[filepath.ToSlash(k)] = struct{}{}
+			}
+			require.Equal(t, o, m)
+		})
+	}
+}
+
+func TestValidateEntitlements(t *testing.T) {
+	dir1 := t.TempDir()
+	dir2 := t.TempDir()
+
+	// the paths returned by entitlements validation will have symlinks resolved
+	expDir1, err := filepath.EvalSymlinks(dir1)
+	require.NoError(t, err)
+	expDir2, err := filepath.EvalSymlinks(dir2)
+	require.NoError(t, err)
+
+	escapeLink := filepath.Join(dir1, "escape_link")
+	require.NoError(t, os.Symlink("../../aa", escapeLink))
+
+	wd, err := os.Getwd()
+	require.NoError(t, err)
+	expWd, err := filepath.EvalSymlinks(wd)
+	require.NoError(t, err)
+
+	tcases := []struct {
+		name     string
+		conf     EntitlementConf
+		opt      build.Options
+		expected EntitlementConf
+	}{
+		{
+			name: "No entitlements",
+			opt: build.Options{
+				Inputs: build.Inputs{
+					ContextState: &llb.State{},
+				},
+			},
+		},
+		{
+			name: "NetworkHostMissing",
+			opt: build.Options{
+				Allow: []string{
+					entitlements.EntitlementNetworkHost.String(),
+				},
+			},
+			expected: EntitlementConf{
+				NetworkHost: true,
+				FSRead:      []string{expWd},
+			},
+		},
+		{
+			name: "NetworkHostSet",
+			conf: EntitlementConf{
+				NetworkHost: true,
+			},
+			opt: build.Options{
+				Allow: []string{
+					entitlements.EntitlementNetworkHost.String(),
+				},
+			},
+			expected: EntitlementConf{
+				FSRead: []string{expWd},
+			},
+		},
+		{
+			name: "SecurityAndNetworkHostMissing",
+			opt: build.Options{
+				Allow: []string{
+					entitlements.EntitlementNetworkHost.String(),
+					entitlements.EntitlementSecurityInsecure.String(),
+				},
+			},
+			expected: EntitlementConf{
+				NetworkHost:      true,
+				SecurityInsecure: true,
+				FSRead:           []string{expWd},
+			},
+		},
+		{
+			name: "SecurityMissingAndNetworkHostSet",
+			conf: EntitlementConf{
+				NetworkHost: true,
+			},
+			opt: build.Options{
+				Allow: []string{
+					entitlements.EntitlementNetworkHost.String(),
+					entitlements.EntitlementSecurityInsecure.String(),
+				},
+			},
+			expected: EntitlementConf{
+				SecurityInsecure: true,
+				FSRead:           []string{expWd},
+			},
+		},
+		{
+			name: "SSHMissing",
+			opt: build.Options{
+				SSHSpecs: []*pb.SSH{
+					{
+						ID: "test",
+					},
+				},
+			},
+			expected: EntitlementConf{
+				SSH:    true,
+				FSRead: []string{expWd},
+			},
+		},
+		{
+			name: "ExportLocal",
+			opt: build.Options{
+				ExportsLocalPathsTemporary: []string{
+					dir1,
+					filepath.Join(dir1, "subdir"),
+					dir2,
+				},
+			},
+			expected: EntitlementConf{
+				FSWrite: func() []string {
+					exp := []string{expDir1, expDir2}
+					slices.Sort(exp)
+					return exp
+				}(),
+				FSRead: []string{expWd},
+			},
+		},
+		{
+			name: "SecretFromSubFile",
+			opt: build.Options{
+				SecretSpecs: []*pb.Secret{
+					{
+						FilePath: filepath.Join(dir1, "subfile"),
+					},
+				},
+			},
+			conf: EntitlementConf{
+				FSRead: []string{wd, dir1},
+			},
+		},
+		{
+			name: "SecretFromEscapeLink",
+			opt: build.Options{
+				SecretSpecs: []*pb.Secret{
+					{
+						FilePath: escapeLink,
+					},
+				},
+			},
+			conf: EntitlementConf{
+				FSRead: []string{wd, dir1},
+			},
+			expected: EntitlementConf{
+				FSRead: []string{filepath.Join(expDir1, "../..")},
+			},
+		},
+		{
+			name: "SecretFromEscapeLinkAllowRoot",
+			opt: build.Options{
+				SecretSpecs: []*pb.Secret{
+					{
+						FilePath: escapeLink,
+					},
+				},
+			},
+			conf: EntitlementConf{
+				FSRead: []string{"/"},
+			},
+			expected: EntitlementConf{
+				FSRead: func() []string {
+					// on windows root (/) is only allowed if it is the same volume as wd
+					if filepath.VolumeName(wd) == filepath.VolumeName(escapeLink) {
+						return nil
+					}
+					// if not, then escapeLink is not allowed
+					exp, _, err := evaluateToExistingPath(escapeLink)
+					require.NoError(t, err)
+					exp, err = filepath.EvalSymlinks(exp)
+					require.NoError(t, err)
+					return []string{exp}
+				}(),
+			},
+		},
+		{
+			name: "SecretFromEscapeLinkAllowAny",
+			opt: build.Options{
+				SecretSpecs: []*pb.Secret{
+					{
+						FilePath: escapeLink,
+					},
+				},
+			},
+			conf: EntitlementConf{
+				FSRead: []string{"*"},
+			},
+			expected: EntitlementConf{},
+		},
+		{
+			name: "NonExistingAllowedPathSubpath",
+			opt: build.Options{
+				ExportsLocalPathsTemporary: []string{
+					dir1,
+				},
+			},
+			conf: EntitlementConf{
+				FSRead:  []string{wd},
+				FSWrite: []string{filepath.Join(dir1, "not/exists")},
+			},
+			expected: EntitlementConf{
+				FSWrite: []string{expDir1}, // dir1 is still needed as only subpath was allowed
+			},
+		},
+		{
+			name: "NonExistingAllowedPathMatches",
+			opt: build.Options{
+				ExportsLocalPathsTemporary: []string{
+					filepath.Join(dir1, "not/exists"),
+				},
+			},
+			conf: EntitlementConf{
+				FSRead:  []string{wd},
+				FSWrite: []string{filepath.Join(dir1, "not/exists")},
+			},
+			expected: EntitlementConf{
+				FSWrite: []string{expDir1}, // dir1 is still needed as build also needs to write not/exists directory
+			},
+		},
+		{
+			name: "NonExistingBuildPath",
+			opt: build.Options{
+				ExportsLocalPathsTemporary: []string{
+					filepath.Join(dir1, "not/exists"),
+				},
+			},
+			conf: EntitlementConf{
+				FSRead:  []string{wd},
+				FSWrite: []string{dir1},
+			},
+		},
+	}
+
+	for _, tc := range tcases {
+		t.Run(tc.name, func(t *testing.T) {
+			expected, err := tc.conf.Validate(map[string]build.Options{"test": tc.opt})
+			require.NoError(t, err)
+			require.Equal(t, tc.expected, expected)
+		})
+	}
+}
+
+func TestGroupSamePaths(t *testing.T) {
+	tests := []struct {
+		name      string
+		in1       []string
+		in2       []string
+		expected1 []string
+		expected2 []string
+		expectedC []string
+	}{
+		{
+			name:      "All common paths",
+			in1:       []string{"/path/a", "/path/b", "/path/c"},
+			in2:       []string{"/path/a", "/path/b", "/path/c"},
+			expected1: []string{},
+			expected2: []string{},
+			expectedC: []string{"/path/a", "/path/b", "/path/c"},
+		},
+		{
+			name:      "No common paths",
+			in1:       []string{"/path/a", "/path/b"},
+			in2:       []string{"/path/c", "/path/d"},
+			expected1: []string{"/path/a", "/path/b"},
+			expected2: []string{"/path/c", "/path/d"},
+			expectedC: []string{},
+		},
+		{
+			name:      "Some common paths",
+			in1:       []string{"/path/a", "/path/b", "/path/c"},
+			in2:       []string{"/path/b", "/path/c", "/path/d"},
+			expected1: []string{"/path/a"},
+			expected2: []string{"/path/d"},
+			expectedC: []string{"/path/b", "/path/c"},
+		},
+		{
+			name:      "Empty inputs",
+			in1:       []string{},
+			in2:       []string{},
+			expected1: []string{},
+			expected2: []string{},
+			expectedC: []string{},
+		},
+		{
+			name:      "One empty input",
+			in1:       []string{"/path/a", "/path/b"},
+			in2:       []string{},
+			expected1: []string{"/path/a", "/path/b"},
+			expected2: []string{},
+			expectedC: []string{},
+		},
+		{
+			name:      "Unsorted inputs with common paths",
+			in1:       []string{"/path/c", "/path/a", "/path/b"},
+			in2:       []string{"/path/b", "/path/c", "/path/a"},
+			expected1: []string{},
+			expected2: []string{},
+			expectedC: []string{"/path/a", "/path/b", "/path/c"},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			out1, out2, common := groupSamePaths(tt.in1, tt.in2)
+			require.Equal(t, tt.expected1, out1, "in1 should match expected1")
+			require.Equal(t, tt.expected2, out2, "in2 should match expected2")
+			require.Equal(t, tt.expectedC, common, "common should match expectedC")
+		})
+	}
+}
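A hedged usage sketch distilled from the NetworkHostSet case above:

	conf := EntitlementConf{NetworkHost: true}
	opt := build.Options{Allow: []string{entitlements.EntitlementNetworkHost.String()}}
	expected, err := conf.Validate(map[string]build.Options{"test": opt})
	// err == nil; expected.NetworkHost stays false because the entitlement
	// was already granted, but expected.FSRead still lists the context path.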
@@ -56,7 +56,7 @@ func formatHCLError(err error, files []File) error {
 			break
 		}
 	}
-	src := errdefs.Source{
+	src := &errdefs.Source{
 		Info: &pb.SourceInfo{
 			Filename: d.Subject.Filename,
 			Data:     dt,
@@ -72,7 +72,7 @@ func formatHCLError(err error, files []File) error {
 
 func toErrRange(in *hcl.Range) *pb.Range {
 	return &pb.Range{
-		Start: pb.Position{Line: int32(in.Start.Line), Character: int32(in.Start.Column)},
-		End:   pb.Position{Line: int32(in.End.Line), Character: int32(in.End.Column)},
+		Start: &pb.Position{Line: int32(in.Start.Line), Character: int32(in.Start.Column)},
+		End:   &pb.Position{Line: int32(in.End.Line), Character: int32(in.End.Column)},
 	}
 }
267	bake/hcl_test.go
@@ -2,8 +2,10 @@ package bake
 
 import (
 	"reflect"
+	"regexp"
 	"testing"
 
+	hcl "github.com/hashicorp/hcl/v2"
 	"github.com/stretchr/testify/require"
 )
 
@@ -17,6 +19,7 @@ func TestHCLBasic(t *testing.T) {
 	target "db" {
 		context = "./db"
 		tags = ["docker.io/tonistiigi/db"]
+		output = ["type=image"]
 	}
 
 	target "webapp" {
@@ -25,6 +28,9 @@ func TestHCLBasic(t *testing.T) {
 		args = {
 			buildno = "123"
 		}
+		output = [
+			{ type = "image" }
+		]
 	}
 
 	target "cross" {
@@ -49,18 +55,18 @@ func TestHCLBasic(t *testing.T) {
 	require.Equal(t, []string{"db", "webapp"}, c.Groups[0].Targets)
 
 	require.Equal(t, 4, len(c.Targets))
-	require.Equal(t, c.Targets[0].Name, "db")
+	require.Equal(t, "db", c.Targets[0].Name)
 	require.Equal(t, "./db", *c.Targets[0].Context)
 
-	require.Equal(t, c.Targets[1].Name, "webapp")
+	require.Equal(t, "webapp", c.Targets[1].Name)
 	require.Equal(t, 1, len(c.Targets[1].Args))
 	require.Equal(t, ptrstr("123"), c.Targets[1].Args["buildno"])
 
-	require.Equal(t, c.Targets[2].Name, "cross")
+	require.Equal(t, "cross", c.Targets[2].Name)
 	require.Equal(t, 2, len(c.Targets[2].Platforms))
 	require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[2].Platforms)
 
-	require.Equal(t, c.Targets[3].Name, "webapp-plus")
+	require.Equal(t, "webapp-plus", c.Targets[3].Name)
 	require.Equal(t, 1, len(c.Targets[3].Args))
 	require.Equal(t, map[string]*string{"IAMCROSS": ptrstr("true")}, c.Targets[3].Args)
 }
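These swaps (and the identical ones in the hunks that follow) align the calls with testify's require.Equal(t, expected, actual) argument order, so failure output labels the operands correctly:

	require.Equal(t, "db", c.Targets[0].Name) // expected first, then actual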
@@ -109,18 +115,18 @@ func TestHCLBasicInJSON(t *testing.T) {
 	require.Equal(t, []string{"db", "webapp"}, c.Groups[0].Targets)
 
 	require.Equal(t, 4, len(c.Targets))
-	require.Equal(t, c.Targets[0].Name, "db")
+	require.Equal(t, "db", c.Targets[0].Name)
 	require.Equal(t, "./db", *c.Targets[0].Context)
 
-	require.Equal(t, c.Targets[1].Name, "webapp")
+	require.Equal(t, "webapp", c.Targets[1].Name)
 	require.Equal(t, 1, len(c.Targets[1].Args))
 	require.Equal(t, ptrstr("123"), c.Targets[1].Args["buildno"])
 
-	require.Equal(t, c.Targets[2].Name, "cross")
+	require.Equal(t, "cross", c.Targets[2].Name)
 	require.Equal(t, 2, len(c.Targets[2].Platforms))
 	require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[2].Platforms)
 
-	require.Equal(t, c.Targets[3].Name, "webapp-plus")
+	require.Equal(t, "webapp-plus", c.Targets[3].Name)
 	require.Equal(t, 1, len(c.Targets[3].Args))
 	require.Equal(t, map[string]*string{"IAMCROSS": ptrstr("true")}, c.Targets[3].Args)
 }
@@ -146,7 +152,7 @@ func TestHCLWithFunctions(t *testing.T) {
 	require.Equal(t, []string{"webapp"}, c.Groups[0].Targets)
 
 	require.Equal(t, 1, len(c.Targets))
-	require.Equal(t, c.Targets[0].Name, "webapp")
+	require.Equal(t, "webapp", c.Targets[0].Name)
 	require.Equal(t, ptrstr("124"), c.Targets[0].Args["buildno"])
 }
 
@@ -176,7 +182,7 @@ func TestHCLWithUserDefinedFunctions(t *testing.T) {
 	require.Equal(t, []string{"webapp"}, c.Groups[0].Targets)
 
 	require.Equal(t, 1, len(c.Targets))
-	require.Equal(t, c.Targets[0].Name, "webapp")
+	require.Equal(t, "webapp", c.Targets[0].Name)
 	require.Equal(t, ptrstr("124"), c.Targets[0].Args["buildno"])
 }
 
@@ -205,7 +211,7 @@ func TestHCLWithVariables(t *testing.T) {
 	require.Equal(t, []string{"webapp"}, c.Groups[0].Targets)
 
 	require.Equal(t, 1, len(c.Targets))
-	require.Equal(t, c.Targets[0].Name, "webapp")
+	require.Equal(t, "webapp", c.Targets[0].Name)
 	require.Equal(t, ptrstr("123"), c.Targets[0].Args["buildno"])
 
 	t.Setenv("BUILD_NUMBER", "456")
@@ -218,7 +224,7 @@ func TestHCLWithVariables(t *testing.T) {
 	require.Equal(t, []string{"webapp"}, c.Groups[0].Targets)
 
 	require.Equal(t, 1, len(c.Targets))
-	require.Equal(t, c.Targets[0].Name, "webapp")
+	require.Equal(t, "webapp", c.Targets[0].Name)
 	require.Equal(t, ptrstr("456"), c.Targets[0].Args["buildno"])
 }
 
@@ -241,7 +247,7 @@ func TestHCLWithVariablesInFunctions(t *testing.T) {
 	require.NoError(t, err)
 
 	require.Equal(t, 1, len(c.Targets))
-	require.Equal(t, c.Targets[0].Name, "webapp")
+	require.Equal(t, "webapp", c.Targets[0].Name)
 	require.Equal(t, []string{"user/repo:v1"}, c.Targets[0].Tags)
 
 	t.Setenv("REPO", "docker/buildx")
@@ -250,7 +256,7 @@ func TestHCLWithVariablesInFunctions(t *testing.T) {
 	require.NoError(t, err)
 
 	require.Equal(t, 1, len(c.Targets))
-	require.Equal(t, c.Targets[0].Name, "webapp")
+	require.Equal(t, "webapp", c.Targets[0].Name)
 	require.Equal(t, []string{"docker/buildx:v1"}, c.Targets[0].Tags)
 }
 
@@ -279,7 +285,7 @@ func TestHCLMultiFileSharedVariables(t *testing.T) {
 	}, nil)
 	require.NoError(t, err)
 	require.Equal(t, 1, len(c.Targets))
-	require.Equal(t, c.Targets[0].Name, "app")
+	require.Equal(t, "app", c.Targets[0].Name)
 	require.Equal(t, ptrstr("pre-abc"), c.Targets[0].Args["v1"])
 	require.Equal(t, ptrstr("abc-post"), c.Targets[0].Args["v2"])
 
@@ -292,7 +298,7 @@ func TestHCLMultiFileSharedVariables(t *testing.T) {
 	require.NoError(t, err)
 
 	require.Equal(t, 1, len(c.Targets))
-	require.Equal(t, c.Targets[0].Name, "app")
+	require.Equal(t, "app", c.Targets[0].Name)
 	require.Equal(t, ptrstr("pre-def"), c.Targets[0].Args["v1"])
 	require.Equal(t, ptrstr("def-post"), c.Targets[0].Args["v2"])
 }
@@ -328,7 +334,7 @@ func TestHCLVarsWithVars(t *testing.T) {
 	}, nil)
 	require.NoError(t, err)
 	require.Equal(t, 1, len(c.Targets))
-	require.Equal(t, c.Targets[0].Name, "app")
+	require.Equal(t, "app", c.Targets[0].Name)
 	require.Equal(t, ptrstr("pre--ABCDEF-"), c.Targets[0].Args["v1"])
 	require.Equal(t, ptrstr("ABCDEF-post"), c.Targets[0].Args["v2"])
 
@@ -341,7 +347,7 @@ func TestHCLVarsWithVars(t *testing.T) {
 	require.NoError(t, err)
 
 	require.Equal(t, 1, len(c.Targets))
-	require.Equal(t, c.Targets[0].Name, "app")
+	require.Equal(t, "app", c.Targets[0].Name)
 	require.Equal(t, ptrstr("pre--NEWDEF-"), c.Targets[0].Args["v1"])
 	require.Equal(t, ptrstr("NEWDEF-post"), c.Targets[0].Args["v2"])
 }
@@ -366,7 +372,7 @@ func TestHCLTypedVariables(t *testing.T) {
 	require.NoError(t, err)
 
 	require.Equal(t, 1, len(c.Targets))
-	require.Equal(t, c.Targets[0].Name, "app")
+	require.Equal(t, "app", c.Targets[0].Name)
 	require.Equal(t, ptrstr("lower"), c.Targets[0].Args["v1"])
 	require.Equal(t, ptrstr("yes"), c.Targets[0].Args["v2"])
 
@@ -377,7 +383,7 @@ func TestHCLTypedVariables(t *testing.T) {
 	require.NoError(t, err)
 
 	require.Equal(t, 1, len(c.Targets))
-	require.Equal(t, c.Targets[0].Name, "app")
+	require.Equal(t, "app", c.Targets[0].Name)
 	require.Equal(t, ptrstr("higher"), c.Targets[0].Args["v1"])
 	require.Equal(t, ptrstr("no"), c.Targets[0].Args["v2"])
 
@@ -475,7 +481,7 @@ func TestHCLAttrs(t *testing.T) {
 	require.NoError(t, err)
 
 	require.Equal(t, 1, len(c.Targets))
-	require.Equal(t, c.Targets[0].Name, "app")
+	require.Equal(t, "app", c.Targets[0].Name)
 	require.Equal(t, ptrstr("attr-abcdef"), c.Targets[0].Args["v1"])
 
 	// env does not apply if no variable
@@ -484,7 +490,7 @@ func TestHCLAttrs(t *testing.T) {
 	require.NoError(t, err)
 
 	require.Equal(t, 1, len(c.Targets))
-	require.Equal(t, c.Targets[0].Name, "app")
+	require.Equal(t, "app", c.Targets[0].Name)
 	require.Equal(t, ptrstr("attr-abcdef"), c.Targets[0].Args["v1"])
 	// attr-multifile
 }
@@ -592,11 +598,172 @@ func TestHCLAttrsCustomType(t *testing.T) {
 	require.NoError(t, err)

 	require.Equal(t, 1, len(c.Targets))
-	require.Equal(t, c.Targets[0].Name, "app")
+	require.Equal(t, "app", c.Targets[0].Name)
 	require.Equal(t, []string{"linux/arm64", "linux/amd64"}, c.Targets[0].Platforms)
 	require.Equal(t, ptrstr("linux/arm64"), c.Targets[0].Args["v1"])
 }

+func TestHCLAttrsCapsuleType(t *testing.T) {
+	dt := []byte(`
+	target "app" {
+		attest = [
+			{ type = "provenance", mode = "max" },
+			"type=sbom,disabled=true,generator=foo,\"ENV1=bar,baz\",ENV2=hello",
+		]
+
+		cache-from = [
+			{ type = "registry", ref = "user/app:cache" },
+			"type=local,src=path/to/cache",
+		]
+
+		cache-to = [
+			{ type = "local", dest = "path/to/cache" },
+		]
+
+		output = [
+			{ type = "oci", dest = "../out.tar" },
+			"type=local,dest=../out",
+		]
+
+		secret = [
+			{ id = "mysecret", src = "/local/secret" },
+			{ id = "mysecret2", env = "TOKEN" },
+		]
+
+		ssh = [
+			{ id = "default" },
+			{ id = "key", paths = ["path/to/key"] },
+		]
+	}
+	`)
+
+	c, err := ParseFile(dt, "docker-bake.hcl")
+	require.NoError(t, err)
+
+	require.Equal(t, 1, len(c.Targets))
+	require.Equal(t, []string{"type=provenance,mode=max", "type=sbom,disabled=true,\"ENV1=bar,baz\",ENV2=hello,generator=foo"}, stringify(c.Targets[0].Attest))
+	require.Equal(t, []string{"type=local,dest=../out", "type=oci,dest=../out.tar"}, stringify(c.Targets[0].Outputs))
+	require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheFrom))
+	require.Equal(t, []string{"type=local,dest=path/to/cache"}, stringify(c.Targets[0].CacheTo))
+	require.Equal(t, []string{"id=mysecret,src=/local/secret", "id=mysecret2,env=TOKEN"}, stringify(c.Targets[0].Secrets))
+	require.Equal(t, []string{"default", "key=path/to/key"}, stringify(c.Targets[0].SSH))
+}
+
+func TestHCLAttrsCapsuleType_ObjectVars(t *testing.T) {
+	dt := []byte(`
+	variable "foo" {
+		default = "bar"
+	}
+
+	target "app" {
+		cache-from = [
+			{ type = "registry", ref = "user/app:cache" },
+			"type=local,src=path/to/cache",
+		]
+
+		cache-to = [ target.app.cache-from[0] ]
+
+		output = [
+			{ type = "oci", dest = "../out.tar" },
+			"type=local,dest=../out",
+		]
+
+		secret = [
+			{ id = "mysecret", src = "/local/secret" },
+		]
+
+		ssh = [
+			{ id = "default" },
+			{ id = "key", paths = ["path/to/${target.app.output[0].type}"] },
+		]
+	}
+
+	target "web" {
+		cache-from = target.app.cache-from
+
+		output = [ "type=oci,dest=../${foo}.tar" ]
+
+		secret = [
+			{ id = target.app.output[0].type, src = "/${target.app.cache-from[1].type}/secret" },
+		]
+	}
+	`)
+
+	c, err := ParseFile(dt, "docker-bake.hcl")
+	require.NoError(t, err)
+
+	require.Equal(t, 2, len(c.Targets))
+
+	findTarget := func(t *testing.T, name string) *Target {
+		t.Helper()
+		for _, tgt := range c.Targets {
+			if tgt.Name == name {
+				return tgt
+			}
+		}
+		t.Fatalf("could not find target %q", name)
+		return nil
+	}
+
+	app := findTarget(t, "app")
+	require.Equal(t, []string{"type=local,dest=../out", "type=oci,dest=../out.tar"}, stringify(app.Outputs))
+	require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(app.CacheFrom))
+	require.Equal(t, []string{"user/app:cache"}, stringify(app.CacheTo))
+	require.Equal(t, []string{"id=mysecret,src=/local/secret"}, stringify(app.Secrets))
+	require.Equal(t, []string{"default", "key=path/to/oci"}, stringify(app.SSH))
+
+	web := findTarget(t, "web")
+	require.Equal(t, []string{"type=oci,dest=../bar.tar"}, stringify(web.Outputs))
+	require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(web.CacheFrom))
+	require.Equal(t, []string{"id=oci,src=/local/secret"}, stringify(web.Secrets))
+}
+
+func TestHCLAttrsCapsuleType_MissingVars(t *testing.T) {
+	dt := []byte(`
+	target "app" {
+		attest = [
+			"type=sbom,disabled=${SBOM}",
+		]
+
+		cache-from = [
+			{ type = "registry", ref = "user/app:${FOO1}" },
+			"type=local,src=path/to/cache:${FOO2}",
+		]
+
+		cache-to = [
+			{ type = "local", dest = "path/to/${BAR}" },
+		]
+
+		output = [
+			{ type = "oci", dest = "../${OUTPUT}.tar" },
+		]
+
+		secret = [
+			{ id = "mysecret", src = "/local/${SECRET}" },
+		]
+
+		ssh = [
+			{ id = "key", paths = ["path/to/${SSH_KEY}"] },
+		]
+	}
+	`)
+
+	var diags hcl.Diagnostics
+	_, err := ParseFile(dt, "docker-bake.hcl")
+	require.ErrorAs(t, err, &diags)
+
+	re := regexp.MustCompile(`There is no variable named "([\w\d_]+)"`)
+	var actual []string
+	for _, diag := range diags {
+		if m := re.FindStringSubmatch(diag.Error()); m != nil {
+			actual = append(actual, m[1])
+		}
+	}
+	require.ElementsMatch(t,
+		[]string{"SBOM", "FOO1", "FOO2", "BAR", "OUTPUT", "SECRET", "SSH_KEY"},
+		actual)
+}
+
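The three new tests above compare against a stringify helper that is defined elsewhere in the test file and is not visible in this hunk. A plausible minimal sketch, assuming the option types implement fmt.Stringer (the exact definition is an assumption, not part of this diff):

    // Sketch of the stringify helper assumed by the tests above: render each
    // value via its String() method and sort the result, so the assertions do
    // not depend on slice order.
    func stringify[V fmt.Stringer](values []V) []string {
    	s := make([]string, len(values))
    	for i, v := range values {
    		s[i] = v.String()
    	}
    	sort.Strings(s)
    	return s
    }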
 func TestHCLMultiFileAttrs(t *testing.T) {
 	dt := []byte(`
 	variable "FOO" {
@@ -618,7 +785,7 @@ func TestHCLMultiFileAttrs(t *testing.T) {
 	}, nil)
 	require.NoError(t, err)
 	require.Equal(t, 1, len(c.Targets))
-	require.Equal(t, c.Targets[0].Name, "app")
+	require.Equal(t, "app", c.Targets[0].Name)
 	require.Equal(t, ptrstr("pre-def"), c.Targets[0].Args["v1"])

 	t.Setenv("FOO", "ghi")
@@ -630,7 +797,7 @@ func TestHCLMultiFileAttrs(t *testing.T) {
 	require.NoError(t, err)

 	require.Equal(t, 1, len(c.Targets))
-	require.Equal(t, c.Targets[0].Name, "app")
+	require.Equal(t, "app", c.Targets[0].Name)
 	require.Equal(t, ptrstr("pre-ghi"), c.Targets[0].Args["v1"])
 }

@@ -653,7 +820,7 @@ func TestHCLMultiFileGlobalAttrs(t *testing.T) {
 	}, nil)
 	require.NoError(t, err)
 	require.Equal(t, 1, len(c.Targets))
-	require.Equal(t, c.Targets[0].Name, "app")
+	require.Equal(t, "app", c.Targets[0].Name)
 	require.Equal(t, "pre-def", *c.Targets[0].Args["v1"])
 }

@@ -839,12 +1006,12 @@ func TestHCLRenameMultiFile(t *testing.T) {

 	require.Equal(t, 2, len(c.Targets))

-	require.Equal(t, c.Targets[0].Name, "bar")
+	require.Equal(t, "bar", c.Targets[0].Name)
-	require.Equal(t, *c.Targets[0].Dockerfile, "x")
+	require.Equal(t, "x", *c.Targets[0].Dockerfile)
-	require.Equal(t, *c.Targets[0].Target, "z")
+	require.Equal(t, "z", *c.Targets[0].Target)

-	require.Equal(t, c.Targets[1].Name, "foo")
+	require.Equal(t, "foo", c.Targets[1].Name)
-	require.Equal(t, *c.Targets[1].Context, "y")
+	require.Equal(t, "y", *c.Targets[1].Context)
 }

 func TestHCLMatrixBasic(t *testing.T) {
@@ -862,10 +1029,10 @@ func TestHCLMatrixBasic(t *testing.T) {
 	require.NoError(t, err)

 	require.Equal(t, 2, len(c.Targets))
-	require.Equal(t, c.Targets[0].Name, "x")
+	require.Equal(t, "x", c.Targets[0].Name)
-	require.Equal(t, c.Targets[1].Name, "y")
+	require.Equal(t, "y", c.Targets[1].Name)
-	require.Equal(t, *c.Targets[0].Dockerfile, "x.Dockerfile")
+	require.Equal(t, "x.Dockerfile", *c.Targets[0].Dockerfile)
-	require.Equal(t, *c.Targets[1].Dockerfile, "y.Dockerfile")
+	require.Equal(t, "y.Dockerfile", *c.Targets[1].Dockerfile)

 	require.Equal(t, 1, len(c.Groups))
 	require.Equal(t, "default", c.Groups[0].Name)
@@ -948,9 +1115,9 @@ func TestHCLMatrixMaps(t *testing.T) {
 	require.NoError(t, err)

 	require.Equal(t, 2, len(c.Targets))
-	require.Equal(t, c.Targets[0].Name, "aa")
+	require.Equal(t, "aa", c.Targets[0].Name)
 	require.Equal(t, c.Targets[0].Args["target"], ptrstr("valbb"))
-	require.Equal(t, c.Targets[1].Name, "cc")
+	require.Equal(t, "cc", c.Targets[1].Name)
 	require.Equal(t, c.Targets[1].Args["target"], ptrstr("valdd"))
 }

@@ -1141,7 +1308,7 @@ func TestJSONAttributes(t *testing.T) {
 	require.NoError(t, err)

 	require.Equal(t, 1, len(c.Targets))
-	require.Equal(t, c.Targets[0].Name, "app")
+	require.Equal(t, "app", c.Targets[0].Name)
 	require.Equal(t, ptrstr("pre-abc-def"), c.Targets[0].Args["v1"])
 }

@@ -1166,7 +1333,7 @@ func TestJSONFunctions(t *testing.T) {
 	require.NoError(t, err)

 	require.Equal(t, 1, len(c.Targets))
-	require.Equal(t, c.Targets[0].Name, "app")
+	require.Equal(t, "app", c.Targets[0].Name)
 	require.Equal(t, ptrstr("pre-<FOO-abc>"), c.Targets[0].Args["v1"])
 }

@@ -1184,7 +1351,7 @@ func TestJSONInvalidFunctions(t *testing.T) {
 	require.NoError(t, err)

 	require.Equal(t, 1, len(c.Targets))
-	require.Equal(t, c.Targets[0].Name, "app")
+	require.Equal(t, "app", c.Targets[0].Name)
 	require.Equal(t, ptrstr(`myfunc("foo")`), c.Targets[0].Args["v1"])
 }

@@ -1212,7 +1379,7 @@ func TestHCLFunctionInAttr(t *testing.T) {
 	require.NoError(t, err)

 	require.Equal(t, 1, len(c.Targets))
-	require.Equal(t, c.Targets[0].Name, "app")
+	require.Equal(t, "app", c.Targets[0].Name)
 	require.Equal(t, ptrstr("FOO <> [baz]"), c.Targets[0].Args["v1"])
 }

@@ -1243,7 +1410,7 @@ services:
 	require.NoError(t, err)

 	require.Equal(t, 1, len(c.Targets))
-	require.Equal(t, c.Targets[0].Name, "app")
+	require.Equal(t, "app", c.Targets[0].Name)
 	require.Equal(t, ptrstr("foo"), c.Targets[0].Args["v1"])
 	require.Equal(t, ptrstr("bar"), c.Targets[0].Args["v2"])
 	require.Equal(t, "dir", *c.Targets[0].Context)
@@ -1266,7 +1433,7 @@ func TestHCLBuiltinVars(t *testing.T) {
 	require.NoError(t, err)

 	require.Equal(t, 1, len(c.Targets))
-	require.Equal(t, c.Targets[0].Name, "app")
+	require.Equal(t, "app", c.Targets[0].Name)
 	require.Equal(t, "foo", *c.Targets[0].Context)
 	require.Equal(t, "test", *c.Targets[0].Dockerfile)
 }
@@ -1332,17 +1499,17 @@ target "b" {

 	require.Equal(t, 4, len(c.Targets))

-	require.Equal(t, c.Targets[0].Name, "metadata-a")
+	require.Equal(t, "metadata-a", c.Targets[0].Name)
 	require.Equal(t, []string{"app/a:1.0.0", "app/a:latest"}, c.Targets[0].Tags)

-	require.Equal(t, c.Targets[1].Name, "metadata-b")
+	require.Equal(t, "metadata-b", c.Targets[1].Name)
 	require.Equal(t, []string{"app/b:1.0.0", "app/b:latest"}, c.Targets[1].Tags)

-	require.Equal(t, c.Targets[2].Name, "a")
+	require.Equal(t, "a", c.Targets[2].Name)
 	require.Equal(t, ".", *c.Targets[2].Context)
 	require.Equal(t, "a", *c.Targets[2].Target)

-	require.Equal(t, c.Targets[3].Name, "b")
+	require.Equal(t, "b", c.Targets[3].Name)
 	require.Equal(t, ".", *c.Targets[3].Context)
 	require.Equal(t, "b", *c.Targets[3].Target)
 }
@@ -1389,10 +1556,10 @@ target "two" {

 	require.Equal(t, 2, len(c.Targets))

-	require.Equal(t, c.Targets[0].Name, "one")
+	require.Equal(t, "one", c.Targets[0].Name)
 	require.Equal(t, map[string]*string{"a": ptrstr("pre-ghi-jkl")}, c.Targets[0].Args)

-	require.Equal(t, c.Targets[1].Name, "two")
+	require.Equal(t, "two", c.Targets[1].Name)
 	require.Equal(t, map[string]*string{"b": ptrstr("pre-jkl")}, c.Targets[1].Args)
 }

@@ -1478,7 +1645,7 @@ func TestHCLIndexOfFunc(t *testing.T) {
 	require.Empty(t, c.Targets[1].Tags[1])
 }

-func ptrstr(s interface{}) *string {
+func ptrstr(s any) *string {
 	var n *string
 	if reflect.ValueOf(s).Kind() == reflect.String {
 		ss := s.(string)
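The ptrstr hunk is cut off here by the next file in the diff; only the signature change from interface{} to its Go 1.18 alias any is visible. The remainder of the helper presumably continues unchanged, along these lines (the tail below is an assumption, not shown in the diff):

    func ptrstr(s any) *string {
    	var n *string
    	if reflect.ValueOf(s).Kind() == reflect.String {
    		ss := s.(string)
    		n = &ss // assumed continuation: return a pointer to the string value
    	}
    	return n
    }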
bake/hclparser/LICENSE (new file, 355 lines)
@@ -0,0 +1,355 @@
+Copyright (c) 2014 HashiCorp, Inc.
+
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+means each individual or legal entity that creates, contributes to the
+creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+means the combination of the Contributions of others (if any) used by a
+Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+means Source Code Form to which the initial Contributor has attached the
+notice in Exhibit A, the Executable Form of such Source Code Form, and
+Modifications of such Source Code Form, in each case including portions
+thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+means
+
+a. that the initial Contributor has attached the notice described in
+Exhibit B to the Covered Software; or
+
+b. that the Covered Software was made available under the terms of version
+1.1 or earlier of the License, but not also under the terms of a
+Secondary License.
+
+1.6. “Executable Form”
+
+means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+means a work that combines Covered Software with other material, in a separate
+file or files, that is not Covered Software.
+
+1.8. “License”
+
+means this document.
+
+1.9. “Licensable”
+
+means having the right to grant, to the maximum extent possible, whether at the
+time of the initial grant or subsequently, any and all of the rights conveyed by
+this License.
+
+1.10. “Modifications”
+
+means any of the following:
+
+a. any file in Source Code Form that results from an addition to, deletion
+from, or modification of the contents of Covered Software; or
+
+b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+means any patent claim(s), including without limitation, method, process,
+and apparatus claims, in any patent Licensable by such Contributor that
+would be infringed, but for the grant of the License, by the making,
+using, selling, offering for sale, having made, import, or transfer of
+either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+means either the GNU General Public License, Version 2.0, the GNU Lesser
+General Public License, Version 2.1, the GNU Affero General Public
+License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+means an individual or a legal entity exercising rights under this
+License. For legal entities, “You” includes any entity that controls, is
+controlled by, or is under common control with You. For purposes of this
+definition, “control” means (a) the power, direct or indirect, to cause
+the direction or management of such entity, whether by contract or
+otherwise, or (b) ownership of more than fifty percent (50%) of the
+outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+a. under intellectual property rights (other than patent or trademark)
+Licensable by such Contributor to use, reproduce, make available,
+modify, display, perform, distribute, and otherwise exploit its
+Contributions, either on an unmodified basis, with Modifications, or as
+part of a Larger Work; and
+
+b. under Patent Claims of such Contributor to make, use, sell, offer for
+sale, have made, import, and otherwise transfer either its Contributions
+or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution become
+effective for each Contribution on the date the Contributor first distributes
+such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under this
+License. No additional rights or licenses will be implied from the distribution
+or licensing of Covered Software under this License. Notwithstanding Section
+2.1(b) above, no patent license is granted by a Contributor:
+
+a. for any code that a Contributor has removed from Covered Software; or
+
+b. for infringements caused by: (i) Your and any other third party’s
+modifications of Covered Software, or (ii) the combination of its
+Contributions with other software (except as part of its Contributor
+Version); or
+
+c. under Patent Claims infringed by Covered Software in the absence of its
+Contributions.
+
+This License does not grant any rights in the trademarks, service marks, or
+logos of any Contributor (except as may be necessary to comply with the
+notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this License
+(see Section 10.2) or under the terms of a Secondary License (if permitted
+under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its Contributions
+are its original creation(s) or it has sufficient rights to grant the
+rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under applicable
+copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under the
+terms of this License. You must inform recipients that the Source Code Form
+of the Covered Software is governed by the terms of this License, and how
+they can obtain a copy of this License. You may not attempt to alter or
+restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+a. such Covered Software must also be made available in Source Code Form,
+as described in Section 3.1, and You must inform recipients of the
+Executable Form how they can obtain a copy of such Source Code Form by
+reasonable means in a timely manner, at a charge no more than the cost
+of distribution to the recipient; and
+
+b. You may distribute such Executable Form under the terms of this License,
+or sublicense it under different terms, provided that the license for
+the Executable Form does not attempt to limit or alter the recipients’
+rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for the
+Covered Software. If the Larger Work is a combination of Covered Software
+with a work governed by one or more Secondary Licenses, and the Covered
+Software is not Incompatible With Secondary Licenses, this License permits
+You to additionally distribute such Covered Software under the terms of
+such Secondary License(s), so that the recipient of the Larger Work may, at
+their option, further distribute the Covered Software under the terms of
+either this License or such Secondary License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices (including
+copyright notices, patent notices, disclaimers of warranty, or limitations
+of liability) contained within the Source Code Form of the Covered
+Software, except that You may alter any license notices to the extent
+required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on behalf
+of any Contributor. You must make it absolutely clear that any such
+warranty, support, indemnity, or liability obligation is offered by You
+alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+If it is impossible for You to comply with any of the terms of this License
+with respect to some or all of the Covered Software due to statute, judicial
+order, or regulation then You must: (a) comply with the terms of this License
+to the maximum extent possible; and (b) describe the limitations and the code
+they affect. Such description must be placed in a text file included with all
+distributions of the Covered Software under this License. Except to the
+extent prohibited by statute or regulation, such description must be
+sufficiently detailed for a recipient of ordinary skill to be able to
+understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+fail to comply with any of its terms. However, if You become compliant,
+then the rights granted under this License from a particular Contributor
+are reinstated (a) provisionally, unless and until such Contributor
+explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+if such Contributor fails to notify You of the non-compliance by some
+reasonable means prior to 60 days after You have come back into compliance.
+Moreover, Your grants from a particular Contributor are reinstated on an
+ongoing basis if such Contributor notifies You of the non-compliance by
+some reasonable means, this is the first time You have received notice of
+non-compliance with this License from such Contributor, and You become
+compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions, counter-claims,
+and cross-claims) alleging that a Contributor Version directly or
+indirectly infringes any patent, then the rights granted to You by any and
+all Contributors for the Covered Software under Section 2.1 of this License
+shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+license agreements (excluding distributors and resellers) which have been
+validly granted by You or Your distributors under this License prior to
+termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+Covered Software is provided under this License on an “as is” basis, without
+warranty of any kind, either expressed, implied, or statutory, including,
+without limitation, warranties that the Covered Software is free of defects,
+merchantable, fit for a particular purpose or non-infringing. The entire
+risk as to the quality and performance of the Covered Software is with You.
+Should any Covered Software prove defective in any respect, You (not any
+Contributor) assume the cost of any necessary servicing, repair, or
+correction. This disclaimer of warranty constitutes an essential part of this
+License. No use of any Covered Software is authorized under this License
+except under this disclaimer.
+
+7. Limitation of Liability
+
+Under no circumstances and under no legal theory, whether tort (including
+negligence), contract, or otherwise, shall any Contributor, or anyone who
+distributes Covered Software as permitted above, be liable to You for any
+direct, indirect, special, incidental, or consequential damages of any
+character including, without limitation, damages for lost profits, loss of
+goodwill, work stoppage, computer failure or malfunction, or any and all
+other commercial damages or losses, even if such party shall have been
+informed of the possibility of such damages. This limitation of liability
+shall not apply to liability for death or personal injury resulting from such
+party’s negligence to the extent applicable law prohibits such limitation.
+Some jurisdictions do not allow the exclusion or limitation of incidental or
+consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+Any litigation relating to this License may be brought only in the courts of
+a jurisdiction where the defendant maintains its principal place of business
+and such litigation shall be governed by laws of that jurisdiction, without
+reference to its conflict-of-law provisions. Nothing in this Section shall
+prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+This License represents the complete agreement concerning the subject matter
+hereof. If any provision of this License is held to be unenforceable, such
+provision shall be reformed only to the extent necessary to make it
+enforceable. Any law or regulation which provides that the language of a
+contract shall be construed against the drafter shall not be used to construe
+this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version of
+the License under which You originally received the Covered Software, or
+under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a modified
+version of this License if you rename the license and remove any
+references to the name of the license steward (except to note that such
+modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+This Source Code Form is subject to the
+terms of the Mozilla Public License, v.
+2.0. If a copy of the MPL was not
+distributed with this file, You can
+obtain one at
+http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+This Source Code Form is “Incompatible
+With Secondary Licenses”, as defined by
+the Mozilla Public License, v. 2.0.
bake/hclparser/gohcl/decode.go (new file, 348 lines)
@@ -0,0 +1,348 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package gohcl
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/hashicorp/hcl/v2"
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+	"github.com/zclconf/go-cty/cty/gocty"
+)
+
+// DecodeOptions allows customizing sections of the decoding process.
+type DecodeOptions struct {
+	ImpliedType func(gv any) (cty.Type, error)
+	Convert     func(in cty.Value, want cty.Type) (cty.Value, error)
+}
+
+func (o DecodeOptions) DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val any) hcl.Diagnostics {
+	o = o.withDefaults()
+
+	rv := reflect.ValueOf(val)
+	if rv.Kind() != reflect.Ptr {
+		panic(fmt.Sprintf("target value must be a pointer, not %s", rv.Type().String()))
+	}
+
+	return o.decodeBodyToValue(body, ctx, rv.Elem())
+}
+
+// DecodeBody extracts the configuration within the given body into the given
+// value. This value must be a non-nil pointer to either a struct or
+// a map, where in the former case the configuration will be decoded using
+// struct tags and in the latter case only attributes are allowed and their
+// values are decoded into the map.
+//
+// The given EvalContext is used to resolve any variables or functions in
+// expressions encountered while decoding. This may be nil to require only
+// constant values, for simple applications that do not support variables or
+// functions.
+//
+// The returned diagnostics should be inspected with its HasErrors method to
+// determine if the populated value is valid and complete. If error diagnostics
+// are returned then the given value may have been partially-populated but
+// may still be accessed by a careful caller for static analysis and editor
+// integration use-cases.
+func DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val any) hcl.Diagnostics {
+	return DecodeOptions{}.DecodeBody(body, ctx, val)
+}
+
+func (o DecodeOptions) decodeBodyToValue(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
+	et := val.Type()
+	switch et.Kind() {
+	case reflect.Struct:
+		return o.decodeBodyToStruct(body, ctx, val)
+	case reflect.Map:
+		return o.decodeBodyToMap(body, ctx, val)
+	default:
+		panic(fmt.Sprintf("target value must be pointer to struct or map, not %s", et.String()))
+	}
+}
+
+func (o DecodeOptions) decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
+	schema, partial := ImpliedBodySchema(val.Interface())
+
+	var content *hcl.BodyContent
+	var leftovers hcl.Body
+	var diags hcl.Diagnostics
+	if partial {
+		content, leftovers, diags = body.PartialContent(schema)
+	} else {
+		content, diags = body.Content(schema)
+	}
+	if content == nil {
+		return diags
+	}
+
+	tags := getFieldTags(val.Type())
+
+	if tags.Body != nil {
+		fieldIdx := *tags.Body
+		field := val.Type().Field(fieldIdx)
+		fieldV := val.Field(fieldIdx)
+		switch {
+		case bodyType.AssignableTo(field.Type):
+			fieldV.Set(reflect.ValueOf(body))
+
+		default:
+			diags = append(diags, o.decodeBodyToValue(body, ctx, fieldV)...)
+		}
+	}
+
+	if tags.Remain != nil {
+		fieldIdx := *tags.Remain
+		field := val.Type().Field(fieldIdx)
+		fieldV := val.Field(fieldIdx)
+		switch {
+		case bodyType.AssignableTo(field.Type):
+			fieldV.Set(reflect.ValueOf(leftovers))
+		case attrsType.AssignableTo(field.Type):
+			attrs, attrsDiags := leftovers.JustAttributes()
+			if len(attrsDiags) > 0 {
+				diags = append(diags, attrsDiags...)
+			}
+			fieldV.Set(reflect.ValueOf(attrs))
+		default:
+			diags = append(diags, o.decodeBodyToValue(leftovers, ctx, fieldV)...)
+		}
+	}
+
+	for name, fieldIdx := range tags.Attributes {
+		attr := content.Attributes[name]
+		field := val.Type().Field(fieldIdx)
+		fieldV := val.Field(fieldIdx)
+
+		if attr == nil {
+			if !exprType.AssignableTo(field.Type) {
+				continue
+			}
+
+			// As a special case, if the target is of type hcl.Expression then
+			// we'll assign an actual expression that evalues to a cty null,
+			// so the caller can deal with it within the cty realm rather
+			// than within the Go realm.
+			synthExpr := hcl.StaticExpr(cty.NullVal(cty.DynamicPseudoType), body.MissingItemRange())
+			fieldV.Set(reflect.ValueOf(synthExpr))
+			continue
+		}
+
+		switch {
+		case attrType.AssignableTo(field.Type):
+			fieldV.Set(reflect.ValueOf(attr))
+		case exprType.AssignableTo(field.Type):
+			fieldV.Set(reflect.ValueOf(attr.Expr))
+		default:
+			diags = append(diags, o.DecodeExpression(
+				attr.Expr, ctx, fieldV.Addr().Interface(),
+			)...)
+		}
+	}
+
+	blocksByType := content.Blocks.ByType()
+
+	for typeName, fieldIdx := range tags.Blocks {
+		blocks := blocksByType[typeName]
+		field := val.Type().Field(fieldIdx)
+
+		ty := field.Type
+		isSlice := false
+		isPtr := false
+		if ty.Kind() == reflect.Slice {
+			isSlice = true
+			ty = ty.Elem()
+		}
+		if ty.Kind() == reflect.Ptr {
+			isPtr = true
+			ty = ty.Elem()
+		}
+
+		if len(blocks) > 1 && !isSlice {
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  fmt.Sprintf("Duplicate %s block", typeName),
+				Detail: fmt.Sprintf(
+					"Only one %s block is allowed. Another was defined at %s.",
+					typeName, blocks[0].DefRange.String(),
+				),
+				Subject: &blocks[1].DefRange,
+			})
+			continue
+		}
+
+		if len(blocks) == 0 {
+			if isSlice || isPtr {
+				if val.Field(fieldIdx).IsNil() {
+					val.Field(fieldIdx).Set(reflect.Zero(field.Type))
+				}
+			} else {
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  fmt.Sprintf("Missing %s block", typeName),
+					Detail:   fmt.Sprintf("A %s block is required.", typeName),
+					Subject:  body.MissingItemRange().Ptr(),
+				})
+			}
+			continue
+		}
+
+		switch {
+		case isSlice:
+			elemType := ty
+			if isPtr {
+				elemType = reflect.PointerTo(ty)
+			}
+			sli := val.Field(fieldIdx)
+			if sli.IsNil() {
+				sli = reflect.MakeSlice(reflect.SliceOf(elemType), len(blocks), len(blocks))
+			}
+
+			for i, block := range blocks {
+				if isPtr {
+					if i >= sli.Len() {
+						sli = reflect.Append(sli, reflect.New(ty))
+					}
+					v := sli.Index(i)
+					if v.IsNil() {
+						v = reflect.New(ty)
+					}
+					diags = append(diags, o.decodeBlockToValue(block, ctx, v.Elem())...)
+					sli.Index(i).Set(v)
+				} else {
+					if i >= sli.Len() {
+						sli = reflect.Append(sli, reflect.Indirect(reflect.New(ty)))
+					}
+					diags = append(diags, o.decodeBlockToValue(block, ctx, sli.Index(i))...)
+				}
+			}
+
+			if sli.Len() > len(blocks) {
+				sli.SetLen(len(blocks))
+			}
+
+			val.Field(fieldIdx).Set(sli)
+
+		default:
+			block := blocks[0]
+			if isPtr {
+				v := val.Field(fieldIdx)
+				if v.IsNil() {
+					v = reflect.New(ty)
+				}
+				diags = append(diags, o.decodeBlockToValue(block, ctx, v.Elem())...)
+				val.Field(fieldIdx).Set(v)
+			} else {
+				diags = append(diags, o.decodeBlockToValue(block, ctx, val.Field(fieldIdx))...)
+			}
+		}
+	}
+
+	return diags
+}
+
+func (o DecodeOptions) decodeBodyToMap(body hcl.Body, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics {
+	attrs, diags := body.JustAttributes()
+	if attrs == nil {
+		return diags
+	}
+
+	mv := reflect.MakeMap(v.Type())
+
+	for k, attr := range attrs {
+		switch {
+		case attrType.AssignableTo(v.Type().Elem()):
+			mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr))
+		case exprType.AssignableTo(v.Type().Elem()):
+			mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr.Expr))
+		default:
+			ev := reflect.New(v.Type().Elem())
+			diags = append(diags, o.DecodeExpression(attr.Expr, ctx, ev.Interface())...)
+			mv.SetMapIndex(reflect.ValueOf(k), ev.Elem())
+		}
+	}
+
+	v.Set(mv)
+
+	return diags
+}
+
+func (o DecodeOptions) decodeBlockToValue(block *hcl.Block, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics {
+	diags := o.decodeBodyToValue(block.Body, ctx, v)
+
+	if len(block.Labels) > 0 {
+		blockTags := getFieldTags(v.Type())
+		for li, lv := range block.Labels {
+			lfieldIdx := blockTags.Labels[li].FieldIndex
+			v.Field(lfieldIdx).Set(reflect.ValueOf(lv))
+		}
+	}
+
+	return diags
+}
+
+func (o DecodeOptions) DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val any) hcl.Diagnostics {
+	o = o.withDefaults()
+
+	srcVal, diags := expr.Value(ctx)
+
+	convTy, err := o.ImpliedType(val)
+	if err != nil {
+		panic(fmt.Sprintf("unsuitable DecodeExpression target: %s", err))
+	}
+
+	srcVal, err = o.Convert(srcVal, convTy)
+	if err != nil {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Unsuitable value type",
+			Detail:   fmt.Sprintf("Unsuitable value: %s", err.Error()),
+			Subject:  expr.StartRange().Ptr(),
+			Context:  expr.Range().Ptr(),
+		})
+		return diags
+	}
+
+	err = gocty.FromCtyValue(srcVal, val)
+	if err != nil {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Unsuitable value type",
+			Detail:   fmt.Sprintf("Unsuitable value: %s", err.Error()),
+			Subject:  expr.StartRange().Ptr(),
+			Context:  expr.Range().Ptr(),
+		})
+	}
+
+	return diags
+}
+
+// DecodeExpression extracts the value of the given expression into the given
+// value. This value must be something that gocty is able to decode into,
+// since the final decoding is delegated to that package.
+//
+// The given EvalContext is used to resolve any variables or functions in
+// expressions encountered while decoding. This may be nil to require only
+// constant values, for simple applications that do not support variables or
+// functions.
+//
+// The returned diagnostics should be inspected with its HasErrors method to
+// determine if the populated value is valid and complete. If error diagnostics
+// are returned then the given value may have been partially-populated but
+// may still be accessed by a careful caller for static analysis and editor
+// integration use-cases.
+func DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val any) hcl.Diagnostics {
+	return DecodeOptions{}.DecodeExpression(expr, ctx, val)
+}
+
+func (o DecodeOptions) withDefaults() DecodeOptions {
+	if o.ImpliedType == nil {
+		o.ImpliedType = gocty.ImpliedType
+	}
+
+	if o.Convert == nil {
+		o.Convert = convert.Convert
+	}
+	return o
+}
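decode.go is a vendored copy of HashiCorp's gohcl decoder; the point of carrying it in-tree is the exported DecodeOptions hooks, which let the bake HCL parser substitute its own type-inference and conversion logic (for example for the capsule types exercised by the tests earlier in this diff). The file also references helpers that are not part of this hunk (ImpliedBodySchema, getFieldTags, and the bodyType/attrsType/attrType/exprType reflection singletons); in upstream gohcl those live in sibling files, and presumably the vendored package carries them too. A minimal usage sketch follows; the import path and the config struct are illustrative assumptions, not code from this diff:

    package main

    import (
    	"github.com/hashicorp/hcl/v2"
    	"github.com/zclconf/go-cty/cty/convert"

    	// assumed import path for the vendored package added by this diff
    	gohcl "github.com/docker/buildx/bake/hclparser/gohcl"
    )

    type appConfig struct {
    	Name string `hcl:"name"`
    }

    // decodeExample decodes an HCL body into appConfig. Leaving a hook nil
    // falls back to the gocty defaults, so this is equivalent to the plain
    // DecodeBody wrapper; setting ImpliedType or Convert customizes decoding.
    func decodeExample(body hcl.Body) (appConfig, hcl.Diagnostics) {
    	var cfg appConfig
    	opts := gohcl.DecodeOptions{Convert: convert.Convert}
    	diags := opts.DecodeBody(body, nil, &cfg)
    	return cfg, diags
    }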
806
bake/hclparser/gohcl/decode_test.go
Normal file
806
bake/hclparser/gohcl/decode_test.go
Normal file
@@ -0,0 +1,806 @@
|
|||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
package gohcl
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/davecgh/go-spew/spew"
|
||||||
|
"github.com/hashicorp/hcl/v2"
|
||||||
|
hclJSON "github.com/hashicorp/hcl/v2/json"
|
||||||
|
"github.com/zclconf/go-cty/cty"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestDecodeBody(t *testing.T) {
|
||||||
|
deepEquals := func(other any) func(v any) bool {
|
||||||
|
return func(v any) bool {
|
||||||
|
return reflect.DeepEqual(v, other)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type withNameExpression struct {
|
||||||
|
Name hcl.Expression `hcl:"name"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type withTwoAttributes struct {
|
||||||
|
A string `hcl:"a,optional"`
|
||||||
|
B string `hcl:"b,optional"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type withNestedBlock struct {
|
||||||
|
Plain string `hcl:"plain,optional"`
|
||||||
|
Nested *withTwoAttributes `hcl:"nested,block"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type withListofNestedBlocks struct {
|
||||||
|
Nested []*withTwoAttributes `hcl:"nested,block"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type withListofNestedBlocksNoPointers struct {
|
||||||
|
Nested []withTwoAttributes `hcl:"nested,block"`
|
||||||
|
}
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
Body map[string]any
|
||||||
|
Target func() any
|
||||||
|
Check func(v any) bool
|
||||||
|
DiagCount int
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
map[string]any{},
|
||||||
|
makeInstantiateType(struct{}{}),
|
||||||
|
deepEquals(struct{}{}),
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
}{}),
|
||||||
|
deepEquals(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
}{}),
|
||||||
|
1, // name is required
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Name *string `hcl:"name"`
|
||||||
|
}{}),
|
||||||
|
deepEquals(struct {
|
||||||
|
Name *string `hcl:"name"`
|
||||||
|
}{}),
|
||||||
|
0,
|
||||||
|
}, // name nil
|
||||||
|
{
|
||||||
|
map[string]any{},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Name string `hcl:"name,optional"`
|
||||||
|
}{}),
|
||||||
|
deepEquals(struct {
|
||||||
|
Name string `hcl:"name,optional"`
|
||||||
|
}{}),
|
||||||
|
0,
|
||||||
|
}, // name optional
|
||||||
|
{
|
||||||
|
map[string]any{},
|
||||||
|
makeInstantiateType(withNameExpression{}),
|
||||||
|
func(v any) bool {
|
||||||
|
if v == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
wne, valid := v.(withNameExpression)
|
||||||
|
if !valid {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if wne.Name == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
nameVal, _ := wne.Name.Value(nil)
|
||||||
|
return nameVal.IsNull()
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
},
|
||||||
|
makeInstantiateType(withNameExpression{}),
|
||||||
|
func(v any) bool {
|
||||||
|
if v == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
wne, valid := v.(withNameExpression)
|
||||||
|
if !valid {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if wne.Name == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
nameVal, _ := wne.Name.Value(nil)
|
||||||
|
return nameVal.Equals(cty.StringVal("Ermintrude")).True()
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
}{}),
|
||||||
|
deepEquals(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
}{"Ermintrude"}),
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
"age": 23,
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
}{}),
|
||||||
|
deepEquals(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
}{"Ermintrude"}),
|
||||||
|
1, // Extraneous "age" property
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
"age": 50,
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
Attrs hcl.Attributes `hcl:",remain"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
got := gotI.(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
Attrs hcl.Attributes `hcl:",remain"`
|
||||||
|
})
|
||||||
|
return got.Name == "Ermintrude" && len(got.Attrs) == 1 && got.Attrs["age"] != nil
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
"age": 50,
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
Remain hcl.Body `hcl:",remain"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
got := gotI.(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
Remain hcl.Body `hcl:",remain"`
|
||||||
|
})
|
||||||
|
|
||||||
|
attrs, _ := got.Remain.JustAttributes()
|
||||||
|
|
||||||
|
return got.Name == "Ermintrude" && len(attrs) == 1 && attrs["age"] != nil
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
"living": true,
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
Remain map[string]cty.Value `hcl:",remain"`
|
||||||
|
}{}),
|
||||||
|
deepEquals(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
Remain map[string]cty.Value `hcl:",remain"`
|
||||||
|
}{
|
||||||
|
Name: "Ermintrude",
|
||||||
|
Remain: map[string]cty.Value{
|
||||||
|
"living": cty.True,
|
||||||
|
},
|
||||||
|
}),
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
"age": 50,
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
Body hcl.Body `hcl:",body"`
|
||||||
|
Remain hcl.Body `hcl:",remain"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
got := gotI.(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
Body hcl.Body `hcl:",body"`
|
||||||
|
Remain hcl.Body `hcl:",remain"`
|
||||||
|
})
|
||||||
|
|
||||||
|
attrs, _ := got.Body.JustAttributes()
|
||||||
|
|
||||||
|
return got.Name == "Ermintrude" && len(attrs) == 2 &&
|
||||||
|
attrs["name"] != nil && attrs["age"] != nil
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": map[string]any{},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
// Generating no diagnostics is good enough for this one.
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": []map[string]any{{}},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
// Generating no diagnostics is good enough for this one.
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": []map[string]any{{}, {}},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
// Generating one diagnostic is good enough for this one.
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
// Generating one diagnostic is good enough for this one.
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": []map[string]any{},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
// Generating one diagnostic is good enough for this one.
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": map[string]any{},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle *struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
return gotI.(struct {
|
||||||
|
Noodle *struct{} `hcl:"noodle,block"`
|
||||||
|
}).Noodle != nil
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": []map[string]any{{}},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle *struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
return gotI.(struct {
|
||||||
|
Noodle *struct{} `hcl:"noodle,block"`
|
||||||
|
}).Noodle != nil
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": []map[string]any{},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle *struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
return gotI.(struct {
|
||||||
|
Noodle *struct{} `hcl:"noodle,block"`
|
||||||
|
}).Noodle == nil
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": []map[string]any{{}, {}},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle *struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
// Generating one diagnostic is good enough for this one.
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": []map[string]any{},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle []struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
noodle := gotI.(struct {
|
||||||
|
Noodle []struct{} `hcl:"noodle,block"`
|
||||||
|
}).Noodle
|
||||||
|
return len(noodle) == 0
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": []map[string]any{{}},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle []struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
noodle := gotI.(struct {
|
||||||
|
Noodle []struct{} `hcl:"noodle,block"`
|
||||||
|
}).Noodle
|
||||||
|
return len(noodle) == 1
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": []map[string]any{{}, {}},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle []struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
noodle := gotI.(struct {
|
||||||
|
Noodle []struct{} `hcl:"noodle,block"`
|
||||||
|
}).Noodle
|
||||||
|
return len(noodle) == 2
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": map[string]any{},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle struct {
|
||||||
|
Name string `hcl:"name,label"`
|
||||||
|
} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
			func(gotI any) bool {
				// Generating two diagnostics is good enough for this one.
				// (one for the missing noodle block and the other for
				// the JSON serialization detecting the missing level of
				// hierarchy for the label.)
				return true
			},
			2,
		},
		{
			map[string]any{
				"noodle": map[string]any{
					"foo_foo": map[string]any{},
				},
			},
			makeInstantiateType(struct {
				Noodle struct {
					Name string `hcl:"name,label"`
				} `hcl:"noodle,block"`
			}{}),
			func(gotI any) bool {
				noodle := gotI.(struct {
					Noodle struct {
						Name string `hcl:"name,label"`
					} `hcl:"noodle,block"`
				}).Noodle
				return noodle.Name == "foo_foo"
			},
			0,
		},
		{
			map[string]any{
				"noodle": map[string]any{
					"foo_foo": map[string]any{},
					"bar_baz": map[string]any{},
				},
			},
			makeInstantiateType(struct {
				Noodle struct {
					Name string `hcl:"name,label"`
				} `hcl:"noodle,block"`
			}{}),
			func(gotI any) bool {
				// One diagnostic is enough for this one.
				return true
			},
			1,
		},
		{
			map[string]any{
				"noodle": map[string]any{
					"foo_foo": map[string]any{},
					"bar_baz": map[string]any{},
				},
			},
			makeInstantiateType(struct {
				Noodles []struct {
					Name string `hcl:"name,label"`
				} `hcl:"noodle,block"`
			}{}),
			func(gotI any) bool {
				noodles := gotI.(struct {
					Noodles []struct {
						Name string `hcl:"name,label"`
					} `hcl:"noodle,block"`
				}).Noodles
				return len(noodles) == 2 &&
					(noodles[0].Name == "foo_foo" || noodles[0].Name == "bar_baz") &&
					(noodles[1].Name == "foo_foo" || noodles[1].Name == "bar_baz") &&
					noodles[0].Name != noodles[1].Name
			},
			0,
		},
		{
			map[string]any{
				"noodle": map[string]any{
					"foo_foo": map[string]any{
						"type": "rice",
					},
				},
			},
			makeInstantiateType(struct {
				Noodle struct {
					Name string `hcl:"name,label"`
					Type string `hcl:"type"`
				} `hcl:"noodle,block"`
			}{}),
			func(gotI any) bool {
				noodle := gotI.(struct {
					Noodle struct {
						Name string `hcl:"name,label"`
						Type string `hcl:"type"`
					} `hcl:"noodle,block"`
				}).Noodle
				return noodle.Name == "foo_foo" && noodle.Type == "rice"
			},
			0,
		},

		{
			map[string]any{
				"name": "Ermintrude",
				"age":  34,
			},
			makeInstantiateType(map[string]string(nil)),
			deepEquals(map[string]string{
				"name": "Ermintrude",
				"age":  "34",
			}),
			0,
		},
		{
			map[string]any{
				"name": "Ermintrude",
				"age":  89,
			},
			makeInstantiateType(map[string]*hcl.Attribute(nil)),
			func(gotI any) bool {
				got := gotI.(map[string]*hcl.Attribute)
				return len(got) == 2 && got["name"] != nil && got["age"] != nil
			},
			0,
		},
		{
			map[string]any{
				"name": "Ermintrude",
				"age":  13,
			},
			makeInstantiateType(map[string]hcl.Expression(nil)),
			func(gotI any) bool {
				got := gotI.(map[string]hcl.Expression)
				return len(got) == 2 && got["name"] != nil && got["age"] != nil
			},
			0,
		},
		{
			map[string]any{
				"name":   "Ermintrude",
				"living": true,
			},
			makeInstantiateType(map[string]cty.Value(nil)),
			deepEquals(map[string]cty.Value{
				"name":   cty.StringVal("Ermintrude"),
				"living": cty.True,
			}),
			0,
		},
		{
			// Retain "nested" block while decoding
			map[string]any{
				"plain": "foo",
			},
			func() any {
				return &withNestedBlock{
					Plain: "bar",
					Nested: &withTwoAttributes{
						A: "bar",
					},
				}
			},
			func(gotI any) bool {
				foo := gotI.(withNestedBlock)
				return foo.Plain == "foo" && foo.Nested != nil && foo.Nested.A == "bar"
			},
			0,
		},
		{
			// Retain values in "nested" block while decoding
			map[string]any{
				"nested": map[string]any{
					"a": "foo",
				},
			},
			func() any {
				return &withNestedBlock{
					Nested: &withTwoAttributes{
						B: "bar",
					},
				}
			},
			func(gotI any) bool {
				foo := gotI.(withNestedBlock)
				return foo.Nested.A == "foo" && foo.Nested.B == "bar"
			},
			0,
		},
		{
			// Retain values in "nested" block list while decoding
			map[string]any{
				"nested": []map[string]any{
					{
						"a": "foo",
					},
				},
			},
			func() any {
				return &withListofNestedBlocks{
					Nested: []*withTwoAttributes{
						{
							B: "bar",
						},
					},
				}
			},
			func(gotI any) bool {
				n := gotI.(withListofNestedBlocks)
				return n.Nested[0].A == "foo" && n.Nested[0].B == "bar"
			},
			0,
		},
		{
			// Remove additional elements from the list while decoding nested blocks
			map[string]any{
				"nested": []map[string]any{
					{
						"a": "foo",
					},
				},
			},
			func() any {
				return &withListofNestedBlocks{
					Nested: []*withTwoAttributes{
						{
							B: "bar",
						},
						{
							B: "bar",
						},
					},
				}
			},
			func(gotI any) bool {
				n := gotI.(withListofNestedBlocks)
				return len(n.Nested) == 1
			},
			0,
		},
		{
			// Make sure decoding value slices works the same as pointer slices.
			map[string]any{
				"nested": []map[string]any{
					{
						"b": "bar",
					},
					{
						"b": "baz",
					},
				},
			},
			func() any {
				return &withListofNestedBlocksNoPointers{
					Nested: []withTwoAttributes{
						{
							B: "foo",
						},
					},
				}
			},
			func(gotI any) bool {
				n := gotI.(withListofNestedBlocksNoPointers)
				return n.Nested[0].B == "bar" && len(n.Nested) == 2
			},
			0,
		},
	}

	for i, test := range tests {
		// For convenience here we're going to use the JSON parser
		// to process the given body.
		buf, err := json.Marshal(test.Body)
		if err != nil {
			t.Fatalf("error JSON-encoding body for test %d: %s", i, err)
		}

		t.Run(string(buf), func(t *testing.T) {
			file, diags := hclJSON.Parse(buf, "test.json")
			if len(diags) != 0 {
				t.Fatalf("diagnostics while parsing: %s", diags.Error())
			}

			targetVal := reflect.ValueOf(test.Target())

			diags = DecodeBody(file.Body, nil, targetVal.Interface())
			if len(diags) != test.DiagCount {
				t.Errorf("wrong number of diagnostics %d; want %d", len(diags), test.DiagCount)
				for _, diag := range diags {
					t.Logf(" - %s", diag.Error())
				}
			}
			got := targetVal.Elem().Interface()
			if !test.Check(got) {
				t.Errorf("wrong result\ngot: %s", spew.Sdump(got))
			}
		})
	}
}

func TestDecodeExpression(t *testing.T) {
	tests := []struct {
		Value     cty.Value
		Target    any
		Want      any
		DiagCount int
	}{
		{
			cty.StringVal("hello"),
			"",
			"hello",
			0,
		},
		{
			cty.StringVal("hello"),
			cty.NilVal,
			cty.StringVal("hello"),
			0,
		},
		{
			cty.NumberIntVal(2),
			"",
			"2",
			0,
		},
		{
			cty.StringVal("true"),
			false,
			true,
			0,
		},
		{
			cty.NullVal(cty.String),
			"",
			"",
			1, // null value is not allowed
		},
		{
			cty.UnknownVal(cty.String),
			"",
			"",
			1, // value must be known
		},
		{
			cty.ListVal([]cty.Value{cty.True}),
			false,
			false,
			1, // bool required
		},
	}

	for i, test := range tests {
		t.Run(fmt.Sprintf("%02d", i), func(t *testing.T) {
			expr := &fixedExpression{test.Value}

			targetVal := reflect.New(reflect.TypeOf(test.Target))

			diags := DecodeExpression(expr, nil, targetVal.Interface())
			if len(diags) != test.DiagCount {
				t.Errorf("wrong number of diagnostics %d; want %d", len(diags), test.DiagCount)
				for _, diag := range diags {
					t.Logf(" - %s", diag.Error())
				}
			}
			got := targetVal.Elem().Interface()
			if !reflect.DeepEqual(got, test.Want) {
				t.Errorf("wrong result\ngot:  %#v\nwant: %#v", got, test.Want)
			}
		})
	}
}

type fixedExpression struct {
	val cty.Value
}

func (e *fixedExpression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
	return e.val, nil
}

func (e *fixedExpression) Range() (r hcl.Range) {
	return
}

func (e *fixedExpression) StartRange() (r hcl.Range) {
	return
}

func (e *fixedExpression) Variables() []hcl.Traversal {
	return nil
}

func makeInstantiateType(target any) func() any {
	return func() any {
		return reflect.New(reflect.TypeOf(target)).Interface()
	}
}
bake/hclparser/gohcl/doc.go (new file, 65 lines)
@@ -0,0 +1,65 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

// Package gohcl allows decoding HCL configurations into Go data structures.
//
// It provides a convenient and concise way of describing the schema for
// configuration and then accessing the resulting data via native Go
// types.
//
// A struct field tag scheme is used, similar to other decoding and
// unmarshalling libraries. The tags are formatted as in the following example:
//
//	ThingType string `hcl:"thing_type,attr"`
//
// Within each tag there are two comma-separated tokens. The first is the
// name of the corresponding construct in configuration, while the second
// is a keyword giving the kind of construct expected. The following
// kind keywords are supported:
//
//	attr (the default) indicates that the value is to be populated from an attribute
//	block indicates that the value is to be populated from a block
//	label indicates that the value is to be populated from a block label
//	optional is the same as attr, but the field is optional
//	remain indicates that the value is to be populated from the remaining body after populating other fields
//
// "attr" fields may either be of type *hcl.Expression, in which case the raw
// expression is assigned, or of any type accepted by gocty, in which case
// gocty will be used to assign the value to a native Go type.
//
// "block" fields may be a struct that recursively uses the same tags, or a
// slice of such structs, in which case multiple blocks of the corresponding
// type are decoded into the slice.
//
// "body" can be placed on a single field of type hcl.Body to capture
// the full hcl.Body that was decoded for a block. This does not allow leftover
// values like "remain", so a decoding error will still be returned if leftover
// fields are given. If you want to capture the decoding body PLUS leftover
// fields, you must specify a "remain" field as well to prevent errors. The
// body field and the remain field will both contain the leftover fields.
//
// "label" fields are considered only in a struct used as the type of a field
// marked as "block", and are used sequentially to capture the labels of
// the blocks being decoded. In this case, the name token is used only as
// an identifier for the label in diagnostic messages.
//
// "optional" fields behave like "attr" fields, but they are optional
// and will not give parsing errors if they are missing.
//
// "remain" can be placed on a single field that may be either of type
// hcl.Body or hcl.Attributes, in which case any remaining body content is
// placed into this field for delayed processing. If no "remain" field is
// present then any attributes or blocks not matched by another valid tag
// will cause an error diagnostic.
//
// Only a subset of this tagging/typing vocabulary is supported for the
// "Encode" family of functions. See the EncodeIntoBody docs for full details
// on the constraints there.
//
// Broadly speaking, this package deals with two types of error. The first is
// errors in the configuration itself, which are returned as diagnostics
// written with the configuration author as the target audience. The second
// is bugs in the calling program, such as invalid struct tags, which are
// surfaced via panics since there can be no useful runtime handling of such
// errors and they should certainly not be returned to the user as diagnostics.
package gohcl
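
The tag scheme documented above maps directly onto plain Go structs. As a minimal sketch of how it is used (not part of this changeset; the Config/Service types and the HCL snippet are illustrative, decoding via the upstream gohcl API that this package is forked from):

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/gohcl"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

type Service struct {
	Name string   `hcl:"name,label"` // first block label
	Exe  []string `hcl:"executable"` // required attribute
}

type Config struct {
	IOMode   string    `hcl:"io_mode"`
	Services []Service `hcl:"service,block"` // zero or more blocks
}

func main() {
	src := `
io_mode = "async"

service "http" {
  executable = ["./http-server"]
}
`
	f, diags := hclsyntax.ParseConfig([]byte(src), "example.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		log.Fatal(diags)
	}
	var cfg Config
	if diags := gohcl.DecodeBody(f.Body, nil, &cfg); diags.HasErrors() {
		log.Fatal(diags)
	}
	fmt.Println(cfg.IOMode, cfg.Services[0].Name) // async http
}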
bake/hclparser/gohcl/encode.go (new file, 192 lines)
@@ -0,0 +1,192 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package gohcl

import (
	"fmt"
	"reflect"
	"sort"

	"github.com/hashicorp/hcl/v2/hclwrite"
	"github.com/zclconf/go-cty/cty/gocty"
)

// EncodeIntoBody replaces the contents of the given hclwrite Body with
// attributes and blocks derived from the given value, which must be a
// struct value or a pointer to a struct value with the struct tags defined
// in this package.
//
// This function can work only with fully-decoded data. It will ignore any
// fields tagged as "remain", any fields that decode attributes into either
// hcl.Attribute or hcl.Expression values, and any fields that decode blocks
// into hcl.Attributes values. This function does not have enough information
// to complete the decoding of these types.
//
// Any fields tagged as "label" are ignored by this function. Use EncodeAsBlock
// to produce a whole hclwrite.Block including block labels.
//
// As long as a suitable value is given to encode and the destination body
// is non-nil, this function will always complete. It will panic in case of
// any errors in the calling program, such as passing an inappropriate type
// or a nil body.
//
// The layout of the resulting HCL source is derived from the ordering of
// the struct fields, with blank lines around nested blocks of different types.
// Fields representing attributes should usually precede those representing
// blocks so that the attributes can group together in the result. For more
// control, use the hclwrite API directly.
func EncodeIntoBody(val any, dst *hclwrite.Body) {
	rv := reflect.ValueOf(val)
	ty := rv.Type()
	if ty.Kind() == reflect.Ptr {
		rv = rv.Elem()
		ty = rv.Type()
	}
	if ty.Kind() != reflect.Struct {
		panic(fmt.Sprintf("value is %s, not struct", ty.Kind()))
	}

	tags := getFieldTags(ty)
	populateBody(rv, ty, tags, dst)
}

// EncodeAsBlock creates a new hclwrite.Block populated with the data from
// the given value, which must be a struct or pointer to struct with the
// struct tags defined in this package.
//
// If the given struct type has fields tagged with "label" tags then they
// will be used in order to annotate the created block with labels.
//
// This function has the same constraints as EncodeIntoBody and will panic
// if they are violated.
func EncodeAsBlock(val any, blockType string) *hclwrite.Block {
	rv := reflect.ValueOf(val)
	ty := rv.Type()
	if ty.Kind() == reflect.Ptr {
		rv = rv.Elem()
		ty = rv.Type()
	}
	if ty.Kind() != reflect.Struct {
		panic(fmt.Sprintf("value is %s, not struct", ty.Kind()))
	}

	tags := getFieldTags(ty)
	labels := make([]string, len(tags.Labels))
	for i, lf := range tags.Labels {
		lv := rv.Field(lf.FieldIndex)
		// We just stringify whatever we find. It should always be a string
		// but if not then we'll still do something reasonable.
		labels[i] = fmt.Sprintf("%s", lv.Interface())
	}

	block := hclwrite.NewBlock(blockType, labels)
	populateBody(rv, ty, tags, block.Body())
	return block
}

func populateBody(rv reflect.Value, ty reflect.Type, tags *fieldTags, dst *hclwrite.Body) {
	nameIdxs := make(map[string]int, len(tags.Attributes)+len(tags.Blocks))
	namesOrder := make([]string, 0, len(tags.Attributes)+len(tags.Blocks))
	for n, i := range tags.Attributes {
		nameIdxs[n] = i
		namesOrder = append(namesOrder, n)
	}
	for n, i := range tags.Blocks {
		nameIdxs[n] = i
		namesOrder = append(namesOrder, n)
	}
	sort.SliceStable(namesOrder, func(i, j int) bool {
		ni, nj := namesOrder[i], namesOrder[j]
		return nameIdxs[ni] < nameIdxs[nj]
	})

	dst.Clear()

	prevWasBlock := false
	for _, name := range namesOrder {
		fieldIdx := nameIdxs[name]
		field := ty.Field(fieldIdx)
		fieldTy := field.Type
		fieldVal := rv.Field(fieldIdx)

		if fieldTy.Kind() == reflect.Ptr {
			fieldTy = fieldTy.Elem()
			fieldVal = fieldVal.Elem()
		}

		if _, isAttr := tags.Attributes[name]; isAttr {
			if exprType.AssignableTo(fieldTy) || attrType.AssignableTo(fieldTy) {
				continue // ignore undecoded fields
			}
			if !fieldVal.IsValid() {
				continue // ignore (field value is nil pointer)
			}
			if fieldTy.Kind() == reflect.Ptr && fieldVal.IsNil() {
				continue // ignore
			}
			if prevWasBlock {
				dst.AppendNewline()
				prevWasBlock = false
			}

			valTy, err := gocty.ImpliedType(fieldVal.Interface())
			if err != nil {
				panic(fmt.Sprintf("cannot encode %T as HCL expression: %s", fieldVal.Interface(), err))
			}

			val, err := gocty.ToCtyValue(fieldVal.Interface(), valTy)
			if err != nil {
				// This should never happen, since we should always be able
				// to decode into the implied type.
				panic(fmt.Sprintf("failed to encode %T as %#v: %s", fieldVal.Interface(), valTy, err))
			}

			dst.SetAttributeValue(name, val)
		} else { // must be a block, then
			elemTy := fieldTy
			isSeq := false
			if elemTy.Kind() == reflect.Slice || elemTy.Kind() == reflect.Array {
				isSeq = true
				elemTy = elemTy.Elem()
			}

			if bodyType.AssignableTo(elemTy) || attrsType.AssignableTo(elemTy) {
				continue // ignore undecoded fields
			}
			prevWasBlock = false

			if isSeq {
				l := fieldVal.Len()
				for i := range l {
					elemVal := fieldVal.Index(i)
					if !elemVal.IsValid() {
						continue // ignore (elem value is nil pointer)
					}
					if elemTy.Kind() == reflect.Ptr && elemVal.IsNil() {
						continue // ignore
					}
					block := EncodeAsBlock(elemVal.Interface(), name)
					if !prevWasBlock {
						dst.AppendNewline()
						prevWasBlock = true
					}
					dst.AppendBlock(block)
				}
			} else {
				if !fieldVal.IsValid() {
					continue // ignore (field value is nil pointer)
				}
				if elemTy.Kind() == reflect.Ptr && fieldVal.IsNil() {
					continue // ignore
				}
				block := EncodeAsBlock(fieldVal.Interface(), name)
				if !prevWasBlock {
					dst.AppendNewline()
					prevWasBlock = true
				}
				dst.AppendBlock(block)
			}
		}
	}
}
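
EncodeAsBlock is the label-aware counterpart to EncodeIntoBody. A minimal sketch (illustrative Service type; using the upstream gohcl and hclwrite APIs that this fork mirrors):

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/gohcl"
	"github.com/hashicorp/hcl/v2/hclwrite"
)

type Service struct {
	Name string   `hcl:"name,label"` // becomes the block label
	Exe  []string `hcl:"executable"`
}

func main() {
	svc := Service{Name: "web", Exe: []string{"./web"}}
	f := hclwrite.NewEmptyFile()
	// Produces and appends: service "web" { executable = ["./web"] }
	f.Body().AppendBlock(gohcl.EncodeAsBlock(&svc, "service"))
	fmt.Printf("%s", f.Bytes())
}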
bake/hclparser/gohcl/encode_test.go (new file, 67 lines)
@@ -0,0 +1,67 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package gohcl_test

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/gohcl"
	"github.com/hashicorp/hcl/v2/hclwrite"
)

func ExampleEncodeIntoBody() {
	type Service struct {
		Name string   `hcl:"name,label"`
		Exe  []string `hcl:"executable"`
	}
	type Constraints struct {
		OS   string `hcl:"os"`
		Arch string `hcl:"arch"`
	}
	type App struct {
		Name        string       `hcl:"name"`
		Desc        string       `hcl:"description"`
		Constraints *Constraints `hcl:"constraints,block"`
		Services    []Service    `hcl:"service,block"`
	}

	app := App{
		Name: "awesome-app",
		Desc: "Such an awesome application",
		Constraints: &Constraints{
			OS:   "linux",
			Arch: "amd64",
		},
		Services: []Service{
			{
				Name: "web",
				Exe:  []string{"./web", "--listen=:8080"},
			},
			{
				Name: "worker",
				Exe:  []string{"./worker"},
			},
		},
	}

	f := hclwrite.NewEmptyFile()
	gohcl.EncodeIntoBody(&app, f.Body())
	fmt.Printf("%s", f.Bytes())

	// Output:
	// name        = "awesome-app"
	// description = "Such an awesome application"
	//
	// constraints {
	//   os   = "linux"
	//   arch = "amd64"
	// }
	//
	// service "web" {
	//   executable = ["./web", "--listen=:8080"]
	// }
	// service "worker" {
	//   executable = ["./worker"]
	// }
}
bake/hclparser/gohcl/schema.go (new file, 185 lines)
@@ -0,0 +1,185 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package gohcl

import (
	"fmt"
	"reflect"
	"sort"
	"strings"

	"github.com/hashicorp/hcl/v2"
)

// ImpliedBodySchema produces a hcl.BodySchema derived from the type of the
// given value, which must be a struct value or a pointer to one. If an
// inappropriate value is passed, this function will panic.
//
// The second return argument indicates whether the given struct includes
// a "remain" field, and thus the returned schema is non-exhaustive.
//
// This uses the tags on the fields of the struct to discover how each
// field's value should be expressed within configuration. If an invalid
// mapping is attempted, this function will panic.
func ImpliedBodySchema(val any) (schema *hcl.BodySchema, partial bool) {
	ty := reflect.TypeOf(val)

	if ty.Kind() == reflect.Ptr {
		ty = ty.Elem()
	}

	if ty.Kind() != reflect.Struct {
		panic(fmt.Sprintf("given value must be struct, not %T", val))
	}

	var attrSchemas []hcl.AttributeSchema
	var blockSchemas []hcl.BlockHeaderSchema

	tags := getFieldTags(ty)

	attrNames := make([]string, 0, len(tags.Attributes))
	for n := range tags.Attributes {
		attrNames = append(attrNames, n)
	}
	sort.Strings(attrNames)
	for _, n := range attrNames {
		idx := tags.Attributes[n]
		optional := tags.Optional[n]
		field := ty.Field(idx)

		var required bool

		switch {
		case field.Type.AssignableTo(exprType):
			// If we're decoding to hcl.Expression then absence can be
			// indicated via a null value, so we don't specify that
			// the field is required during decoding.
			required = false
		case field.Type.Kind() != reflect.Ptr && !optional:
			required = true
		default:
			required = false
		}

		attrSchemas = append(attrSchemas, hcl.AttributeSchema{
			Name:     n,
			Required: required,
		})
	}

	blockNames := make([]string, 0, len(tags.Blocks))
	for n := range tags.Blocks {
		blockNames = append(blockNames, n)
	}
	sort.Strings(blockNames)
	for _, n := range blockNames {
		idx := tags.Blocks[n]
		field := ty.Field(idx)
		fty := field.Type
		if fty.Kind() == reflect.Slice {
			fty = fty.Elem()
		}
		if fty.Kind() == reflect.Ptr {
			fty = fty.Elem()
		}
		if fty.Kind() != reflect.Struct {
			panic(fmt.Sprintf(
				"hcl 'block' tag kind cannot be applied to %s field %s: struct required", field.Type.String(), field.Name,
			))
		}
		ftags := getFieldTags(fty)
		var labelNames []string
		if len(ftags.Labels) > 0 {
			labelNames = make([]string, len(ftags.Labels))
			for i, l := range ftags.Labels {
				labelNames[i] = l.Name
			}
		}

		blockSchemas = append(blockSchemas, hcl.BlockHeaderSchema{
			Type:       n,
			LabelNames: labelNames,
		})
	}

	partial = tags.Remain != nil
	schema = &hcl.BodySchema{
		Attributes: attrSchemas,
		Blocks:     blockSchemas,
	}
	return schema, partial
}

type fieldTags struct {
	Attributes map[string]int
	Blocks     map[string]int
	Labels     []labelField
	Remain     *int
	Body       *int
	Optional   map[string]bool
}

type labelField struct {
	FieldIndex int
	Name       string
}

func getFieldTags(ty reflect.Type) *fieldTags {
	ret := &fieldTags{
		Attributes: map[string]int{},
		Blocks:     map[string]int{},
		Optional:   map[string]bool{},
	}

	ct := ty.NumField()
	for i := range ct {
		field := ty.Field(i)
		tag := field.Tag.Get("hcl")
		if tag == "" {
			continue
		}

		comma := strings.Index(tag, ",")
		var name, kind string
		if comma != -1 {
			name = tag[:comma]
			kind = tag[comma+1:]
		} else {
			name = tag
			kind = "attr"
		}

		switch kind {
		case "attr":
			ret.Attributes[name] = i
		case "block":
			ret.Blocks[name] = i
		case "label":
			ret.Labels = append(ret.Labels, labelField{
				FieldIndex: i,
				Name:       name,
			})
		case "remain":
			if ret.Remain != nil {
				panic("only one 'remain' tag is permitted")
			}
			idx := i // copy, because this loop will continue assigning to i
			ret.Remain = &idx
		case "body":
			if ret.Body != nil {
				panic("only one 'body' tag is permitted")
			}
			idx := i // copy, because this loop will continue assigning to i
			ret.Body = &idx
		case "optional":
			ret.Attributes[name] = i
			ret.Optional[name] = true
		default:
			panic(fmt.Sprintf("invalid hcl field tag kind %q on %s %q", kind, field.Type.String(), field.Name))
		}
	}

	return ret
}
bake/hclparser/gohcl/schema_test.go (new file, 233 lines)
@@ -0,0 +1,233 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package gohcl

import (
	"fmt"
	"reflect"
	"testing"

	"github.com/davecgh/go-spew/spew"
	"github.com/hashicorp/hcl/v2"
)

func TestImpliedBodySchema(t *testing.T) {
	tests := []struct {
		val         any
		wantSchema  *hcl.BodySchema
		wantPartial bool
	}{
		{
			struct{}{},
			&hcl.BodySchema{},
			false,
		},
		{
			struct {
				Ignored bool
			}{},
			&hcl.BodySchema{},
			false,
		},
		{
			struct {
				Attr1 bool `hcl:"attr1"`
				Attr2 bool `hcl:"attr2"`
			}{},
			&hcl.BodySchema{
				Attributes: []hcl.AttributeSchema{
					{
						Name:     "attr1",
						Required: true,
					},
					{
						Name:     "attr2",
						Required: true,
					},
				},
			},
			false,
		},
		{
			struct {
				Attr *bool `hcl:"attr,attr"`
			}{},
			&hcl.BodySchema{
				Attributes: []hcl.AttributeSchema{
					{
						Name:     "attr",
						Required: false,
					},
				},
			},
			false,
		},
		{
			struct {
				Thing struct{} `hcl:"thing,block"`
			}{},
			&hcl.BodySchema{
				Blocks: []hcl.BlockHeaderSchema{
					{
						Type: "thing",
					},
				},
			},
			false,
		},
		{
			struct {
				Thing struct {
					Type string `hcl:"type,label"`
					Name string `hcl:"name,label"`
				} `hcl:"thing,block"`
			}{},
			&hcl.BodySchema{
				Blocks: []hcl.BlockHeaderSchema{
					{
						Type:       "thing",
						LabelNames: []string{"type", "name"},
					},
				},
			},
			false,
		},
		{
			struct {
				Thing []struct {
					Type string `hcl:"type,label"`
					Name string `hcl:"name,label"`
				} `hcl:"thing,block"`
			}{},
			&hcl.BodySchema{
				Blocks: []hcl.BlockHeaderSchema{
					{
						Type:       "thing",
						LabelNames: []string{"type", "name"},
					},
				},
			},
			false,
		},
		{
			struct {
				Thing *struct {
					Type string `hcl:"type,label"`
					Name string `hcl:"name,label"`
				} `hcl:"thing,block"`
			}{},
			&hcl.BodySchema{
				Blocks: []hcl.BlockHeaderSchema{
					{
						Type:       "thing",
						LabelNames: []string{"type", "name"},
					},
				},
			},
			false,
		},
		{
			struct {
				Thing struct {
					Name      string `hcl:"name,label"`
					Something string `hcl:"something"`
				} `hcl:"thing,block"`
			}{},
			&hcl.BodySchema{
				Blocks: []hcl.BlockHeaderSchema{
					{
						Type:       "thing",
						LabelNames: []string{"name"},
					},
				},
			},
			false,
		},
		{
			struct {
				Doodad string `hcl:"doodad"`
				Thing  struct {
					Name string `hcl:"name,label"`
				} `hcl:"thing,block"`
			}{},
			&hcl.BodySchema{
				Attributes: []hcl.AttributeSchema{
					{
						Name:     "doodad",
						Required: true,
					},
				},
				Blocks: []hcl.BlockHeaderSchema{
					{
						Type:       "thing",
						LabelNames: []string{"name"},
					},
				},
			},
			false,
		},
		{
			struct {
				Doodad string `hcl:"doodad"`
				Config string `hcl:",remain"`
			}{},
			&hcl.BodySchema{
				Attributes: []hcl.AttributeSchema{
					{
						Name:     "doodad",
						Required: true,
					},
				},
			},
			true,
		},
		{
			struct {
				Expr hcl.Expression `hcl:"expr"`
			}{},
			&hcl.BodySchema{
				Attributes: []hcl.AttributeSchema{
					{
						Name:     "expr",
						Required: false,
					},
				},
			},
			false,
		},
		{
			struct {
				Meh string `hcl:"meh,optional"`
			}{},
			&hcl.BodySchema{
				Attributes: []hcl.AttributeSchema{
					{
						Name:     "meh",
						Required: false,
					},
				},
			},
			false,
		},
	}

	for _, test := range tests {
		t.Run(fmt.Sprintf("%#v", test.val), func(t *testing.T) {
			schema, partial := ImpliedBodySchema(test.val)
			if !reflect.DeepEqual(schema, test.wantSchema) {
				t.Errorf(
					"wrong schema\ngot:  %s\nwant: %s",
					spew.Sdump(schema), spew.Sdump(test.wantSchema),
				)
			}

			if partial != test.wantPartial {
				t.Errorf(
					"wrong partial flag\ngot:  %#v\nwant: %#v",
					partial, test.wantPartial,
				)
			}
		})
	}
}
bake/hclparser/gohcl/types.go (new file, 19 lines)
@@ -0,0 +1,19 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package gohcl

import (
	"reflect"

	"github.com/hashicorp/hcl/v2"
)

var victimExpr hcl.Expression
var victimBody hcl.Body

var exprType = reflect.TypeOf(&victimExpr).Elem()
var bodyType = reflect.TypeOf(&victimBody).Elem()
var blockType = reflect.TypeOf((*hcl.Block)(nil)) //nolint:unused
var attrType = reflect.TypeOf((*hcl.Attribute)(nil))
var attrsType = reflect.TypeOf(hcl.Attributes(nil))
@@ -7,15 +7,16 @@ import (
 	"math"
 	"math/big"
 	"reflect"
+	"slices"
 	"strconv"
 	"strings"
 
+	"github.com/docker/buildx/bake/hclparser/gohcl"
 	"github.com/docker/buildx/util/userfunc"
 	"github.com/hashicorp/hcl/v2"
-	"github.com/hashicorp/hcl/v2/gohcl"
 	"github.com/pkg/errors"
 	"github.com/zclconf/go-cty/cty"
-	"github.com/zclconf/go-cty/cty/gocty"
+	"github.com/zclconf/go-cty/cty/convert"
 )
 
 type Opt struct {
@@ -28,10 +29,16 @@ type variable struct {
 	Name        string         `json:"-" hcl:"name,label"`
 	Default     *hcl.Attribute `json:"default,omitempty" hcl:"default,optional"`
 	Description string         `json:"description,omitempty" hcl:"description,optional"`
+	Validations []*variableValidation `json:"validation,omitempty" hcl:"validation,block"`
 	Body        hcl.Body       `json:"-" hcl:",body"`
 	Remain      hcl.Body       `json:"-" hcl:",remain"`
 }
 
+type variableValidation struct {
+	Condition    hcl.Expression `json:"condition" hcl:"condition"`
+	ErrorMessage hcl.Expression `json:"error_message" hcl:"error_message"`
+}
+
 type functionDef struct {
 	Name   string         `json:"-" hcl:"name,label"`
 	Params *hcl.Attribute `json:"params,omitempty" hcl:"params"`
@@ -448,7 +455,7 @@ func (p *parser) resolveBlock(block *hcl.Block, target *hcl.BodySchema) (err err
 	}
 
 	// decode!
-	diag = gohcl.DecodeBody(body(), ectx, output.Interface())
+	diag = decodeBody(body(), ectx, output.Interface())
 	if diag.HasErrors() {
 		return diag
 	}
@@ -470,11 +477,11 @@ func (p *parser) resolveBlock(block *hcl.Block, target *hcl.BodySchema) (err err
 	}
 
 	// store the result into the evaluation context (so it can be referenced)
-	outputType, err := gocty.ImpliedType(output.Interface())
+	outputType, err := ImpliedType(output.Interface())
 	if err != nil {
 		return err
 	}
-	outputValue, err := gocty.ToCtyValue(output.Interface(), outputType)
+	outputValue, err := ToCtyValue(output.Interface(), outputType)
 	if err != nil {
 		return err
 	}
@@ -486,7 +493,12 @@ func (p *parser) resolveBlock(block *hcl.Block, target *hcl.BodySchema) (err err
 		m = map[string]cty.Value{}
 	}
 	m[name] = outputValue
-	p.ectx.Variables[block.Type] = cty.MapVal(m)
+	// The logical contents of this structure are similar to a map,
+	// but it's possible for some attributes to be different in a way that's
+	// illegal for a map so we use an object here instead which is structurally
+	// equivalent but allows disparate types for different keys.
+	p.ectx.Variables[block.Type] = cty.ObjectVal(m)
 	}
 
 	return nil
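
The switch from cty.MapVal to cty.ObjectVal matters whenever two blocks resolve to different cty types. A small illustrative sketch (not from the diff) of the difference:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	m := map[string]cty.Value{
		"PORT":  cty.NumberIntVal(3000),
		"DEBUG": cty.True, // a bool next to a number
	}
	// Object types carry a distinct type per attribute, so this is fine:
	obj := cty.ObjectVal(m)
	fmt.Println(obj.Type().FriendlyName())
	// cty.MapVal(m) would panic here: a map needs one homogeneous element type.
}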
@@ -541,10 +553,67 @@ func (p *parser) resolveBlockNames(block *hcl.Block) ([]string, error) {
 	return names, nil
 }
 
+func (p *parser) validateVariables(vars map[string]*variable, ectx *hcl.EvalContext) hcl.Diagnostics {
+	var diags hcl.Diagnostics
+	for _, v := range vars {
+		for _, rule := range v.Validations {
+			resultVal, condDiags := rule.Condition.Value(ectx)
+			if condDiags.HasErrors() {
+				diags = append(diags, condDiags...)
+				continue
+			}
+
+			if resultVal.IsNull() {
+				diags = append(diags, &hcl.Diagnostic{
+					Severity:   hcl.DiagError,
+					Summary:    "Invalid condition result",
+					Detail:     "Condition expression must return either true or false, not null.",
+					Subject:    rule.Condition.Range().Ptr(),
+					Expression: rule.Condition,
+				})
+				continue
+			}
+
+			var err error
+			resultVal, err = convert.Convert(resultVal, cty.Bool)
+			if err != nil {
+				diags = append(diags, &hcl.Diagnostic{
+					Severity:   hcl.DiagError,
+					Summary:    "Invalid condition result",
+					Detail:     fmt.Sprintf("Invalid condition result value: %s", err),
+					Subject:    rule.Condition.Range().Ptr(),
+					Expression: rule.Condition,
+				})
+				continue
+			}
+
+			if !resultVal.True() {
+				message, msgDiags := rule.ErrorMessage.Value(ectx)
+				if msgDiags.HasErrors() {
+					diags = append(diags, msgDiags...)
+					continue
+				}
+				errorMessage := "This check failed, but has an invalid error message."
+				if !message.IsNull() {
+					errorMessage = message.AsString()
+				}
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Validation failed",
+					Detail:   errorMessage,
+					Subject:  rule.Condition.Range().Ptr(),
+				})
+			}
+		}
+	}
+
+	return diags
+}
+
 type Variable struct {
-	Name        string
-	Description string
-	Value       *string
+	Name        string  `json:"name"`
+	Description string  `json:"description,omitempty"`
+	Value       *string `json:"value,omitempty"`
 }
 
 type ParseMeta struct {
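
With the Validations field wired into the variable schema above, a bake file can attach Terraform-style checks to a variable. A hypothetical configuration sketch (variable name, values, and message are illustrative only):

variable "PORT" {
  default = 3000
  validation {
    condition     = PORT >= 1024
    error_message = "PORT must be 1024 or higher."
  }
}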
@@ -552,7 +621,7 @@ type ParseMeta struct {
|
|||||||
AllVariables []*Variable
|
AllVariables []*Variable
|
||||||
}
|
}
|
||||||
|
|
||||||
func Parse(b hcl.Body, opt Opt, val interface{}) (*ParseMeta, hcl.Diagnostics) {
|
func Parse(b hcl.Body, opt Opt, val any) (*ParseMeta, hcl.Diagnostics) {
|
||||||
reserved := map[string]struct{}{}
|
reserved := map[string]struct{}{}
|
||||||
schema, _ := gohcl.ImpliedBodySchema(val)
|
schema, _ := gohcl.ImpliedBodySchema(val)
|
||||||
|
|
||||||
@@ -686,6 +755,9 @@ func Parse(b hcl.Body, opt Opt, val interface{}) (*ParseMeta, hcl.Diagnostics) {
|
|||||||
}
|
}
|
||||||
vars = append(vars, v)
|
vars = append(vars, v)
|
||||||
}
|
}
|
||||||
|
if diags := p.validateVariables(p.vars, p.ectx); diags.HasErrors() {
|
||||||
|
return nil, diags
|
||||||
|
}
|
||||||
|
|
||||||
for k := range p.funcs {
|
for k := range p.funcs {
|
||||||
if err := p.resolveFunction(p.ectx, k); err != nil {
|
if err := p.resolveFunction(p.ectx, k); err != nil {
|
||||||
@@ -723,7 +795,7 @@ func Parse(b hcl.Body, opt Opt, val interface{}) (*ParseMeta, hcl.Diagnostics) {
|
|||||||
types := map[string]field{}
|
types := map[string]field{}
|
||||||
renamed := map[string]map[string][]string{}
|
renamed := map[string]map[string][]string{}
|
||||||
vt := reflect.ValueOf(val).Elem().Type()
|
vt := reflect.ValueOf(val).Elem().Type()
|
||||||
for i := 0; i < vt.NumField(); i++ {
|
for i := range vt.NumField() {
|
||||||
tags := strings.Split(vt.Field(i).Tag.Get("hcl"), ",")
|
tags := strings.Split(vt.Field(i).Tag.Get("hcl"), ",")
|
||||||
|
|
||||||
p.blockTypes[tags[0]] = vt.Field(i).Type.Elem().Elem()
|
p.blockTypes[tags[0]] = vt.Field(i).Type.Elem().Elem()
|
||||||
@@ -791,7 +863,7 @@ func Parse(b hcl.Body, opt Opt, val interface{}) (*ParseMeta, hcl.Diagnostics) {
|
|||||||
oldValue, exists := t.values[lblName]
|
oldValue, exists := t.values[lblName]
|
||||||
if !exists && lblExists {
|
if !exists && lblExists {
|
||||||
if v.Elem().Field(t.idx).Type().Kind() == reflect.Slice {
|
if v.Elem().Field(t.idx).Type().Kind() == reflect.Slice {
|
||||||
for i := 0; i < v.Elem().Field(t.idx).Len(); i++ {
|
for i := range v.Elem().Field(t.idx).Len() {
|
||||||
if lblName == v.Elem().Field(t.idx).Index(i).Elem().Field(lblIndex).String() {
|
if lblName == v.Elem().Field(t.idx).Index(i).Elem().Field(lblIndex).String() {
|
||||||
exists = true
|
exists = true
|
||||||
oldValue = value{Value: v.Elem().Field(t.idx).Index(i), idx: i}
|
oldValue = value{Value: v.Elem().Field(t.idx).Index(i), idx: i}
|
||||||
@@ -858,7 +930,7 @@ func wrapErrorDiagnostic(message string, err error, subject *hcl.Range, context
|
|||||||
|
|
||||||
func setName(v reflect.Value, name string) {
|
func setName(v reflect.Value, name string) {
|
||||||
numFields := v.Elem().Type().NumField()
|
numFields := v.Elem().Type().NumField()
|
||||||
for i := 0; i < numFields; i++ {
|
for i := range numFields {
|
||||||
parts := strings.Split(v.Elem().Type().Field(i).Tag.Get("hcl"), ",")
|
parts := strings.Split(v.Elem().Type().Field(i).Tag.Get("hcl"), ",")
|
||||||
for _, t := range parts[1:] {
|
for _, t := range parts[1:] {
|
||||||
if t == "label" {
|
if t == "label" {
|
||||||
@@ -870,27 +942,23 @@ func setName(v reflect.Value, name string) {
|
|||||||
|
|
||||||
func getName(v reflect.Value) (string, bool) {
|
func getName(v reflect.Value) (string, bool) {
|
||||||
numFields := v.Elem().Type().NumField()
|
numFields := v.Elem().Type().NumField()
|
||||||
for i := 0; i < numFields; i++ {
|
for i := range numFields {
|
||||||
parts := strings.Split(v.Elem().Type().Field(i).Tag.Get("hcl"), ",")
|
parts := strings.Split(v.Elem().Type().Field(i).Tag.Get("hcl"), ",")
|
||||||
for _, t := range parts[1:] {
|
if slices.Contains(parts[1:], "label") {
|
||||||
if t == "label" {
|
|
||||||
return v.Elem().Field(i).String(), true
|
return v.Elem().Field(i).String(), true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
return "", false
|
return "", false
|
||||||
}
|
}
|
||||||
|
|
||||||
func getNameIndex(v reflect.Value) (int, bool) {
|
func getNameIndex(v reflect.Value) (int, bool) {
|
||||||
numFields := v.Elem().Type().NumField()
|
numFields := v.Elem().Type().NumField()
|
||||||
for i := 0; i < numFields; i++ {
|
for i := range numFields {
|
||||||
parts := strings.Split(v.Elem().Type().Field(i).Tag.Get("hcl"), ",")
|
parts := strings.Split(v.Elem().Type().Field(i).Tag.Get("hcl"), ",")
|
||||||
for _, t := range parts[1:] {
|
if slices.Contains(parts[1:], "label") {
|
||||||
if t == "label" {
|
|
||||||
return i, true
|
return i, true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
return 0, false
|
return 0, false
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -947,3 +1015,8 @@ func key(ks ...any) uint64 {
|
|||||||
}
|
}
|
||||||
return hash.Sum64()
|
return hash.Sum64()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func decodeBody(body hcl.Body, ctx *hcl.EvalContext, val any) hcl.Diagnostics {
|
||||||
|
dec := gohcl.DecodeOptions{ImpliedType: ImpliedType}
|
||||||
|
return dec.DecodeBody(body, ctx, val)
|
||||||
|
}
|
||||||
|
|||||||
@@ -170,7 +170,6 @@ func indexOfFunc() function.Function {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
return cty.NilVal, errors.New("item not found")
|
return cty.NilVal, errors.New("item not found")
|
||||||
|
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
160
bake/hclparser/type_implied.go
Normal file
160
bake/hclparser/type_implied.go
Normal file
@@ -0,0 +1,160 @@
|
|||||||
|
// MIT License
|
||||||
|
//
|
||||||
|
// Copyright (c) 2017-2018 Martin Atkins
|
||||||
|
//
|
||||||
|
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
// of this software and associated documentation files (the "Software"), to deal
|
||||||
|
// in the Software without restriction, including without limitation the rights
|
||||||
|
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
// copies of the Software, and to permit persons to whom the Software is
|
||||||
|
// furnished to do so, subject to the following conditions:
|
||||||
|
//
|
||||||
|
// The above copyright notice and this permission notice shall be included in all
|
||||||
|
// copies or substantial portions of the Software.
|
||||||
|
//
|
||||||
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.

package hclparser

import (
	"reflect"

	"github.com/zclconf/go-cty/cty"
)

// ImpliedType takes an arbitrary Go value (as an interface{}) and attempts
// to find a suitable cty.Type instance that could be used for a conversion
// with ToCtyValue.
//
// This allows -- for simple situations at least -- types to be defined just
// once in Go and the cty types derived from the Go types, but in the process
// it makes some assumptions that may be undesirable so applications are
// encouraged to build their cty types directly if exacting control is
// required.
//
// Not all Go types can be represented as cty types, so an error may be
// returned which is usually considered to be a bug in the calling program.
// In particular, ImpliedType will never use capsule types in its returned
// type, because it cannot know the capsule types supported by the calling
// program.
func ImpliedType(gv any) (cty.Type, error) {
	rt := reflect.TypeOf(gv)
	var path cty.Path
	return impliedType(rt, path)
}

func impliedType(rt reflect.Type, path cty.Path) (cty.Type, error) {
	if ety, err := impliedTypeExt(rt, path); err == nil {
		return ety, nil
	}

	switch rt.Kind() {
	case reflect.Ptr:
		return impliedType(rt.Elem(), path)

	// Primitive types
	case reflect.Bool:
		return cty.Bool, nil
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return cty.Number, nil
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return cty.Number, nil
	case reflect.Float32, reflect.Float64:
		return cty.Number, nil
	case reflect.String:
		return cty.String, nil

	// Collection types
	case reflect.Slice:
		path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.Number)})
		ety, err := impliedType(rt.Elem(), path)
		if err != nil {
			return cty.NilType, err
		}
		return cty.List(ety), nil
	case reflect.Map:
		if !stringType.AssignableTo(rt.Key()) {
			return cty.NilType, path.NewErrorf("no cty.Type for %s (must have string keys)", rt)
		}
		path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.String)})
		ety, err := impliedType(rt.Elem(), path)
		if err != nil {
			return cty.NilType, err
		}
		return cty.Map(ety), nil

	// Structural types
	case reflect.Struct:
		return impliedStructType(rt, path)

	default:
		return cty.NilType, path.NewErrorf("no cty.Type for %s", rt)
	}
}

func impliedStructType(rt reflect.Type, path cty.Path) (cty.Type, error) {
	if valueType.AssignableTo(rt) {
		// Special case: cty.Value represents cty.DynamicPseudoType, for
		// type conformance checking.
		return cty.DynamicPseudoType, nil
	}

	fieldIdxs := structTagIndices(rt)
	if len(fieldIdxs) == 0 {
		return cty.NilType, path.NewErrorf("no cty.Type for %s (no cty field tags)", rt)
	}

	atys := make(map[string]cty.Type, len(fieldIdxs))

	{
		// Temporary extension of path for attributes
		path := append(path, nil)

		for k, fi := range fieldIdxs {
			path[len(path)-1] = cty.GetAttrStep{Name: k}

			ft := rt.Field(fi).Type
			aty, err := impliedType(ft, path)
			if err != nil {
				return cty.NilType, err
			}

			atys[k] = aty
		}
	}

	return cty.Object(atys), nil
}

var (
	valueType  = reflect.TypeOf(cty.Value{})
	stringType = reflect.TypeOf("")
)

// structTagIndices interrogates the fields of the given type (which must
// be a struct type, or we'll panic) and returns a map from the cty
// attribute names declared via struct tags to the indices of the
// fields holding those tags.
//
// This function will panic if two fields within the struct are tagged with
// the same cty attribute name.
func structTagIndices(st reflect.Type) map[string]int {
	ct := st.NumField()
	ret := make(map[string]int, ct)

	for i := range ct {
		field := st.Field(i)
		attrName := field.Tag.Get("cty")
		if attrName != "" {
			ret[attrName] = i
		}
	}

	return ret
}
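As a quick illustration of the tag-driven mapping, here is a minimal sketch that could sit next to this file; the target struct and helper are hypothetical, not part of the change:

	// Hypothetical struct using cty field tags, for illustration only.
	type target struct {
		Name string   `cty:"name"`
		Tags []string `cty:"tags"`
	}

	func exampleImpliedType() (cty.Type, error) {
		// Expected to yield cty.Object({"name": cty.String, "tags": cty.List(cty.String)}).
		return ImpliedType(target{})
	}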
bake/hclparser/type_implied_ext.go (new file, 166 lines)
@@ -0,0 +1,166 @@
package hclparser

import (
	"reflect"
	"sync"

	"github.com/containerd/errdefs"
	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
	"github.com/zclconf/go-cty/cty/gocty"
)

type ToCtyValueConverter interface {
	// ToCtyValue will convert this capsule value into a native
	// cty.Value. This should not return a capsule type.
	ToCtyValue() cty.Value
}

type FromCtyValueConverter interface {
	// FromCtyValue will initialize this value using a cty.Value.
	FromCtyValue(in cty.Value, path cty.Path) error
}

type extensionType int

const (
	unwrapCapsuleValueExtension extensionType = iota
)

func impliedTypeExt(rt reflect.Type, _ cty.Path) (cty.Type, error) {
	if rt.Kind() != reflect.Pointer {
		rt = reflect.PointerTo(rt)
	}

	if isCapsuleType(rt) {
		return capsuleValueCapsuleType(rt), nil
	}
	return cty.NilType, errdefs.ErrNotImplemented
}

func isCapsuleType(rt reflect.Type) bool {
	fromCtyValueType := reflect.TypeFor[FromCtyValueConverter]()
	toCtyValueType := reflect.TypeFor[ToCtyValueConverter]()
	return rt.Implements(fromCtyValueType) && rt.Implements(toCtyValueType)
}

var capsuleValueTypes sync.Map

func capsuleValueCapsuleType(rt reflect.Type) cty.Type {
	if rt.Kind() != reflect.Pointer {
		panic("capsule value must be a pointer")
	}

	elem := rt.Elem()
	if val, loaded := capsuleValueTypes.Load(elem); loaded {
		return val.(cty.Type)
	}

	toCtyValueType := reflect.TypeFor[ToCtyValueConverter]()

	// First time used. Initialize new capsule ops.
	ops := &cty.CapsuleOps{
		ConversionTo: func(_ cty.Type) func(cty.Value, cty.Path) (any, error) {
			return func(in cty.Value, p cty.Path) (any, error) {
				rv := reflect.New(elem).Interface()
				if err := rv.(FromCtyValueConverter).FromCtyValue(in, p); err != nil {
					return nil, err
				}
				return rv, nil
			}
		},
		ConversionFrom: func(want cty.Type) func(any, cty.Path) (cty.Value, error) {
			return func(in any, _ cty.Path) (cty.Value, error) {
				rv := reflect.ValueOf(in).Convert(toCtyValueType)
				v := rv.Interface().(ToCtyValueConverter).ToCtyValue()
				return convert.Convert(v, want)
			}
		},
		ExtensionData: func(key any) any {
			switch key {
			case unwrapCapsuleValueExtension:
				zero := reflect.Zero(elem).Interface()
				if conv, ok := zero.(ToCtyValueConverter); ok {
					return conv.ToCtyValue().Type()
				}

				zero = reflect.Zero(rt).Interface()
				if conv, ok := zero.(ToCtyValueConverter); ok {
					return conv.ToCtyValue().Type()
				}
			}
			return nil
		},
	}

	// Attempt to store the new type. Use whichever was loaded first in the case
	// of a race condition.
	ety := cty.CapsuleWithOps(elem.Name(), elem, ops)
	val, _ := capsuleValueTypes.LoadOrStore(elem, ety)
	return val.(cty.Type)
}

// UnwrapCtyValue will unwrap capsule type values into their native cty value
// equivalents if possible.
func UnwrapCtyValue(in cty.Value) cty.Value {
	want := toCtyValueType(in.Type())
	if in.Type().Equals(want) {
		return in
	} else if out, err := convert.Convert(in, want); err == nil {
		return out
	}
	return cty.NullVal(want)
}

func toCtyValueType(in cty.Type) cty.Type {
	if et := in.MapElementType(); et != nil {
		return cty.Map(toCtyValueType(*et))
	}

	if et := in.SetElementType(); et != nil {
		return cty.Set(toCtyValueType(*et))
	}

	if et := in.ListElementType(); et != nil {
		return cty.List(toCtyValueType(*et))
	}

	if in.IsObjectType() {
		var optional []string
		inAttrTypes := in.AttributeTypes()
		outAttrTypes := make(map[string]cty.Type, len(inAttrTypes))
		for name, typ := range inAttrTypes {
			outAttrTypes[name] = toCtyValueType(typ)
			if in.AttributeOptional(name) {
				optional = append(optional, name)
			}
		}
		return cty.ObjectWithOptionalAttrs(outAttrTypes, optional)
	}

	if in.IsTupleType() {
		inTypes := in.TupleElementTypes()
		outTypes := make([]cty.Type, len(inTypes))
		for i, typ := range inTypes {
			outTypes[i] = toCtyValueType(typ)
		}
		return cty.Tuple(outTypes)
	}

	if in.IsCapsuleType() {
		if out := in.CapsuleExtensionData(unwrapCapsuleValueExtension); out != nil {
			return out.(cty.Type)
		}
		return cty.DynamicPseudoType
	}

	return in
}

func ToCtyValue(val any, ty cty.Type) (cty.Value, error) {
	out, err := gocty.ToCtyValue(val, ty)
	if err != nil {
		return out, err
	}
	return UnwrapCtyValue(out), nil
}
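A type opts into this capsule machinery by implementing both converter interfaces above. A minimal sketch, assuming a "time" import; the duration type and its string encoding are hypothetical:

	// Hypothetical capsule value: a duration that round-trips through cty.String.
	type duration time.Duration

	// FromCtyValue initializes the value from a cty string such as "1h30m".
	func (d *duration) FromCtyValue(in cty.Value, p cty.Path) error {
		v, err := time.ParseDuration(in.AsString())
		if err != nil {
			return p.NewError(err)
		}
		*d = duration(v)
		return nil
	}

	// ToCtyValue converts back to a native cty string value.
	func (d *duration) ToCtyValue() cty.Value {
		return cty.StringVal(time.Duration(*d).String())
	}

Because *duration implements both interfaces, impliedTypeExt maps it to a capsule type, and UnwrapCtyValue can later flatten such values back to plain cty values via the unwrapCapsuleValueExtension data.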
@@ -11,6 +11,7 @@ import (
 	controllerapi "github.com/docker/buildx/controller/pb"
 	"github.com/docker/buildx/driver"
 	"github.com/docker/buildx/util/progress"
+	"github.com/docker/go-units"
 	"github.com/moby/buildkit/client"
 	"github.com/moby/buildkit/client/llb"
 	"github.com/moby/buildkit/frontend/dockerui"
@@ -19,6 +20,8 @@ import (
 	"github.com/pkg/errors"
 )
 
+const maxBakeDefinitionSize = 2 * 1024 * 1024 // 2 MB
+
 type Input struct {
 	State *llb.State
 	URL   string
@@ -106,7 +109,6 @@ func ReadRemoteFiles(ctx context.Context, nodes []builder.Node, url string, name
 		}
 		return nil, err
 	}, ch)
-
 	if err != nil {
 		return nil, nil, err
 	}
@@ -178,9 +180,9 @@ func filesFromURLRef(ctx context.Context, c gwclient.Client, ref gwclient.Refere
 	name := inp.URL
 	inp.URL = ""
 
-	if len(dt) > stat.Size() {
-		if stat.Size() > 1024*512 {
-			return nil, errors.Errorf("non-archive definition URL bigger than maximum allowed size")
+	if int64(len(dt)) > stat.Size {
+		if stat.Size > maxBakeDefinitionSize {
+			return nil, errors.Errorf("non-archive definition URL bigger than maximum allowed size (%s)", units.HumanSize(maxBakeDefinitionSize))
 		}
 
 		dt, err = ref.ReadFile(ctx, gwclient.ReadRequest{
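For reference, units.HumanSize from docker/go-units formats byte counts in decimal (base-1000) units; a minimal standalone sketch of how the new limit renders:

	package main

	import (
		"fmt"

		"github.com/docker/go-units"
	)

	func main() {
		// HumanSize uses decimal units, so the 2*1024*1024-byte limit
		// is reported as "2.097MB" in the new error message.
		fmt.Println(units.HumanSize(2 * 1024 * 1024))
	}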
@@ -8,6 +8,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
+	"maps"
 	"os"
 	"slices"
 	"strconv"
@@ -15,9 +16,10 @@ import (
 	"sync"
 	"time"
 
-	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/v2/core/images"
 	"github.com/distribution/reference"
 	"github.com/docker/buildx/builder"
+	controllerapi "github.com/docker/buildx/controller/pb"
 	"github.com/docker/buildx/driver"
 	"github.com/docker/buildx/util/confutil"
 	"github.com/docker/buildx/util/desktop"
@@ -39,7 +41,6 @@ import (
 	"github.com/moby/buildkit/solver/errdefs"
 	"github.com/moby/buildkit/solver/pb"
 	spb "github.com/moby/buildkit/sourcepolicy/pb"
-	"github.com/moby/buildkit/util/entitlements"
 	"github.com/moby/buildkit/util/progress/progresswriter"
 	"github.com/moby/buildkit/util/tracing"
 	"github.com/opencontainers/go-digest"
@@ -50,24 +51,26 @@ import (
 	fstypes "github.com/tonistiigi/fsutil/types"
 	"go.opentelemetry.io/otel/trace"
 	"golang.org/x/sync/errgroup"
+	"google.golang.org/protobuf/proto"
 )
 
 const (
-	printFallbackImage     = "docker/dockerfile:1.5@sha256:dbbd5e059e8a07ff7ea6233b213b36aa516b4c53c645f1817a4dd18b83cbea56"
-	printLintFallbackImage = "docker.io/docker/dockerfile-upstream:1.8.1@sha256:e87caa74dcb7d46cd820352bfea12591f3dba3ddc4285e19c7dcd13359f7cefd"
+	printFallbackImage     = "docker/dockerfile:1.7.1@sha256:a57df69d0ea827fb7266491f2813635de6f17269be881f696fbfdf2d83dda33e"
+	printLintFallbackImage = "docker/dockerfile:1.8.1@sha256:e87caa74dcb7d46cd820352bfea12591f3dba3ddc4285e19c7dcd13359f7cefd"
 )
 
 type Options struct {
 	Inputs Inputs
 
 	Ref          string
-	Allow        []entitlements.Entitlement
+	Allow        []string
 	Attests      map[string]*string
 	BuildArgs    map[string]string
 	CacheFrom    []client.CacheOptionsEntry
 	CacheTo      []client.CacheOptionsEntry
 	CgroupParent string
 	Exports      []client.ExportEntry
+	ExportsLocalPathsTemporary []string // should be removed after client.ExportEntry update in buildkit v0.19.0
 	ExtraHosts   []string
 	Labels       map[string]string
 	NetworkMode  string
@@ -75,6 +78,8 @@ type Options struct {
 	NoCacheFilter []string
 	Platforms     []specs.Platform
 	Pull          bool
+	SecretSpecs   []*controllerapi.Secret
+	SSHSpecs      []*controllerapi.SSH
 	ShmSize       opts.MemBytes
 	Tags          []string
 	Target        string
@@ -101,6 +106,9 @@ type Inputs struct {
 	ContextState     *llb.State
 	DockerfileInline string
 	NamedContexts    map[string]NamedContext
+	// DockerfileMappingSrc and DockerfileMappingDst are filled in by the builder.
+	DockerfileMappingSrc string
+	DockerfileMappingDst string
 }
 
 type NamedContext struct {
@@ -147,11 +155,11 @@ func toRepoOnly(in string) (string, error) {
 	return strings.Join(out, ","), nil
 }
 
-func Build(ctx context.Context, nodes []builder.Node, opt map[string]Options, docker *dockerutil.Client, configDir string, w progress.Writer) (resp map[string]*client.SolveResponse, err error) {
-	return BuildWithResultHandler(ctx, nodes, opt, docker, configDir, w, nil)
+func Build(ctx context.Context, nodes []builder.Node, opts map[string]Options, docker *dockerutil.Client, cfg *confutil.Config, w progress.Writer) (resp map[string]*client.SolveResponse, err error) {
+	return BuildWithResultHandler(ctx, nodes, opts, docker, cfg, w, nil)
 }
 
-func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[string]Options, docker *dockerutil.Client, configDir string, w progress.Writer, resultHandleFunc func(driverIndex int, rCtx *ResultHandle)) (resp map[string]*client.SolveResponse, err error) {
+func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[string]Options, docker *dockerutil.Client, cfg *confutil.Config, w progress.Writer, resultHandleFunc func(driverIndex int, rCtx *ResultHandle)) (resp map[string]*client.SolveResponse, err error) {
 	if len(nodes) == 0 {
 		return nil, errors.Errorf("driver required for build")
 	}
@@ -169,9 +177,9 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 		}
 	}
 
-	if noMobyDriver != nil && !noDefaultLoad() && noCallFunc(opt) {
+	if noMobyDriver != nil && !noDefaultLoad() && noCallFunc(opts) {
 		var noOutputTargets []string
-		for name, opt := range opt {
+		for name, opt := range opts {
 			if noMobyDriver.Features(ctx)[driver.DefaultLoad] {
 				continue
 			}
@@ -192,7 +200,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 		}
 	}
 
-	drivers, err := resolveDrivers(ctx, nodes, opt, w)
+	drivers, err := resolveDrivers(ctx, nodes, opts, w)
 	if err != nil {
 		return nil, err
 	}
@@ -209,7 +217,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 	reqForNodes := make(map[string][]*reqForNode)
 	eg, ctx := errgroup.WithContext(ctx)
 
-	for k, opt := range opt {
+	for k, opt := range opts {
 		multiDriver := len(drivers[k]) > 1
 		hasMobyDriver := false
 		addGitAttrs, err := getGitAttributes(ctx, opt.Inputs.ContextPath, opt.Inputs.DockerfilePath)
@@ -229,11 +237,13 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 			if err != nil {
 				return nil, err
 			}
-			so, release, err := toSolveOpt(ctx, np.Node(), multiDriver, opt, gatewayOpts, configDir, w, docker)
+			localOpt := opt
+			so, release, err := toSolveOpt(ctx, np.Node(), multiDriver, &localOpt, gatewayOpts, cfg, w, docker)
+			opts[k] = localOpt
 			if err != nil {
 				return nil, err
 			}
-			if err := saveLocalState(so, k, opt, np.Node(), configDir); err != nil {
+			if err := saveLocalState(so, k, opt, np.Node(), cfg); err != nil {
 				return nil, err
 			}
 			addGitAttrs(so)
@@ -269,7 +279,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 	}
 
 	// validate that all links between targets use same drivers
-	for name := range opt {
+	for name := range opts {
 		dps := reqForNodes[name]
 		for i, dp := range dps {
 			so := reqForNodes[name][i].so
@@ -305,10 +315,10 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 	var respMu sync.Mutex
 	results := waitmap.New()
 
-	multiTarget := len(opt) > 1
-	childTargets := calculateChildTargets(reqForNodes, opt)
+	multiTarget := len(opts) > 1
+	childTargets := calculateChildTargets(reqForNodes, opts)
 
-	for k, opt := range opt {
+	for k, opt := range opts {
 		err := func(k string) (err error) {
 			opt := opt
 			dps := drivers[k]
@@ -422,9 +432,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 					FrontendInputs: frontendInputs,
 					FrontendOpt:    make(map[string]string),
 				}
-				for k, v := range so.FrontendAttrs {
-					req.FrontendOpt[k] = v
-				}
+				maps.Copy(req.FrontendOpt, so.FrontendAttrs)
 				so.Frontend = ""
 				so.FrontendInputs = nil
 
@@ -494,7 +502,9 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 						resultHandle, rr, err = NewResultHandle(ctx, cc, *so, "buildx", buildFunc, ch)
 						resultHandleFunc(dp.driverIndex, resultHandle)
 					} else {
+						span, ctx := tracing.StartSpan(ctx, "build")
 						rr, err = c.Build(ctx, *so, "buildx", buildFunc, ch)
+						tracing.FinishWithError(span, err)
 					}
 					if !so.Internal && desktop.BuildBackendEnabled() && node.Driver.HistoryAPISupported(ctx) {
 						if err != nil {
@@ -528,7 +538,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 					node := dp.Node().Driver
 					if node.IsMobyDriver() {
 						for _, e := range so.Exports {
-							if e.Type == "moby" && e.Attrs["push"] != "" {
+							if e.Type == "moby" && e.Attrs["push"] != "" && !node.Features(ctx)[driver.DirectPush] {
 								if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok {
 									pushNames = e.Attrs["name"]
 									if pushNames == "" {
@@ -611,7 +621,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 						// This is fallback for some very old buildkit versions.
 						// Note that the mediatype isn't really correct as most of the time it is image manifest and
 						// not manifest list but actually both are handled because for Docker mediatypes the
-						// mediatype value in the Accpet header does not seem to matter.
+						// mediatype value in the Accept header does not seem to matter.
 						s, ok = r.ExporterResponse[exptypes.ExporterImageDigestKey]
 						if ok {
 							descs = append(descs, specs.Descriptor{
@@ -823,7 +833,7 @@ func remoteDigestWithMoby(ctx context.Context, d *driver.DriverHandle, name stri
 	if err != nil {
 		return "", err
 	}
-	img, _, err := api.ImageInspectWithRaw(ctx, name)
+	img, err := api.ImageInspect(ctx, name)
 	if err != nil {
 		return "", err
 	}
@@ -1131,7 +1141,7 @@ func ReadSourcePolicy() (*spb.Policy, error) {
 	var pol spb.Policy
 	if err := json.Unmarshal(data, &pol); err != nil {
 		// maybe it's in protobuf format?
-		e2 := pol.Unmarshal(data)
+		e2 := proto.Unmarshal(data, &pol)
 		if e2 != nil {
 			return nil, errors.Wrap(err, "failed to parse source policy")
 		}
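The replaced range-and-assign loops above are now single maps.Copy calls (Go 1.21 stdlib); a minimal standalone sketch of its overwrite semantics:

	package main

	import (
		"fmt"
		"maps"
	)

	func main() {
		frontendOpt := map[string]string{"platform": "linux/amd64"}
		frontendAttrs := map[string]string{"target": "release", "platform": "linux/arm64"}

		// maps.Copy inserts every key from the second map into the first,
		// overwriting duplicates, exactly what the old for-range loop did.
		maps.Copy(frontendOpt, frontendAttrs)
		fmt.Println(frontendOpt) // map[platform:linux/arm64 target:release]
	}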
@@ -4,6 +4,7 @@ import (
 	"context"
 	stderrors "errors"
 	"net"
+	"slices"
 
 	"github.com/containerd/platforms"
 	"github.com/docker/buildx/builder"
@@ -37,15 +38,7 @@ func Dial(ctx context.Context, nodes []builder.Node, pw progress.Writer, platfor
 	for _, ls := range resolved {
 		for _, rn := range ls {
 			if platform != nil {
-				p := *platform
-				var found bool
-				for _, pp := range rn.platforms {
-					if platforms.Only(p).Match(pp) {
-						found = true
-						break
-					}
-				}
-				if !found {
+				if !slices.ContainsFunc(rn.platforms, platforms.Only(*platform).Match) {
 					continue
 				}
 			}
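slices.ContainsFunc collapses the old found/break loop into one call, with platforms.Only(*platform).Match passed directly as the predicate; a self-contained sketch of the same idiom:

	package main

	import (
		"fmt"
		"slices"
		"strings"
	)

	func main() {
		names := []string{"linux/amd64", "linux/arm64"}
		// ContainsFunc reports whether any element satisfies the predicate,
		// replacing the hand-written loop with its found flag and break.
		ok := slices.ContainsFunc(names, func(s string) bool {
			return strings.HasSuffix(s, "/arm64")
		})
		fmt.Println(ok) // true
	}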
@@ -3,6 +3,7 @@ package build
 import (
 	"context"
 	"fmt"
+	"slices"
 	"sync"
 
 	"github.com/containerd/platforms"
@@ -221,7 +222,7 @@ func (r *nodeResolver) get(p specs.Platform, matcher matchMaker, additionalPlatf
 	for i, node := range r.nodes {
 		platforms := node.Platforms
 		if additionalPlatforms != nil {
-			platforms = append([]specs.Platform{}, platforms...)
+			platforms = slices.Clone(platforms)
 			platforms = append(platforms, additionalPlatforms(i, node)...)
 		}
 		for _, p2 := range platforms {
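slices.Clone is the idiomatic spelling of the append([]T{}, s...) copy: clone first so the later append cannot write through to node.Platforms. A minimal sketch:

	package main

	import (
		"fmt"
		"slices"
	)

	func main() {
		base := []string{"linux/amd64"}
		// Clone copies before append, so the original slice's backing
		// array is never mutated by extending the copy.
		extended := append(slices.Clone(base), "linux/arm64")
		fmt.Println(base)     // [linux/amd64]
		fmt.Println(extended) // [linux/amd64 linux/arm64]
	}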
@@ -2,6 +2,7 @@ package build
 
 import (
 	"context"
+	"maps"
 	"os"
 	"path"
 	"path/filepath"
@@ -127,9 +128,7 @@ func getGitAttributes(ctx context.Context, contextPath, dockerfilePath string) (
 		if so.FrontendAttrs == nil {
 			so.FrontendAttrs = make(map[string]string)
 		}
-		for k, v := range res {
-			so.FrontendAttrs[k] = v
-		}
+		maps.Copy(so.FrontendAttrs, res)
 
 		if !setGitInfo || root == "" {
 			return
@@ -9,6 +9,7 @@ import (
 	"testing"
 
 	"github.com/docker/buildx/util/gitutil"
+	"github.com/docker/buildx/util/gitutil/gittestutil"
 	"github.com/moby/buildkit/client"
 	specs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/stretchr/testify/assert"
@@ -16,23 +17,23 @@ import (
 )
 
 func setupTest(tb testing.TB) {
-	gitutil.Mktmp(tb)
+	gittestutil.Mktmp(tb)
 
 	c, err := gitutil.New()
 	require.NoError(tb, err)
-	gitutil.GitInit(c, tb)
+	gittestutil.GitInit(c, tb)
 
 	df := []byte("FROM alpine:latest\n")
-	assert.NoError(tb, os.WriteFile("Dockerfile", df, 0644))
+	require.NoError(tb, os.WriteFile("Dockerfile", df, 0644))
 
-	gitutil.GitAdd(c, tb, "Dockerfile")
-	gitutil.GitCommit(c, tb, "initial commit")
-	gitutil.GitSetRemote(c, tb, "origin", "git@github.com:docker/buildx.git")
+	gittestutil.GitAdd(c, tb, "Dockerfile")
+	gittestutil.GitCommit(c, tb, "initial commit")
+	gittestutil.GitSetRemote(c, tb, "origin", "git@github.com:docker/buildx.git")
 }
 
 func TestGetGitAttributesNotGitRepo(t *testing.T) {
 	_, err := getGitAttributes(context.Background(), t.TempDir(), "Dockerfile")
-	assert.NoError(t, err)
+	require.NoError(t, err)
 }
 
 func TestGetGitAttributesBadGitRepo(t *testing.T) {
@@ -47,7 +48,7 @@ func TestGetGitAttributesNoContext(t *testing.T) {
 	setupTest(t)
 
 	addGitAttrs, err := getGitAttributes(context.Background(), "", "Dockerfile")
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	var so client.SolveOpt
 	addGitAttrs(&so)
 	assert.Empty(t, so.FrontendAttrs)
@@ -188,19 +189,19 @@ func TestLocalDirs(t *testing.T) {
 }
 
 func TestLocalDirsSub(t *testing.T) {
-	gitutil.Mktmp(t)
+	gittestutil.Mktmp(t)
 
 	c, err := gitutil.New()
 	require.NoError(t, err)
-	gitutil.GitInit(c, t)
+	gittestutil.GitInit(c, t)
 
 	df := []byte("FROM alpine:latest\n")
-	assert.NoError(t, os.MkdirAll("app", 0755))
-	assert.NoError(t, os.WriteFile("app/Dockerfile", df, 0644))
+	require.NoError(t, os.MkdirAll("app", 0755))
+	require.NoError(t, os.WriteFile("app/Dockerfile", df, 0644))
 
-	gitutil.GitAdd(c, t, "app/Dockerfile")
-	gitutil.GitCommit(c, t, "initial commit")
-	gitutil.GitSetRemote(c, t, "origin", "git@github.com:docker/buildx.git")
+	gittestutil.GitAdd(c, t, "app/Dockerfile")
+	gittestutil.GitCommit(c, t, "initial commit")
+	gittestutil.GitSetRemote(c, t, "origin", "git@github.com:docker/buildx.git")
 
 	so := &client.SolveOpt{
 		FrontendAttrs: map[string]string{},
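The assert-to-require swaps above follow the usual testify rule: require aborts the test immediately on failure, assert records the failure and continues, so setup steps should use require. A minimal sketch of the distinction (the file name is illustrative):

	package build

	import (
		"os"
		"testing"

		"github.com/stretchr/testify/assert"
		"github.com/stretchr/testify/require"
	)

	func TestSemantics(t *testing.T) {
		err := os.WriteFile("Dockerfile", []byte("FROM alpine:latest\n"), 0644)
		require.NoError(t, err)            // aborts the test immediately on failure
		assert.FileExists(t, "Dockerfile") // records a failure but keeps running
	}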
@@ -16,7 +16,7 @@ import (
 
 type Container struct {
 	cancelOnce      sync.Once
-	containerCancel func()
+	containerCancel func(error)
 	isUnavailable   atomic.Bool
 	initStarted     atomic.Bool
 	container       gateway.Container
@@ -31,18 +31,18 @@ func NewContainer(ctx context.Context, resultCtx *ResultHandle, cfg *controllera
 	errCh := make(chan error)
 	go func() {
 		err := resultCtx.build(func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
-			ctx, cancel := context.WithCancel(ctx)
+			ctx, cancel := context.WithCancelCause(ctx)
 			go func() {
 				<-mainCtx.Done()
-				cancel()
+				cancel(errors.WithStack(context.Canceled))
 			}()
 
 			containerCfg, err := resultCtx.getContainerConfig(cfg)
 			if err != nil {
 				return nil, err
 			}
-			containerCtx, containerCancel := context.WithCancel(ctx)
-			defer containerCancel()
+			containerCtx, containerCancel := context.WithCancelCause(ctx)
+			defer containerCancel(errors.WithStack(context.Canceled))
 			bkContainer, err := c.NewContainer(containerCtx, containerCfg)
 			if err != nil {
 				return nil, err
@@ -83,7 +83,7 @@ func (c *Container) Cancel() {
 	c.markUnavailable()
 	c.cancelOnce.Do(func() {
 		if c.containerCancel != nil {
-			c.containerCancel()
+			c.containerCancel(errors.WithStack(context.Canceled))
 		}
 		close(c.releaseCh)
 	})

@@ -5,12 +5,13 @@ import (
 
 	"github.com/docker/buildx/builder"
 	"github.com/docker/buildx/localstate"
+	"github.com/docker/buildx/util/confutil"
 	"github.com/moby/buildkit/client"
 )
 
-func saveLocalState(so *client.SolveOpt, target string, opts Options, node builder.Node, configDir string) error {
+func saveLocalState(so *client.SolveOpt, target string, opts Options, node builder.Node, cfg *confutil.Config) error {
 	var err error
-	if so.Ref == "" {
+	if so.Ref == "" || opts.CallFunc != nil {
 		return nil
 	}
 	lp := opts.Inputs.ContextPath
@@ -30,7 +31,7 @@ func saveLocalState(so *client.SolveOpt, target string, opts Options, node build
 	if lp == "" && dp == "" {
 		return nil
 	}
-	l, err := localstate.New(configDir)
+	l, err := localstate.New(cfg)
 	if err != nil {
 		return err
 	}
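The container hunks above migrate from context.WithCancel to context.WithCancelCause (Go 1.21): cancellation now carries a reason that context.Cause can surface, while ctx.Err() still reports plain context.Canceled. A minimal sketch:

	package main

	import (
		"context"
		"errors"
		"fmt"
	)

	func main() {
		ctx, cancel := context.WithCancelCause(context.Background())
		cancel(errors.New("container released"))

		fmt.Println(ctx.Err())          // context canceled
		fmt.Println(context.Cause(ctx)) // container released
	}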
build/opt.go (23 changed lines)
@@ -11,8 +11,8 @@ import (
 	"strings"
 	"syscall"
 
-	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/content/local"
+	"github.com/containerd/containerd/v2/core/content"
+	"github.com/containerd/containerd/v2/plugins/content/local"
 	"github.com/containerd/platforms"
 	"github.com/distribution/reference"
 	"github.com/docker/buildx/builder"
@@ -35,7 +35,7 @@ import (
 	"github.com/tonistiigi/fsutil"
 )
 
-func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Options, bopts gateway.BuildOpts, configDir string, pw progress.Writer, docker *dockerutil.Client) (_ *client.SolveOpt, release func(), err error) {
+func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt *Options, bopts gateway.BuildOpts, cfg *confutil.Config, pw progress.Writer, docker *dockerutil.Client) (_ *client.SolveOpt, release func(), err error) {
 	nodeDriver := node.Driver
 	defers := make([]func(), 0, 2)
 	releaseF := func() {
@@ -263,7 +263,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 	so.Exports = opt.Exports
 	so.Session = slices.Clone(opt.Session)
 
-	releaseLoad, err := loadInputs(ctx, nodeDriver, opt.Inputs, pw, &so)
+	releaseLoad, err := loadInputs(ctx, nodeDriver, &opt.Inputs, pw, &so)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -271,7 +271,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 
 	// add node identifier to shared key if one was specified
 	if so.SharedKey != "" {
-		so.SharedKey += ":" + confutil.TryNodeIdentifier(configDir)
+		so.SharedKey += ":" + cfg.TryNodeIdentifier()
 	}
 
 	if opt.Pull {
@@ -318,7 +318,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 	switch opt.NetworkMode {
 	case "host":
 		so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
-		so.AllowedEntitlements = append(so.AllowedEntitlements, entitlements.EntitlementNetworkHost)
+		so.AllowedEntitlements = append(so.AllowedEntitlements, entitlements.EntitlementNetworkHost.String())
 	case "none":
 		so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
 	case "", "default":
@@ -356,7 +356,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 	return &so, releaseF, nil
 }
 
-func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw progress.Writer, target *client.SolveOpt) (func(), error) {
+func loadInputs(ctx context.Context, d *driver.DriverHandle, inp *Inputs, pw progress.Writer, target *client.SolveOpt) (func(), error) {
 	if inp.ContextPath == "" {
 		return nil, errors.New("please specify build context (e.g. \".\" for the current directory)")
 	}
@@ -368,6 +368,7 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw prog
 		dockerfileReader  io.ReadCloser
 		dockerfileDir     string
 		dockerfileName    = inp.DockerfilePath
+		dockerfileSrcName = inp.DockerfilePath
 		toRemove          []string
 	)
 
@@ -440,6 +441,11 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw prog
 
 	if inp.DockerfileInline != "" {
 		dockerfileReader = io.NopCloser(strings.NewReader(inp.DockerfileInline))
+		dockerfileSrcName = "inline"
+	} else if inp.DockerfilePath == "-" {
+		dockerfileSrcName = "stdin"
+	} else if inp.DockerfilePath == "" {
+		dockerfileSrcName = filepath.Join(inp.ContextPath, "Dockerfile")
 	}
 
 	if dockerfileReader != nil {
@@ -540,6 +546,9 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw prog
 			_ = os.RemoveAll(dir)
 		}
 	}
 
+	inp.DockerfileMappingSrc = dockerfileSrcName
+	inp.DockerfileMappingDst = dockerfileName
 	return release, nil
 }
@@ -5,16 +5,18 @@ import (
 	"encoding/base64"
 	"encoding/json"
 	"io"
+	"maps"
 	"strings"
 	"sync"
 
-	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/content/proxy"
+	"github.com/containerd/containerd/v2/core/content"
+	"github.com/containerd/containerd/v2/core/content/proxy"
 	"github.com/docker/buildx/util/confutil"
 	"github.com/docker/buildx/util/progress"
 	controlapi "github.com/moby/buildkit/api/services/control"
 	"github.com/moby/buildkit/client"
 	provenancetypes "github.com/moby/buildkit/solver/llbsolver/provenance/types"
+	digest "github.com/opencontainers/go-digest"
 	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 	"golang.org/x/sync/errgroup"
@@ -39,9 +41,7 @@ func setRecordProvenance(ctx context.Context, c *client.Client, sr *client.Solve
 		if err != nil {
 			return err
 		}
-		for k, v := range res {
-			sr.ExporterResponse[k] = v
-		}
+		maps.Copy(sr.ExporterResponse, res)
 		return nil
 	})
 }
@@ -124,8 +124,8 @@ func lookupProvenance(res *controlapi.BuildResultInfo) *ocispecs.Descriptor {
 	for _, a := range res.Attestations {
 		if a.MediaType == "application/vnd.in-toto+json" && strings.HasPrefix(a.Annotations["in-toto.io/predicate-type"], "https://slsa.dev/provenance/") {
 			return &ocispecs.Descriptor{
-				Digest:      a.Digest,
-				Size:        a.Size_,
+				Digest:      digest.Digest(a.Digest),
+				Size:        a.Size,
 				MediaType:   a.MediaType,
 				Annotations: a.Annotations,
 			}
@@ -10,7 +10,6 @@ import (
 	"time"
 
 	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
 )
 
 func generateRandomData(size int) []byte {
@@ -29,11 +28,11 @@ func TestSyncMultiReaderParallel(t *testing.T) {
 
 	readers := make([]io.ReadCloser, numReaders)
 
-	for i := 0; i < numReaders; i++ {
+	for i := range numReaders {
 		readers[i] = mr.NewReadCloser()
 	}
 
-	for i := 0; i < numReaders; i++ {
+	for i := range numReaders {
 		wg.Add(1)
 		go func(readerId int) {
 			defer wg.Done()
@@ -57,7 +56,7 @@ func TestSyncMultiReaderParallel(t *testing.T) {
 				return
 			}
 
-			require.NoError(t, err, "Reader %d error", readerId)
+			assert.NoError(t, err, "Reader %d error", readerId)
 
 			if mathrand.Intn(1000) == 0 { //nolint:gosec
 				t.Logf("Reader %d closing", readerId)
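The loop rewrite uses Go 1.22 range-over-int, which counts 0 through n-1; the require-to-assert swap also matters here because require's t.FailNow must not be called from a spawned goroutine. A minimal sketch of the loop form:

	package main

	import "fmt"

	func main() {
		const numReaders = 3
		// Equivalent to: for i := 0; i < numReaders; i++ { ... }
		for i := range numReaders {
			fmt.Println("reader", i)
		}
	}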
@@ -82,7 +82,7 @@ func NewResultHandle(ctx context.Context, cc *client.Client, opt client.SolveOpt
 	var respHandle *ResultHandle
 
 	go func() {
-		defer cancel(context.Canceled) // ensure no dangling processes
+		defer func() { cancel(errors.WithStack(context.Canceled)) }() // ensure no dangling processes
 
 		var res *gateway.Result
 		var err error
@@ -181,7 +181,7 @@ func NewResultHandle(ctx context.Context, cc *client.Client, opt client.SolveOpt
 			case <-respHandle.done:
 			case <-ctx.Done():
 			}
-			return nil, ctx.Err()
+			return nil, context.Cause(ctx)
 		}, nil)
 		if respHandle != nil {
 			return
@@ -295,14 +295,14 @@ func (r *ResultHandle) build(buildFunc gateway.BuildFunc) (err error) {
 func (r *ResultHandle) getContainerConfig(cfg *controllerapi.InvokeConfig) (containerCfg gateway.NewContainerRequest, _ error) {
 	if r.res != nil && r.solveErr == nil {
 		logrus.Debugf("creating container from successful build")
-		ccfg, err := containerConfigFromResult(r.res, *cfg)
+		ccfg, err := containerConfigFromResult(r.res, cfg)
 		if err != nil {
 			return containerCfg, err
 		}
 		containerCfg = *ccfg
 	} else {
 		logrus.Debugf("creating container from failed build %+v", cfg)
-		ccfg, err := containerConfigFromError(r.solveErr, *cfg)
+		ccfg, err := containerConfigFromError(r.solveErr, cfg)
 		if err != nil {
 			return containerCfg, errors.Wrapf(err, "no result nor error is available")
 		}
@@ -315,19 +315,19 @@ func (r *ResultHandle) getProcessConfig(cfg *controllerapi.InvokeConfig, stdin i
 	processCfg := newStartRequest(stdin, stdout, stderr)
 	if r.res != nil && r.solveErr == nil {
 		logrus.Debugf("creating container from successful build")
-		if err := populateProcessConfigFromResult(&processCfg, r.res, *cfg); err != nil {
+		if err := populateProcessConfigFromResult(&processCfg, r.res, cfg); err != nil {
 			return processCfg, err
 		}
 	} else {
 		logrus.Debugf("creating container from failed build %+v", cfg)
-		if err := populateProcessConfigFromError(&processCfg, r.solveErr, *cfg); err != nil {
+		if err := populateProcessConfigFromError(&processCfg, r.solveErr, cfg); err != nil {
 			return processCfg, err
 		}
 	}
 	return processCfg, nil
 }
 
-func containerConfigFromResult(res *gateway.Result, cfg controllerapi.InvokeConfig) (*gateway.NewContainerRequest, error) {
+func containerConfigFromResult(res *gateway.Result, cfg *controllerapi.InvokeConfig) (*gateway.NewContainerRequest, error) {
 	if cfg.Initial {
 		return nil, errors.Errorf("starting from the container from the initial state of the step is supported only on the failed steps")
 	}
@@ -352,7 +352,7 @@ func containerConfigFromResult(res *gateway.Result, cfg controllerapi.InvokeConf
 	}, nil
 }
 
-func populateProcessConfigFromResult(req *gateway.StartRequest, res *gateway.Result, cfg controllerapi.InvokeConfig) error {
+func populateProcessConfigFromResult(req *gateway.StartRequest, res *gateway.Result, cfg *controllerapi.InvokeConfig) error {
 	imgData := res.Metadata[exptypes.ExporterImageConfigKey]
 	var img *specs.Image
 	if len(imgData) > 0 {
@@ -403,7 +403,7 @@ func populateProcessConfigFromResult(req *gateway.StartRequest, res *gateway.Res
 	return nil
 }
 
-func containerConfigFromError(solveErr *errdefs.SolveError, cfg controllerapi.InvokeConfig) (*gateway.NewContainerRequest, error) {
+func containerConfigFromError(solveErr *errdefs.SolveError, cfg *controllerapi.InvokeConfig) (*gateway.NewContainerRequest, error) {
 	exec, err := execOpFromError(solveErr)
 	if err != nil {
 		return nil, err
@@ -431,7 +431,7 @@ func containerConfigFromError(solveErr *errdefs.SolveError, cfg controllerapi.In
 	}, nil
 }
 
-func populateProcessConfigFromError(req *gateway.StartRequest, solveErr *errdefs.SolveError, cfg controllerapi.InvokeConfig) error {
+func populateProcessConfigFromError(req *gateway.StartRequest, solveErr *errdefs.SolveError, cfg *controllerapi.InvokeConfig) error {
 	exec, err := execOpFromError(solveErr)
 	if err != nil {
 		return err
@@ -7,12 +7,15 @@ import (
 
 	"github.com/docker/buildx/driver"
 	"github.com/docker/buildx/util/progress"
+	"github.com/docker/go-units"
 	"github.com/moby/buildkit/client"
 	"github.com/moby/buildkit/client/llb"
 	gwclient "github.com/moby/buildkit/frontend/gateway/client"
 	"github.com/pkg/errors"
 )
 
+const maxDockerfileSize = 2 * 1024 * 1024 // 2 MB
+
 func createTempDockerfileFromURL(ctx context.Context, d *driver.DriverHandle, url string, pw progress.Writer) (string, error) {
 	c, err := driver.Boot(ctx, ctx, d, pw)
 	if err != nil {
@@ -43,8 +46,8 @@ func createTempDockerfileFromURL(ctx context.Context, d *driver.DriverHandle, ur
 		if err != nil {
 			return nil, err
 		}
-		if stat.Size() > 512*1024 {
-			return nil, errors.Errorf("Dockerfile %s bigger than allowed max size", url)
+		if stat.Size > maxDockerfileSize {
+			return nil, errors.Errorf("Dockerfile %s bigger than allowed max size (%s)", url, units.HumanSize(maxDockerfileSize))
 		}
 
 		dt, err := ref.ReadFile(ctx, gwclient.ReadRequest{
@@ -63,7 +66,6 @@ func createTempDockerfileFromURL(ctx context.Context, d *driver.DriverHandle, ur
 		out = dir
 		return nil, nil
 	}, ch)
-
 	if err != nil {
 		return "", err
 	}
@@ -138,7 +138,7 @@ func TestToBuildkitExtraHosts(t *testing.T) {
 			actualOut, actualErr := toBuildkitExtraHosts(context.TODO(), tc.input, nil)
 			if tc.expectedErr == "" {
 				require.Equal(t, tc.expectedOut, actualOut)
-				require.Nil(t, actualErr)
+				require.NoError(t, actualErr)
 			} else {
 				require.Zero(t, actualOut)
 				require.Error(t, actualErr, tc.expectedErr)
@@ -5,6 +5,7 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
|
"slices"
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
@@ -199,7 +200,7 @@ func (b *Builder) Boot(ctx context.Context) (bool, error) {
|
|||||||
err = err1
|
err = err1
|
||||||
}
|
}
|
||||||
|
|
||||||
if err == nil && len(errCh) == len(toBoot) {
|
if err == nil && len(errCh) > 0 {
|
||||||
return false, <-errCh
|
return false, <-errCh
|
||||||
}
|
}
|
||||||
return true, err
|
return true, err
|
||||||
@@ -288,7 +289,15 @@ func GetBuilders(dockerCli command.Cli, txn *store.Txn) ([]*Builder, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
builders := make([]*Builder, len(storeng))
|
contexts, err := dockerCli.ContextStore().List()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
sort.Slice(contexts, func(i, j int) bool {
|
||||||
|
return contexts[i].Name < contexts[j].Name
|
||||||
|
})
|
||||||
|
|
||||||
|
builders := make([]*Builder, len(storeng), len(storeng)+len(contexts))
|
||||||
seen := make(map[string]struct{})
|
seen := make(map[string]struct{})
|
||||||
for i, ng := range storeng {
|
for i, ng := range storeng {
|
||||||
b, err := New(dockerCli,
|
b, err := New(dockerCli,
|
||||||
@@ -303,14 +312,6 @@ func GetBuilders(dockerCli command.Cli, txn *store.Txn) ([]*Builder, error) {
|
|||||||
seen[b.NodeGroup.Name] = struct{}{}
|
seen[b.NodeGroup.Name] = struct{}{}
|
||||||
}
|
}
|
||||||
|
|
||||||
contexts, err := dockerCli.ContextStore().List()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
sort.Slice(contexts, func(i, j int) bool {
|
|
||||||
return contexts[i].Name < contexts[j].Name
|
|
||||||
})
|
|
||||||
|
|
||||||
for _, c := range contexts {
|
for _, c := range contexts {
|
||||||
// if a context has the same name as an instance from the store, do not
|
// if a context has the same name as an instance from the store, do not
|
||||||
// add it to the builders list. An instance from the store takes
|
// add it to the builders list. An instance from the store takes
|
||||||
@@ -435,7 +436,16 @@ func Create(ctx context.Context, txn *store.Txn, dockerCli command.Cli, opts Cre
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
buildkitdFlags, err := parseBuildkitdFlags(opts.BuildkitdFlags, driverName, driverOpts)
|
buildkitdConfigFile := opts.BuildkitdConfigFile
|
||||||
|
if buildkitdConfigFile == "" {
|
||||||
|
// if buildkit daemon config is not provided, check if the default one
|
||||||
|
// is available and use it
|
||||||
|
if f, ok := confutil.NewConfig(dockerCli).BuildKitConfigFile(); ok {
|
||||||
|
buildkitdConfigFile = f
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
buildkitdFlags, err := parseBuildkitdFlags(opts.BuildkitdFlags, driverName, driverOpts, buildkitdConfigFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -496,15 +506,6 @@ func Create(ctx context.Context, txn *store.Txn, dockerCli command.Cli, opts Cre
|
|||||||
setEp = false
|
setEp = false
|
||||||
}
|
}
|
||||||
|
|
||||||
buildkitdConfigFile := opts.BuildkitdConfigFile
|
|
||||||
if buildkitdConfigFile == "" {
|
|
||||||
// if buildkit daemon config is not provided, check if the default one
|
|
||||||
// is available and use it
|
|
||||||
if f, ok := confutil.DefaultConfigFile(dockerCli); ok {
|
|
||||||
buildkitdConfigFile = f
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := ng.Update(opts.NodeName, ep, opts.Platforms, setEp, opts.Append, buildkitdFlags, buildkitdConfigFile, driverOpts); err != nil {
|
if err := ng.Update(opts.NodeName, ep, opts.Platforms, setEp, opts.Append, buildkitdFlags, buildkitdConfigFile, driverOpts); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -522,8 +523,9 @@ func Create(ctx context.Context, txn *store.Txn, dockerCli command.Cli, opts Cre
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
|
cancelCtx, cancel := context.WithCancelCause(ctx)
|
||||||
defer cancel()
|
timeoutCtx, _ := context.WithTimeoutCause(cancelCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
|
||||||
|
defer func() { cancel(errors.WithStack(context.Canceled)) }()
|
||||||
|
|
||||||
nodes, err := b.LoadNodes(timeoutCtx, WithData())
|
nodes, err := b.LoadNodes(timeoutCtx, WithData())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -584,7 +586,7 @@ func Leave(ctx context.Context, txn *store.Txn, dockerCli command.Cli, opts Leav
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
ls, err := localstate.New(confutil.ConfigDir(dockerCli))
|
ls, err := localstate.New(confutil.NewConfig(dockerCli))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -641,7 +643,7 @@ func validateBuildkitEndpoint(ep string) (string, error) {
 }
 
 // parseBuildkitdFlags parses buildkit flags
-func parseBuildkitdFlags(inp string, driver string, driverOpts map[string]string) (res []string, err error) {
+func parseBuildkitdFlags(inp string, driver string, driverOpts map[string]string, buildkitdConfigFile string) (res []string, err error) {
 	if inp != "" {
 		res, err = shlex.Split(inp)
 		if err != nil {
@@ -655,18 +657,26 @@ func parseBuildkitdFlags(inp string, driver string, driverOpts map[string]string
 	flags.StringArrayVar(&allowInsecureEntitlements, "allow-insecure-entitlement", nil, "")
 	_ = flags.Parse(res)
 
-	var hasNetworkHostEntitlement bool
-	for _, e := range allowInsecureEntitlements {
-		if e == "network.host" {
-			hasNetworkHostEntitlement = true
-			break
+	hasNetworkHostEntitlement := slices.Contains(allowInsecureEntitlements, "network.host")
+
+	var hasNetworkHostEntitlementInConf bool
+	if buildkitdConfigFile != "" {
+		btoml, err := confutil.LoadConfigTree(buildkitdConfigFile)
+		if err != nil {
+			return nil, err
+		} else if btoml != nil {
+			if ies := btoml.GetArray("insecure-entitlements"); ies != nil {
+				if slices.Contains(ies.([]string), "network.host") {
+					hasNetworkHostEntitlementInConf = true
+				}
+			}
 		}
 	}
 
 	if v, ok := driverOpts["network"]; ok && v == "host" && !hasNetworkHostEntitlement && driver == "docker-container" {
 		// always set network.host entitlement if user has set network=host
 		res = append(res, "--allow-insecure-entitlement=network.host")
-	} else if len(allowInsecureEntitlements) == 0 && (driver == "kubernetes" || driver == "docker-container") {
+	} else if len(allowInsecureEntitlements) == 0 && !hasNetworkHostEntitlementInConf && (driver == "kubernetes" || driver == "docker-container") {
 		// set network.host entitlement if user does not provide any as
 		// network is isolated for container drivers.
 		res = append(res, "--allow-insecure-entitlement=network.host")
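The net effect of the hunk above: the default network.host entitlement is no longer appended when the buildkitd TOML config already grants it. confutil.LoadConfigTree is buildx-internal; the same lookup can be sketched with a generic TOML decoder. A hedged stand-in (github.com/BurntSushi/toml is an assumption here, not what buildx vendors, and the path is only an example):

package main

import (
	"fmt"
	"slices"

	"github.com/BurntSushi/toml"
)

type buildkitdConfig struct {
	InsecureEntitlements []string `toml:"insecure-entitlements"`
}

// hasNetworkHostInConf reports whether the buildkitd config already allows
// the network.host entitlement, mirroring the check added above.
func hasNetworkHostInConf(path string) (bool, error) {
	var cfg buildkitdConfig
	if _, err := toml.DecodeFile(path, &cfg); err != nil {
		return false, err
	}
	return slices.Contains(cfg.InsecureEntitlements, "network.host"), nil
}

func main() {
	ok, err := hasNetworkHostInConf("/etc/buildkit/buildkitd.toml") // example path
	fmt.Println(ok, err)
}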
@@ -1,6 +1,8 @@
 package builder
 
 import (
+	"os"
+	"path"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
@@ -17,21 +19,46 @@ func TestCsvToMap(t *testing.T) {
 	require.NoError(t, err)
 
 	require.Contains(t, r, "tolerations")
-	require.Equal(t, r["tolerations"], "key=foo,value=bar;key=foo2,value=bar2")
+	require.Equal(t, "key=foo,value=bar;key=foo2,value=bar2", r["tolerations"])
 
 	require.Contains(t, r, "replicas")
-	require.Equal(t, r["replicas"], "1")
+	require.Equal(t, "1", r["replicas"])
 
 	require.Contains(t, r, "namespace")
-	require.Equal(t, r["namespace"], "default")
+	require.Equal(t, "default", r["namespace"])
 }
 
 func TestParseBuildkitdFlags(t *testing.T) {
+	dirConf := t.TempDir()
+
+	buildkitdConfPath := path.Join(dirConf, "buildkitd-conf.toml")
+	require.NoError(t, os.WriteFile(buildkitdConfPath, []byte(`
+# debug enables additional debug logging
+debug = true
+# insecure-entitlements allows insecure entitlements, disabled by default.
+insecure-entitlements = [ "network.host", "security.insecure" ]
+[log]
+  # log formatter: json or text
+  format = "text"
+`), 0644))
+
+	buildkitdConfBrokenPath := path.Join(dirConf, "buildkitd-conf-broken.toml")
+	require.NoError(t, os.WriteFile(buildkitdConfBrokenPath, []byte(`
+[worker.oci]
+  gc = "maybe"
+`), 0644))
+
+	buildkitdConfUnknownFieldPath := path.Join(dirConf, "buildkitd-unknown-field.toml")
+	require.NoError(t, os.WriteFile(buildkitdConfUnknownFieldPath, []byte(`
+foo = "bar"
+`), 0644))
+
 	testCases := []struct {
 		name                string
 		flags               string
 		driver              string
 		driverOpts          map[string]string
+		buildkitdConfigFile string
 		expected            []string
 		wantErr             bool
 	}{
@@ -40,6 +67,7 @@ func TestParseBuildkitdFlags(t *testing.T) {
 			"",
 			"docker-container",
 			nil,
+			"",
 			[]string{
 				"--allow-insecure-entitlement=network.host",
 			},
@@ -50,6 +78,7 @@ func TestParseBuildkitdFlags(t *testing.T) {
 			"",
 			"kubernetes",
 			nil,
+			"",
 			[]string{
 				"--allow-insecure-entitlement=network.host",
 			},
@@ -60,6 +89,7 @@ func TestParseBuildkitdFlags(t *testing.T) {
 			"",
 			"remote",
 			nil,
+			"",
 			nil,
 			false,
 		},
@@ -68,6 +98,7 @@ func TestParseBuildkitdFlags(t *testing.T) {
 			"--allow-insecure-entitlement=security.insecure",
 			"docker-container",
 			nil,
+			"",
 			[]string{
 				"--allow-insecure-entitlement=security.insecure",
 			},
@@ -78,6 +109,7 @@ func TestParseBuildkitdFlags(t *testing.T) {
 			"--allow-insecure-entitlement=network.host --allow-insecure-entitlement=security.insecure",
 			"docker-container",
 			nil,
+			"",
 			[]string{
 				"--allow-insecure-entitlement=network.host",
 				"--allow-insecure-entitlement=security.insecure",
@@ -89,6 +121,7 @@ func TestParseBuildkitdFlags(t *testing.T) {
 			"",
 			"docker-container",
 			map[string]string{"network": "host"},
+			"",
 			[]string{
 				"--allow-insecure-entitlement=network.host",
 			},
@@ -99,6 +132,7 @@ func TestParseBuildkitdFlags(t *testing.T) {
 			"--allow-insecure-entitlement=network.host",
 			"docker-container",
 			map[string]string{"network": "host"},
+			"",
 			[]string{
 				"--allow-insecure-entitlement=network.host",
 			},
@@ -109,25 +143,56 @@ func TestParseBuildkitdFlags(t *testing.T) {
 			"--allow-insecure-entitlement=network.host --allow-insecure-entitlement=security.insecure",
 			"docker-container",
 			map[string]string{"network": "host"},
+			"",
 			[]string{
 				"--allow-insecure-entitlement=network.host",
 				"--allow-insecure-entitlement=security.insecure",
 			},
 			false,
 		},
+		{
+			"docker-container with buildkitd conf setting network.host entitlement",
+			"",
+			"docker-container",
+			nil,
+			buildkitdConfPath,
+			nil,
+			false,
+		},
 		{
 			"error parsing flags",
 			"foo'",
 			"docker-container",
 			nil,
+			"",
 			nil,
 			true,
 		},
+		{
+			"error parsing buildkit config",
+			"",
+			"docker-container",
+			nil,
+			buildkitdConfBrokenPath,
+			nil,
+			true,
+		},
+		{
+			"unknown field in buildkit config",
+			"",
+			"docker-container",
+			nil,
+			buildkitdConfUnknownFieldPath,
+			[]string{
+				"--allow-insecure-entitlement=network.host",
+			},
+			false,
+		},
 	}
 	for _, tt := range testCases {
 		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
-			flags, err := parseBuildkitdFlags(tt.flags, tt.driver, tt.driverOpts)
+			flags, err := parseBuildkitdFlags(tt.flags, tt.driver, tt.driverOpts, tt.buildkitdConfigFile)
 			if tt.wantErr {
 				require.Error(t, err)
 				return
@@ -36,6 +36,7 @@ type Node struct {
 	Platforms  []ocispecs.Platform
 	GCPolicy   []client.PruneInfo
 	Labels     map[string]string
+	CDIDevices []client.CDIDevice
 }
 
 // Nodes returns nodes for this builder.
@@ -168,7 +169,7 @@ func (b *Builder) LoadNodes(ctx context.Context, opts ...LoadNodesOption) (_ []N
 		// dynamic nodes are used in Kubernetes driver.
 		// Kubernetes' pods are dynamically mapped to BuildKit Nodes.
 		if di.DriverInfo != nil && len(di.DriverInfo.DynamicNodes) > 0 {
-			for i := 0; i < len(di.DriverInfo.DynamicNodes); i++ {
+			for i := range di.DriverInfo.DynamicNodes {
 				diClone := di
 				if pl := di.DriverInfo.DynamicNodes[i].Platforms; len(pl) > 0 {
 					diClone.Platforms = pl
@@ -259,6 +260,7 @@ func (n *Node) loadData(ctx context.Context, clientOpt ...client.ClientOpt) erro
 			n.GCPolicy = w.GCPolicy
 			n.Labels = w.Labels
 		}
+		n.CDIDevices = w.CDIDevices
 	}
 	sort.Strings(n.IDs)
 	n.Platforms = platformutil.Dedupe(n.Platforms)
cmd/buildx/debug.go (new file, 75 lines)
@@ -0,0 +1,75 @@
+package main
+
+import (
+	"context"
+	"os"
+	"runtime"
+	"runtime/pprof"
+
+	"github.com/moby/buildkit/util/bklog"
+	"github.com/sirupsen/logrus"
+)
+
+func setupDebugProfiles(ctx context.Context) (stop func()) {
+	var stopFuncs []func()
+	if fn := setupCPUProfile(ctx); fn != nil {
+		stopFuncs = append(stopFuncs, fn)
+	}
+	if fn := setupHeapProfile(ctx); fn != nil {
+		stopFuncs = append(stopFuncs, fn)
+	}
+	return func() {
+		for _, fn := range stopFuncs {
+			fn()
+		}
+	}
+}
+
+func setupCPUProfile(ctx context.Context) (stop func()) {
+	if cpuProfile := os.Getenv("BUILDX_CPU_PROFILE"); cpuProfile != "" {
+		f, err := os.Create(cpuProfile)
+		if err != nil {
+			bklog.G(ctx).Warn("could not create cpu profile", logrus.WithError(err))
+			return nil
+		}
+
+		if err := pprof.StartCPUProfile(f); err != nil {
+			bklog.G(ctx).Warn("could not start cpu profile", logrus.WithError(err))
+			_ = f.Close()
+			return nil
+		}
+
+		return func() {
+			pprof.StopCPUProfile()
+			if err := f.Close(); err != nil {
+				bklog.G(ctx).Warn("could not close file for cpu profile", logrus.WithError(err))
+			}
+		}
+	}
+	return nil
+}
+
+func setupHeapProfile(ctx context.Context) (stop func()) {
+	if heapProfile := os.Getenv("BUILDX_MEM_PROFILE"); heapProfile != "" {
+		// Memory profile is only created on stop.
+		return func() {
+			f, err := os.Create(heapProfile)
+			if err != nil {
+				bklog.G(ctx).Warn("could not create memory profile", logrus.WithError(err))
+				return
+			}
+
+			// get up-to-date statistics
+			runtime.GC()
+
+			if err := pprof.WriteHeapProfile(f); err != nil {
+				bklog.G(ctx).Warn("could not write memory profile", logrus.WithError(err))
+			}
+
+			if err := f.Close(); err != nil {
+				bklog.G(ctx).Warn("could not close file for memory profile", logrus.WithError(err))
+			}
+		}
+	}
+	return nil
+}
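Both profiles in this new file are opt-in via environment variables: BUILDX_CPU_PROFILE records CPU usage for the whole invocation, while BUILDX_MEM_PROFILE snapshots the heap once at shutdown, which is why runtime.GC() runs before the write. A condensed standalone sketch of the same pattern, with the error logging dropped for brevity:

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
)

func main() {
	stop := setupProfiles()
	defer stop()
	// real work goes here
}

func setupProfiles() (stop func()) {
	var stops []func()
	if p := os.Getenv("BUILDX_CPU_PROFILE"); p != "" {
		if f, err := os.Create(p); err == nil && pprof.StartCPUProfile(f) == nil {
			stops = append(stops, func() {
				pprof.StopCPUProfile()
				f.Close()
			})
		}
	}
	if p := os.Getenv("BUILDX_MEM_PROFILE"); p != "" {
		stops = append(stops, func() {
			f, err := os.Create(p)
			if err != nil {
				return
			}
			defer f.Close()
			runtime.GC() // refresh heap statistics before the snapshot
			pprof.WriteHeapProfile(f)
		})
	}
	return func() {
		for _, fn := range stops {
			fn()
		}
	}
}

The resulting files can be inspected afterwards with the standard go tool pprof workflow.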
@@ -4,45 +4,41 @@ import (
 	"context"
 	"fmt"
 	"os"
+	"path/filepath"
 
 	"github.com/docker/buildx/commands"
+	controllererrors "github.com/docker/buildx/controller/errdefs"
 	"github.com/docker/buildx/util/desktop"
 	"github.com/docker/buildx/version"
 	"github.com/docker/cli/cli"
-	"github.com/docker/cli/cli-plugins/manager"
+	"github.com/docker/cli/cli-plugins/metadata"
 	"github.com/docker/cli/cli-plugins/plugin"
 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/cli/debug"
-	cliflags "github.com/docker/cli/cli/flags"
 	"github.com/moby/buildkit/solver/errdefs"
 	"github.com/moby/buildkit/util/stack"
+	"github.com/pkg/errors"
 	"go.opentelemetry.io/otel"
 
-	//nolint:staticcheck // vendored dependencies may still use this
-	"github.com/containerd/containerd/pkg/seed"
-
 	_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
 
 	_ "github.com/docker/buildx/driver/docker"
 	_ "github.com/docker/buildx/driver/docker-container"
 	_ "github.com/docker/buildx/driver/kubernetes"
 	_ "github.com/docker/buildx/driver/remote"
+
+	// Use custom grpc codec to utilize vtprotobuf
+	_ "github.com/moby/buildkit/util/grpcutil/encoding/proto"
 )
 
 func init() {
-	//nolint:staticcheck
-	seed.WithTimeAndRand()
-
 	stack.SetVersionInfo(version.Version, version.Revision)
 }
 
 func runStandalone(cmd *command.DockerCli) error {
-	if err := cmd.Initialize(cliflags.NewClientOptions()); err != nil {
-		return err
-	}
 	defer flushMetrics(cmd)
-	rootCmd := commands.NewRootCmd(os.Args[0], false, cmd)
+	executable := os.Args[0]
+	rootCmd := commands.NewRootCmd(filepath.Base(executable), false, cmd)
 	return rootCmd.Execute()
 }
 
@@ -63,13 +59,23 @@ func flushMetrics(cmd *command.DockerCli) {
 
 func runPlugin(cmd *command.DockerCli) error {
 	rootCmd := commands.NewRootCmd("buildx", true, cmd)
-	return plugin.RunPlugin(cmd, rootCmd, manager.Metadata{
+	return plugin.RunPlugin(cmd, rootCmd, metadata.Metadata{
 		SchemaVersion: "0.1.0",
 		Vendor:        "Docker Inc.",
 		Version:       version.Version,
 	})
 }
 
+func run(cmd *command.DockerCli) error {
+	stopProfiles := setupDebugProfiles(context.TODO())
+	defer stopProfiles()
+
+	if plugin.RunningStandalone() {
+		return runStandalone(cmd)
+	}
+	return runPlugin(cmd)
+}
+
 func main() {
 	cmd, err := command.NewDockerCli()
 	if err != nil {
@@ -77,15 +83,11 @@ func main() {
 		os.Exit(1)
 	}
 
-	if plugin.RunningStandalone() {
-		err = runStandalone(cmd)
-	} else {
-		err = runPlugin(cmd)
-	}
-	if err == nil {
+	if err = run(cmd); err == nil {
 		return
 	}
 
+	// Check the error from the run function above.
 	if sterr, ok := err.(cli.StatusError); ok {
 		if sterr.Status != "" {
 			fmt.Fprintln(cmd.Err(), sterr.Status)
@@ -106,8 +108,15 @@ func main() {
 	} else {
 		fmt.Fprintf(cmd.Err(), "ERROR: %v\n", err)
 	}
-	if ebr, ok := err.(*desktop.ErrorWithBuildRef); ok {
+
+	var ebr *desktop.ErrorWithBuildRef
+	if errors.As(err, &ebr) {
 		ebr.Print(cmd.Err())
+	} else {
+		var be *controllererrors.BuildError
+		if errors.As(err, &be) {
+			be.PrintBuildDetails(cmd.Err())
+		}
 	}
 
 	os.Exit(1)
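The error-printing change at the end of main is worth a note: a plain type assertion such as err.(*desktop.ErrorWithBuildRef) only matches the outermost error value, while errors.As walks the wrap chain, so errors wrapped via github.com/pkg/errors or fmt.Errorf("...: %w", err) still match. A small illustration with a hypothetical error type (buildRefError stands in for the concrete types used above):

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

type buildRefError struct{ Ref string }

func (e *buildRefError) Error() string { return "build failed: " + e.Ref }

func main() {
	err := errors.Wrap(&buildRefError{Ref: "abc123"}, "while running bake")

	if _, ok := err.(*buildRefError); ok {
		fmt.Println("type assertion matched") // never prints: err is wrapped
	}

	var bre *buildRefError
	if errors.As(err, &bre) {
		fmt.Println("errors.As matched:", bre.Ref) // prints abc123
	}
}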
commands/bake.go (222 changed lines)
@@ -25,7 +25,6 @@ import (
 	"github.com/docker/buildx/controller/pb"
 	"github.com/docker/buildx/localstate"
 	"github.com/docker/buildx/util/buildflags"
-	"github.com/docker/buildx/util/cobrautil"
 	"github.com/docker/buildx/util/cobrautil/completion"
 	"github.com/docker/buildx/util/confutil"
 	"github.com/docker/buildx/util/desktop"
@@ -38,15 +37,14 @@ import (
 	"github.com/moby/buildkit/util/progress/progressui"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
+	"github.com/tonistiigi/go-csvvalue"
 	"go.opentelemetry.io/otel/attribute"
 )
 
 type bakeOptions struct {
 	files     []string
 	overrides []string
-	printOnly bool
-	listTargets bool
-	listVars bool
 	sbom       string
 	provenance string
 	allow      []string
@@ -56,12 +54,23 @@ type bakeOptions struct {
 	exportPush bool
 	exportLoad bool
 	callFunc   string
+
+	print bool
+	list  string
+
+	// TODO: remove deprecated flags
+	listTargets bool
+	listVars    bool
 }
 
 func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in bakeOptions, cFlags commonFlags) (err error) {
 	mp := dockerCli.MeterProvider()
 
-	ctx, end, err := tracing.TraceCurrentCommand(ctx, "bake")
+	ctx, end, err := tracing.TraceCurrentCommand(ctx, append([]string{"bake"}, targets...),
+		attribute.String("builder", in.builder),
+		attribute.StringSlice("targets", targets),
+		attribute.StringSlice("files", in.files),
+	)
 	if err != nil {
 		return err
 	}
@@ -107,16 +116,27 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
 	if err != nil {
 		return err
 	}
+	wd, err := os.Getwd()
+	if err != nil {
+		return errors.Wrapf(err, "failed to get current working directory")
+	}
+	// filesystem access under the current working directory is allowed by default
+	ent.FSRead = append(ent.FSRead, wd)
+	ent.FSWrite = append(ent.FSWrite, wd)
 
-	ctx2, cancel := context.WithCancel(context.TODO())
-	defer cancel()
+	ctx2, cancel := context.WithCancelCause(context.TODO())
+	defer cancel(errors.WithStack(context.Canceled))
 
 	var nodes []builder.Node
 	var progressConsoleDesc, progressTextDesc string
 
+	if in.print && in.list != "" {
+		return errors.New("--print and --list are mutually exclusive")
+	}
+
 	// instance only needed for reading remote bake files or building
 	var driverType string
-	if url != "" || !in.printOnly {
+	if url != "" || !(in.print || in.list != "") {
 		b, err := builder.New(dockerCli,
 			builder.WithName(in.builder),
 			builder.WithContextPathHash(contextPathHash),
@@ -177,7 +197,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
 		"BAKE_LOCAL_PLATFORM": platforms.Format(platforms.DefaultSpec()),
 	}
 
-	if in.listTargets || in.listVars {
+	if in.list != "" {
 		cfg, pm, err := bake.ParseFiles(files, defaults)
 		if err != nil {
 			return err
@@ -185,14 +205,19 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
 		if err = printer.Wait(); err != nil {
 			return err
 		}
-		if in.listTargets {
-			return printTargetList(dockerCli.Out(), cfg)
-		} else if in.listVars {
-			return printVars(dockerCli.Out(), pm.AllVariables)
+		list, err := parseList(in.list)
+		if err != nil {
+			return err
+		}
+		switch list.Type {
+		case "targets":
+			return printTargetList(dockerCli.Out(), list.Format, cfg)
+		case "variables":
+			return printVars(dockerCli.Out(), list.Format, pm.AllVariables)
 		}
 	}
 
-	tgts, grps, err := bake.ReadTargets(ctx, files, targets, overrides, defaults)
+	tgts, grps, err := bake.ReadTargets(ctx, files, targets, overrides, defaults, &ent)
 	if err != nil {
 		return err
 	}
@@ -224,7 +249,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
 		Target: tgts,
 	}
 
-	if in.printOnly {
+	if in.print {
 		if err = printer.Wait(); err != nil {
 			return err
 		}
@@ -250,9 +275,11 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
 	if err != nil {
 		return err
 	}
-	if err := exp.Prompt(ctx, &syncWriter{w: dockerCli.Err(), wait: printer.Wait}); err != nil {
-		return err
+	if progressMode != progressui.RawJSONMode {
+		if err := exp.Prompt(ctx, url != "", &syncWriter{w: dockerCli.Err(), wait: printer.Wait}); err != nil {
+			return err
+		}
 	}
 	if printer.IsDone() {
 		// init new printer as old one was stopped to show the prompt
 		if err := makePrinter(); err != nil {
@@ -260,12 +287,12 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
 		}
 	}
 
-	if err := saveLocalStateGroup(dockerCli, in, targets, bo, overrides, def); err != nil {
+	if err := saveLocalStateGroup(dockerCli, in, targets, bo); err != nil {
 		return err
 	}
 
 	done := timeBuildCommand(mp, attributes)
-	resp, retErr := build.Build(ctx, nodes, bo, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), printer)
+	resp, retErr := build.Build(ctx, nodes, bo, dockerutil.NewClient(dockerCli), confutil.NewConfig(dockerCli), printer)
 	if err := printer.Wait(); retErr == nil {
 		retErr = err
 	}
@@ -282,7 +309,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
 		desktop.PrintBuildDetails(os.Stderr, printer.BuildRefs(), term)
 	}
 	if len(in.metadataFile) > 0 {
-		dt := make(map[string]interface{})
+		dt := make(map[string]any)
 		for t, r := range resp {
 			dt[t] = decodeExporterResponse(r.ExporterResponse)
 		}
@@ -335,7 +362,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
 		if callFormatJSON {
 			jsonResults[name] = map[string]any{}
 			buf := &bytes.Buffer{}
-			if code, err := printResult(buf, pf, res); err != nil {
+			if code, err := printResult(buf, pf, res, name, &req.Inputs); err != nil {
 				jsonResults[name]["error"] = err.Error()
 				exitCode = 1
 			} else if code != 0 && exitCode == 0 {
@@ -361,7 +388,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
 		}
 
 		fmt.Fprintln(dockerCli.Out())
-		if code, err := printResult(dockerCli.Out(), pf, res); err != nil {
+		if code, err := printResult(dockerCli.Out(), pf, res, name, &req.Inputs); err != nil {
 			fmt.Fprintf(dockerCli.Out(), "error: %v\n", err)
 			exitCode = 1
 		} else if code != 0 && exitCode == 0 {
@@ -397,6 +424,14 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
 		fmt.Fprintln(dockerCli.Out(), string(dt))
 	}
 
+	for _, name := range names {
+		if sp, ok := resp[name]; ok {
+			if v, ok := sp.ExporterResponse["frontend.result.inlinemessage"]; ok {
+				fmt.Fprintf(dockerCli.Out(), "\n# %s\n%s\n", name, v)
+			}
+		}
+	}
+
 	if exitCode != 0 {
 		os.Exit(exitCode)
 	}
@@ -420,6 +455,13 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
 			if !cmd.Flags().Lookup("pull").Changed {
 				cFlags.pull = nil
 			}
+			if options.list == "" {
+				if options.listTargets {
+					options.list = "targets"
+				} else if options.listVars {
+					options.list = "variables"
+				}
+			}
 			options.builder = rootOpts.builder
 			options.metadataFile = cFlags.metadataFile
 			// Other common flags (noCache, pull and progress) are processed in runBake function.
@@ -432,7 +474,6 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
 
 	flags.StringArrayVarP(&options.files, "file", "f", []string{}, "Build definition file")
 	flags.BoolVar(&options.exportLoad, "load", false, `Shorthand for "--set=*.output=type=docker"`)
-	flags.BoolVar(&options.printOnly, "print", false, "Print the options without building")
 	flags.BoolVar(&options.exportPush, "push", false, `Shorthand for "--set=*.output=type=registry"`)
 	flags.StringVar(&options.sbom, "sbom", "", `Shorthand for "--set=*.attest=type=sbom"`)
 	flags.StringVar(&options.provenance, "provenance", "", `Shorthand for "--set=*.attest=type=provenance"`)
@@ -443,20 +484,30 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
 	flags.VarPF(callAlias(&options.callFunc, "check"), "check", "", `Shorthand for "--call=check"`)
 	flags.Lookup("check").NoOptDefVal = "true"
 
-	flags.BoolVar(&options.listTargets, "list-targets", false, "List available targets")
-	cobrautil.MarkFlagsExperimental(flags, "list-targets")
-	flags.MarkHidden("list-targets")
+	flags.BoolVar(&options.print, "print", false, "Print the options without building")
+	flags.StringVar(&options.list, "list", "", "List targets or variables")
 
+	// TODO: remove deprecated flags
+	flags.BoolVar(&options.listTargets, "list-targets", false, "List available targets")
+	flags.MarkHidden("list-targets")
+	flags.MarkDeprecated("list-targets", "list-targets is deprecated, use list=targets instead")
 	flags.BoolVar(&options.listVars, "list-variables", false, "List defined variables")
-	cobrautil.MarkFlagsExperimental(flags, "list-variables")
 	flags.MarkHidden("list-variables")
+	flags.MarkDeprecated("list-variables", "list-variables is deprecated, use list=variables instead")
 
 	commonBuildFlags(&cFlags, flags)
 
 	return cmd
 }
 
-func saveLocalStateGroup(dockerCli command.Cli, in bakeOptions, targets []string, bo map[string]build.Options, overrides []string, def any) error {
+func saveLocalStateGroup(dockerCli command.Cli, in bakeOptions, targets []string, bo map[string]build.Options) error {
+	l, err := localstate.New(confutil.NewConfig(dockerCli))
+	if err != nil {
+		return err
+	}
+	defer l.MigrateIfNeeded()
+
 	prm := confutil.MetadataProvenance()
 	if len(in.metadataFile) == 0 {
 		prm = confutil.MetadataProvenanceModeDisabled
@@ -464,25 +515,22 @@ func saveLocalStateGroup(dockerCli command.Cli, in bakeOptions, targets []string
 	groupRef := identity.NewID()
 	refs := make([]string, 0, len(bo))
 	for k, b := range bo {
+		if b.CallFunc != nil {
+			continue
+		}
 		b.Ref = identity.NewID()
 		b.GroupRef = groupRef
 		b.ProvenanceResponseMode = prm
 		refs = append(refs, b.Ref)
 		bo[k] = b
 	}
-	l, err := localstate.New(confutil.ConfigDir(dockerCli))
-	if err != nil {
-		return err
-	}
-	dtdef, err := json.MarshalIndent(def, "", "  ")
-	if err != nil {
-		return err
+	if len(refs) == 0 {
+		return nil
 	}
 
 	return l.SaveGroup(groupRef, localstate.StateGroup{
-		Definition: dtdef,
-		Targets:    targets,
-		Inputs:     overrides,
 		Refs:       refs,
+		Targets:    targets,
 	})
 }
 
@@ -544,10 +592,70 @@ func readBakeFiles(ctx context.Context, nodes []builder.Node, url string, names
 	return
 }
 
-func printVars(w io.Writer, vars []*hclparser.Variable) error {
+type listEntry struct {
+	Type   string
+	Format string
+}
+
+func parseList(input string) (listEntry, error) {
+	res := listEntry{}
+
+	fields, err := csvvalue.Fields(input, nil)
+	if err != nil {
+		return res, err
+	}
+
+	if len(fields) == 1 && fields[0] == input && !strings.HasPrefix(input, "type=") {
+		res.Type = input
+	}
+
+	if res.Type == "" {
+		for _, field := range fields {
+			key, value, ok := strings.Cut(field, "=")
+			if !ok {
+				return res, errors.Errorf("invalid value %s", field)
+			}
+			key = strings.TrimSpace(strings.ToLower(key))
+			switch key {
+			case "type":
+				res.Type = value
+			case "format":
+				res.Format = value
+			default:
+				return res, errors.Errorf("unexpected key '%s' in '%s'", key, field)
+			}
+		}
+	}
+	if res.Format == "" {
+		res.Format = "table"
+	}
+
+	switch res.Type {
+	case "targets", "variables":
+	default:
+		return res, errors.Errorf("invalid list type %q", res.Type)
+	}
+
+	switch res.Format {
+	case "table", "json":
+	default:
+		return res, errors.Errorf("invalid list format %q", res.Format)
+	}
+
+	return res, nil
+}
+
+func printVars(w io.Writer, format string, vars []*hclparser.Variable) error {
 	slices.SortFunc(vars, func(a, b *hclparser.Variable) int {
 		return cmp.Compare(a.Name, b.Name)
 	})
+
+	if format == "json" {
+		enc := json.NewEncoder(w)
+		enc.SetIndent("", "  ")
+		return enc.Encode(vars)
+	}
+
 	tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
 	defer tw.Flush()
 
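parseList above accepts either a bare type (--list=targets) or CSV key=value pairs (--list=type=variables,format=json). Expected behavior for a few inputs, written as an illustrative testify-style test against the function as added in this hunk:

func TestParseListExamples(t *testing.T) {
	le, err := parseList("targets")
	require.NoError(t, err)
	require.Equal(t, listEntry{Type: "targets", Format: "table"}, le) // format defaults to table

	le, err = parseList("type=variables,format=json")
	require.NoError(t, err)
	require.Equal(t, listEntry{Type: "variables", Format: "json"}, le)

	_, err = parseList("type=targets,format=yaml")
	require.Error(t, err) // invalid list format "yaml"
}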
@@ -565,12 +673,7 @@ func printVars(w io.Writer, vars []*hclparser.Variable) error {
 	return nil
 }
 
-func printTargetList(w io.Writer, cfg *bake.Config) error {
-	tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
-	defer tw.Flush()
-
-	tw.Write([]byte("TARGET\tDESCRIPTION\n"))
-
+func printTargetList(w io.Writer, format string, cfg *bake.Config) error {
 	type targetOrGroup struct {
 		name   string
 		target *bake.Target
@@ -589,6 +692,20 @@ func printTargetList(w io.Writer, cfg *bake.Config) error {
 		return cmp.Compare(a.name, b.name)
 	})
 
+	var tw *tabwriter.Writer
+	if format == "table" {
+		tw = tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
+		defer tw.Flush()
+		tw.Write([]byte("TARGET\tDESCRIPTION\n"))
+	}
+
+	type targetList struct {
+		Name        string `json:"name"`
+		Description string `json:"description,omitempty"`
+		Group       bool   `json:"group,omitempty"`
+	}
+	var targetsList []targetList
+
 	for _, tgt := range list {
 		if strings.HasPrefix(tgt.name, "_") {
 			// convention for a private target
@@ -597,9 +714,9 @@ func printTargetList(w io.Writer, cfg *bake.Config) error {
 		var descr string
 		if tgt.target != nil {
 			descr = tgt.target.Description
+			targetsList = append(targetsList, targetList{Name: tgt.name, Description: descr})
 		} else if tgt.group != nil {
 			descr = tgt.group.Description
-
 			if len(tgt.group.Targets) > 0 {
 				slices.Sort(tgt.group.Targets)
 				names := strings.Join(tgt.group.Targets, ", ")
@@ -609,9 +726,18 @@ func printTargetList(w io.Writer, cfg *bake.Config) error {
 				descr = names
 			}
+			targetsList = append(targetsList, targetList{Name: tgt.name, Description: descr, Group: true})
 		}
-		fmt.Fprintf(tw, "%s\t%s\n", tgt.name, descr)
+		if format == "table" {
+			fmt.Fprintf(tw, "%s\t%s\n", tgt.name, descr)
+		}
 	}
 
+	if format == "json" {
+		enc := json.NewEncoder(w)
+		enc.SetIndent("", "  ")
+		return enc.Encode(targetsList)
+	}
+
 	return nil
 }
@@ -621,7 +747,7 @@ func bakeMetricAttributes(dockerCli command.Cli, driverType, url, cmdContext str
 		commandNameAttribute.String("bake"),
 		attribute.Stringer(string(commandOptionsHash), &bakeOptionsHash{
 			bakeOptions: options,
-			configDir:   confutil.ConfigDir(dockerCli),
+			cfg:         confutil.NewConfig(dockerCli),
 			url:         url,
 			cmdContext:  cmdContext,
 			targets:     targets,
@@ -633,7 +759,7 @@ func bakeMetricAttributes(dockerCli command.Cli, driverType, url, cmdContext str
 
 type bakeOptionsHash struct {
 	*bakeOptions
-	configDir  string
+	cfg        *confutil.Config
 	url        string
 	cmdContext string
 	targets    []string
@@ -657,7 +783,7 @@ func (o *bakeOptionsHash) String() string {
 
 	joinedFiles := strings.Join(files, ",")
 	joinedTargets := strings.Join(targets, ",")
-	salt := confutil.TryNodeIdentifier(o.configDir)
+	salt := o.cfg.TryNodeIdentifier()
 
 	h := sha256.New()
 	for _, s := range []string{url, cmdContext, joinedFiles, joinedTargets, salt} {
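The last two hunks show the telemetry-hash idiom these commands use: option strings are hashed together with a per-node salt (TryNodeIdentifier), so reported digests are stable on one machine but not correlatable across machines and never expose raw option values. A reduced, hedged sketch of the idiom (salt value and empty-field handling here are illustrative, not the exact buildx code):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// optionsHash mirrors the String() methods above: hash the option fields
// together with a node-local salt.
func optionsHash(salt string, fields ...string) string {
	h := sha256.New()
	for _, s := range fields {
		if s == "" {
			continue
		}
		h.Write([]byte(s))
	}
	h.Write([]byte(salt))
	return hex.EncodeToString(h.Sum(nil))
}

func main() {
	fmt.Println(optionsHash("node-local-salt", "https://example.com/repo.git", "docker-bake.hcl", "default"))
}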
@@ -11,6 +11,7 @@ import (
 	"io"
 	"os"
 	"path/filepath"
+	"slices"
 	"strconv"
 	"strings"
 	"sync"
@@ -41,7 +42,7 @@ import (
 	"github.com/docker/cli/cli/command"
 	dockeropts "github.com/docker/cli/opts"
 	"github.com/docker/docker/api/types/versions"
-	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/atomicwriter"
 	"github.com/moby/buildkit/client"
 	"github.com/moby/buildkit/exporter/containerimage/exptypes"
 	"github.com/moby/buildkit/frontend/subrequests"
@@ -49,6 +50,7 @@ import (
 	"github.com/moby/buildkit/frontend/subrequests/outline"
 	"github.com/moby/buildkit/frontend/subrequests/targets"
 	"github.com/moby/buildkit/solver/errdefs"
+	solverpb "github.com/moby/buildkit/solver/pb"
 	"github.com/moby/buildkit/util/grpcerrors"
 	"github.com/moby/buildkit/util/progress/progressui"
 	"github.com/morikuni/aec"
@@ -60,6 +62,7 @@ import (
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/metric"
 	"google.golang.org/grpc/codes"
+	"google.golang.org/protobuf/proto"
 )
 
 type buildOptions struct {
@@ -154,7 +157,7 @@ func (o *buildOptions) toControllerOptions() (*controllerapi.BuildOptions, error
 		return nil, err
 	}
 
-	inAttests := append([]string{}, o.attests...)
+	inAttests := slices.Clone(o.attests)
 	if o.provenance != "" {
 		inAttests = append(inAttests, buildflags.CanonicalizeAttest("provenance", o.provenance))
 	}
@@ -181,14 +184,17 @@ func (o *buildOptions) toControllerOptions() (*controllerapi.BuildOptions, error
 		}
 	}
 
-	opts.CacheFrom, err = buildflags.ParseCacheEntry(o.cacheFrom)
+	cacheFrom, err := buildflags.ParseCacheEntry(o.cacheFrom)
 	if err != nil {
 		return nil, err
 	}
-	opts.CacheTo, err = buildflags.ParseCacheEntry(o.cacheTo)
+	opts.CacheFrom = cacheFrom.ToPB()
+
+	cacheTo, err := buildflags.ParseCacheEntry(o.cacheTo)
 	if err != nil {
 		return nil, err
 	}
+	opts.CacheTo = cacheTo.ToPB()
 
 	opts.Secrets, err = buildflags.ParseSecretSpecs(o.secrets)
 	if err != nil {
@@ -236,7 +242,7 @@ func buildMetricAttributes(dockerCli command.Cli, driverType string, options *bu
 		commandNameAttribute.String("build"),
 		attribute.Stringer(string(commandOptionsHash), &buildOptionsHash{
 			buildOptions: options,
-			configDir:    confutil.ConfigDir(dockerCli),
+			cfg:          confutil.NewConfig(dockerCli),
 		}),
 		driverNameAttribute.String(options.builder),
 		driverTypeAttribute.String(driverType),
@@ -248,7 +254,7 @@ func buildMetricAttributes(dockerCli command.Cli, driverType string, options *bu
 // the fmt.Stringer interface.
 type buildOptionsHash struct {
 	*buildOptions
-	configDir  string
+	cfg        *confutil.Config
 	result     string
 	resultOnce sync.Once
 }
@@ -265,7 +271,7 @@ func (o *buildOptionsHash) String() string {
 	if contextPath != "-" && osutil.IsLocalDir(contextPath) {
 		contextPath = osutil.ToAbs(contextPath)
 	}
-	salt := confutil.TryNodeIdentifier(o.configDir)
+	salt := o.cfg.TryNodeIdentifier()
 
 	h := sha256.New()
 	for _, s := range []string{target, contextPath, dockerfile, salt} {
@@ -280,7 +286,11 @@ func (o *buildOptionsHash) String() string {
 func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions) (err error) {
 	mp := dockerCli.MeterProvider()
 
-	ctx, end, err := tracing.TraceCurrentCommand(ctx, "build")
+	ctx, end, err := tracing.TraceCurrentCommand(ctx, []string{"build", options.contextPath},
+		attribute.String("builder", options.builder),
+		attribute.String("context", options.contextPath),
+		attribute.String("dockerfile", options.dockerfileName),
+	)
 	if err != nil {
 		return err
 	}
@@ -323,8 +333,8 @@ func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions)
 	}
 	attributes := buildMetricAttributes(dockerCli, driverType, &options)
 
-	ctx2, cancel := context.WithCancel(context.TODO())
-	defer cancel()
+	ctx2, cancel := context.WithCancelCause(context.TODO())
+	defer func() { cancel(errors.WithStack(context.Canceled)) }()
 	progressMode, err := options.toDisplayMode()
 	if err != nil {
 		return err
@@ -346,11 +356,12 @@ func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions)
 
 	done := timeBuildCommand(mp, attributes)
 	var resp *client.SolveResponse
+	var inputs *build.Inputs
 	var retErr error
 	if confutil.IsExperimental() {
-		resp, retErr = runControllerBuild(ctx, dockerCli, opts, options, printer)
+		resp, inputs, retErr = runControllerBuild(ctx, dockerCli, opts, options, printer)
 	} else {
-		resp, retErr = runBasicBuild(ctx, dockerCli, opts, printer)
+		resp, inputs, retErr = runBasicBuild(ctx, dockerCli, opts, printer)
 	}
 
 	if err := printer.Wait(); retErr == nil {
@@ -387,12 +398,16 @@ func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions)
 		}
 	}
 	if opts.CallFunc != nil {
-		if exitcode, err := printResult(dockerCli.Out(), opts.CallFunc, resp.ExporterResponse); err != nil {
+		if exitcode, err := printResult(dockerCli.Out(), opts.CallFunc, resp.ExporterResponse, options.target, inputs); err != nil {
 			return err
 		} else if exitcode != 0 {
 			os.Exit(exitcode)
 		}
 	}
+	if v, ok := resp.ExporterResponse["frontend.result.inlinemessage"]; ok {
+		fmt.Fprintf(dockerCli.Out(), "\n%s\n", v)
+		return nil
+	}
 	return nil
 }
@@ -405,22 +420,22 @@ func getImageID(resp map[string]string) string {
 	return dgst
 }
 
-func runBasicBuild(ctx context.Context, dockerCli command.Cli, opts *controllerapi.BuildOptions, printer *progress.Printer) (*client.SolveResponse, error) {
-	resp, res, err := cbuild.RunBuild(ctx, dockerCli, *opts, dockerCli.In(), printer, false)
+func runBasicBuild(ctx context.Context, dockerCli command.Cli, opts *controllerapi.BuildOptions, printer *progress.Printer) (*client.SolveResponse, *build.Inputs, error) {
+	resp, res, dfmap, err := cbuild.RunBuild(ctx, dockerCli, opts, dockerCli.In(), printer, false)
 	if res != nil {
 		res.Done()
 	}
-	return resp, err
+	return resp, dfmap, err
 }
 
-func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *controllerapi.BuildOptions, options buildOptions, printer *progress.Printer) (*client.SolveResponse, error) {
+func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *controllerapi.BuildOptions, options buildOptions, printer *progress.Printer) (*client.SolveResponse, *build.Inputs, error) {
 	if options.invokeConfig != nil && (options.dockerfileName == "-" || options.contextPath == "-") {
 		// stdin must be usable for monitor
-		return nil, errors.Errorf("Dockerfile or context from stdin is not supported with invoke")
+		return nil, nil, errors.Errorf("Dockerfile or context from stdin is not supported with invoke")
 	}
 	c, err := controller.NewController(ctx, options.ControlOptions, dockerCli, printer)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 	defer func() {
 		if err := c.Close(); err != nil {
@@ -432,12 +447,13 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *contro
 	// so we need to resolve paths to abosolute ones in the client.
 	opts, err = controllerapi.ResolveOptionPaths(opts)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 
 	var ref string
 	var retErr error
 	var resp *client.SolveResponse
+	var inputs *build.Inputs
 
 	var f *ioset.SingleForwarder
 	var pr io.ReadCloser
@@ -455,15 +471,15 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *contro
 		})
 	}
 
-	ref, resp, err = c.Build(ctx, *opts, pr, printer)
+	ref, resp, inputs, err = c.Build(ctx, opts, pr, printer)
 	if err != nil {
 		var be *controllererrors.BuildError
 		if errors.As(err, &be) {
-			ref = be.Ref
+			ref = be.SessionID
 			retErr = err
 			// We can proceed to monitor
 		} else {
-			return nil, errors.Wrapf(err, "failed to build")
+			return nil, nil, errors.Wrapf(err, "failed to build")
 		}
 	}
@@ -504,7 +520,7 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *contro
 		}
 	}
 
-	return resp, retErr
+	return resp, inputs, retErr
 }
 
 func printError(err error, printer *progress.Printer) error {
@@ -586,7 +602,7 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions, debugConfig *debug.D
 
 	flags.StringSliceVar(&options.extraHosts, "add-host", []string{}, `Add a custom host-to-IP mapping (format: "host:ip")`)
 
-	flags.StringSliceVar(&options.allow, "allow", []string{}, `Allow extra privileged entitlement (e.g., "network.host", "security.insecure")`)
+	flags.StringArrayVar(&options.allow, "allow", []string{}, `Allow extra privileged entitlement (e.g., "network.host", "security.insecure")`)
 
 	flags.StringArrayVarP(&options.annotations, "annotation", "", []string{}, "Add annotation to the image")
 
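The switch from StringSliceVar to StringArrayVar for --allow is behavioral, not cosmetic: pflag's slice variant splits each occurrence on commas, while the array variant stores every occurrence verbatim. A minimal demonstration:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	var sl, arr []string
	fs.StringSliceVar(&sl, "slice", nil, "")
	fs.StringArrayVar(&arr, "array", nil, "")
	_ = fs.Parse([]string{"--slice=a,b", "--array=a,b"})

	fmt.Println(sl)  // [a b]  — split on the comma
	fmt.Println(arr) // [a,b]  — kept as a single element
}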
@@ -716,7 +732,7 @@ type commonFlags struct {
|
|||||||
|
|
||||||
func commonBuildFlags(options *commonFlags, flags *pflag.FlagSet) {
|
func commonBuildFlags(options *commonFlags, flags *pflag.FlagSet) {
|
||||||
options.noCache = flags.Bool("no-cache", false, "Do not use cache when building the image")
|
options.noCache = flags.Bool("no-cache", false, "Do not use cache when building the image")
|
||||||
flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty", "rawjson"). Use plain to show container output`)
|
flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "quiet", "plain", "tty", "rawjson"). Use plain to show container output`)
|
||||||
options.pull = flags.Bool("pull", false, "Always attempt to pull all referenced images")
|
options.pull = flags.Bool("pull", false, "Always attempt to pull all referenced images")
|
||||||
flags.StringVar(&options.metadataFile, "metadata-file", "", "Write build result metadata to a file")
|
flags.StringVar(&options.metadataFile, "metadata-file", "", "Write build result metadata to a file")
|
||||||
}
|
}
|
||||||
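The `--allow` flag switching from StringSliceVar to StringArrayVar is a behavioral fix, not a rename: pflag's slice variant splits each occurrence on commas, while the array variant keeps every occurrence verbatim. A minimal standalone sketch of the difference (flag names here are illustrative):

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	var slice, array []string
	fs.StringSliceVar(&slice, "slice", nil, "comma-split")
	fs.StringArrayVar(&array, "array", nil, "kept verbatim")
	_ = fs.Parse([]string{"--slice=a,b", "--array=a,b"})
	fmt.Println(slice) // [a b] — one flag, two values
	fmt.Println(array) // [a,b] — one flag, one value
}
```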
@@ -733,15 +749,15 @@ func checkWarnedFlags(f *pflag.Flag) {
 	}
 }
 
-func writeMetadataFile(filename string, dt interface{}) error {
+func writeMetadataFile(filename string, dt any) error {
 	b, err := json.MarshalIndent(dt, "", "  ")
 	if err != nil {
 		return err
 	}
-	return ioutils.AtomicWriteFile(filename, b, 0644)
+	return atomicwriter.WriteFile(filename, b, 0644)
 }
 
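`ioutils.AtomicWriteFile` is replaced by `atomicwriter.WriteFile`, a relocation of the same atomic-write helper. For reference, the general technique such helpers rely on is write-to-temp-then-rename; the sketch below illustrates that pattern on POSIX systems and is not the library's actual implementation:

```go
package main

import (
	"os"
	"path/filepath"
)

// atomicWrite sketches the usual write-to-temp-then-rename pattern:
// readers never observe a half-written file because rename(2) swaps
// the complete content into place in a single step.
func atomicWrite(filename string, data []byte, perm os.FileMode) error {
	tmp, err := os.CreateTemp(filepath.Dir(filename), ".tmp-*")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // no-op after a successful rename
	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Sync(); err != nil { // flush before the swap
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	if err := os.Chmod(tmp.Name(), perm); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), filename)
}
```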
-func decodeExporterResponse(exporterResponse map[string]string) map[string]interface{} {
+func decodeExporterResponse(exporterResponse map[string]string) map[string]any {
 	decFunc := func(k, v string) ([]byte, error) {
 		if k == "result.json" {
 			// result.json is part of metadata response for subrequests which
@@ -750,18 +766,21 @@ func decodeExporterResponse(exporterResponse map[string]string) map[string]inter
 		}
 		return base64.StdEncoding.DecodeString(v)
 	}
-	out := make(map[string]interface{})
+	out := make(map[string]any)
 	for k, v := range exporterResponse {
 		dt, err := decFunc(k, v)
 		if err != nil {
 			out[k] = v
 			continue
 		}
-		var raw map[string]interface{}
+		var raw map[string]any
 		if err = json.Unmarshal(dt, &raw); err != nil || len(raw) == 0 {
+			var rawList []map[string]any
+			if err = json.Unmarshal(dt, &rawList); err != nil || len(rawList) == 0 {
 				out[k] = v
 				continue
 			}
+		}
 		out[k] = json.RawMessage(dt)
 	}
 	return out
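The new `rawList` branch lets exporter values that decode to a JSON array (not just an object) pass through as raw JSON instead of staying base64-encoded. A self-contained sketch with hypothetical exporter values:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical exporter values: one decodes to a JSON object, the
	// other to a JSON array — the case the new rawList branch now passes
	// through as json.RawMessage instead of leaving it base64-encoded.
	obj := base64.StdEncoding.EncodeToString([]byte(`{"digest":"sha256:abc"}`))
	list := base64.StdEncoding.EncodeToString([]byte(`[{"name":"a"},{"name":"b"}]`))

	for _, v := range []string{obj, list} {
		dt, _ := base64.StdEncoding.DecodeString(v)
		var raw map[string]any
		if err := json.Unmarshal(dt, &raw); err != nil || len(raw) == 0 {
			var rawList []map[string]any
			if err := json.Unmarshal(dt, &rawList); err != nil || len(rawList) == 0 {
				fmt.Println("kept as opaque string")
				continue
			}
		}
		fmt.Println("kept as raw JSON:", string(dt))
	}
}
```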
@@ -878,11 +897,10 @@ func printWarnings(w io.Writer, warnings []client.VertexWarning, mode progressui
 			src.Print(w)
 		}
 		fmt.Fprintf(w, "\n")
 
 	}
 }
 
-func printResult(w io.Writer, f *controllerapi.CallFunc, res map[string]string) (int, error) {
+func printResult(w io.Writer, f *controllerapi.CallFunc, res map[string]string, target string, inp *build.Inputs) (int, error) {
 	switch f.Name {
 	case "outline":
 		return 0, printValue(w, outline.PrintOutline, outline.SubrequestsOutlineDefinition.Version, f.Format, res)
@@ -908,8 +926,27 @@ func printResult(w io.Writer, f *controllerapi.CallFunc, res map[string]string)
 			}
 			fmt.Fprintf(w, "Check complete, %s\n", warningCountMsg)
 		}
-		err := printValue(w, printLintViolationsWrapper, lint.SubrequestLintDefinition.Version, f.Format, res)
+		sourceInfoMap := func(sourceInfo *solverpb.SourceInfo) *solverpb.SourceInfo {
+			if sourceInfo == nil || inp == nil {
+				return sourceInfo
+			}
+			if target == "" {
+				target = "default"
+			}
+
+			if inp.DockerfileMappingSrc != "" {
+				newSourceInfo := proto.Clone(sourceInfo).(*solverpb.SourceInfo)
+				newSourceInfo.Filename = inp.DockerfileMappingSrc
+				return newSourceInfo
+			}
+			return sourceInfo
+		}
+
+		printLintWarnings := func(dt []byte, w io.Writer) error {
+			return lintResults.PrintTo(w, sourceInfoMap)
+		}
+
+		err := printValue(w, printLintWarnings, lint.SubrequestLintDefinition.Version, f.Format, res)
 		if err != nil {
 			return 0, err
 		}
@@ -924,13 +961,8 @@ func printResult(w io.Writer, f *controllerapi.CallFunc, res map[string]string)
 		if f.Format != "json" && len(lintResults.Warnings) > 0 {
 			fmt.Fprintln(w)
 		}
-		lintBuf := bytes.NewBuffer([]byte(lintResults.Error.Message + "\n"))
-		sourceInfo := lintResults.Sources[lintResults.Error.Location.SourceIndex]
-		source := errdefs.Source{
-			Info:   sourceInfo,
-			Ranges: lintResults.Error.Location.Ranges,
-		}
-		source.Print(lintBuf)
+		lintBuf := bytes.NewBuffer(nil)
+		lintResults.PrintErrorTo(lintBuf, sourceInfoMap)
 		return 0, errors.New(lintBuf.String())
 	} else if len(lintResults.Warnings) == 0 && f.Format != "json" {
 		fmt.Fprintln(w, "Check complete, no warnings found.")
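The sourceInfoMap closure above clones the proto message before overriding `Filename`, so the cached `SourceInfo` shared by other warnings is never mutated. A runnable illustration of that clone-before-mutate pattern, using `structpb.Value` as a stand-in for BuildKit's `SourceInfo` message:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// proto.Clone deep-copies the message, so the original — which other
	// results may still reference — stays untouched while the copy gets
	// the remapped field.
	orig := structpb.NewStringValue("Dockerfile")
	clone := proto.Clone(orig).(*structpb.Value)
	clone.Kind = &structpb.Value_StringValue{StringValue: "app/Dockerfile"}
	fmt.Println(orig.GetStringValue())  // Dockerfile
	fmt.Println(clone.GetStringValue()) // app/Dockerfile
}
```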
@@ -968,11 +1000,6 @@ func printValue(w io.Writer, printer callFunc, version string, format string, re
 		return printer([]byte(res["result.json"]), w)
 	}
 
-// FIXME: remove once https://github.com/docker/buildx/pull/2672 is sorted
-func printLintViolationsWrapper(dt []byte, w io.Writer) error {
-	return lint.PrintLintViolations(dt, w, nil)
-}
-
 type invokeConfig struct {
 	controllerapi.InvokeConfig
 	onFlag string
@@ -1000,7 +1027,7 @@ func (cfg *invokeConfig) runDebug(ctx context.Context, ref string, options *cont
 		return nil, errors.Errorf("failed to configure terminal: %v", err)
 	}
 	defer con.Reset()
-	return monitor.RunMonitor(ctx, ref, options, cfg.InvokeConfig, c, stdin, stdout, stderr, progress)
+	return monitor.RunMonitor(ctx, ref, options, &cfg.InvokeConfig, c, stdin, stdout, stderr, progress)
 }
 
 func (cfg *invokeConfig) parseInvokeConfig(invoke, on string) error {
@@ -64,7 +64,7 @@ func RootCmd(dockerCli command.Cli, children ...DebuggableCmd) *cobra.Command {
 			return errors.Errorf("failed to configure terminal: %v", err)
 		}
 
-		_, err = monitor.RunMonitor(ctx, "", nil, controllerapi.InvokeConfig{
+		_, err = monitor.RunMonitor(ctx, "", nil, &controllerapi.InvokeConfig{
 			Tty: true,
 		}, c, dockerCli.In(), os.Stdout, os.Stderr, printer)
 		con.Reset()
@@ -124,7 +124,7 @@ func duCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
 	return cmd
 }
 
-func printKV(w io.Writer, k string, v interface{}) {
+func printKV(w io.Writer, k string, v any) {
 	fmt.Fprintf(w, "%s:\t%v\n", k, v)
 }
 
commands/history/export.go (new file, 160 lines)

package history

import (
	"context"
	"io"
	"os"
	"slices"

	"github.com/containerd/console"
	"github.com/containerd/platforms"
	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/localstate"
	"github.com/docker/buildx/util/cobrautil/completion"
	"github.com/docker/buildx/util/confutil"
	"github.com/docker/buildx/util/desktop/bundle"
	"github.com/docker/cli/cli/command"
	"github.com/moby/buildkit/client"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

type exportOptions struct {
	builder string
	refs    []string
	output  string
	all     bool
}

func runExport(ctx context.Context, dockerCli command.Cli, opts exportOptions) error {
	b, err := builder.New(dockerCli, builder.WithName(opts.builder))
	if err != nil {
		return err
	}

	nodes, err := b.LoadNodes(ctx, builder.WithData())
	if err != nil {
		return err
	}
	for _, node := range nodes {
		if node.Err != nil {
			return node.Err
		}
	}

	if len(opts.refs) == 0 {
		opts.refs = []string{""}
	}

	var res []historyRecord
	for _, ref := range opts.refs {
		recs, err := queryRecords(ctx, ref, nodes, &queryOptions{
			CompletedOnly: true,
		})
		if err != nil {
			return err
		}

		if len(recs) == 0 {
			if ref == "" {
				return errors.New("no records found")
			}
			return errors.Errorf("no record found for ref %q", ref)
		}

		if ref == "" {
			slices.SortFunc(recs, func(a, b historyRecord) int {
				return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
			})
		}

		if opts.all {
			res = append(res, recs...)
			break
		} else {
			res = append(res, recs[0])
		}
	}

	ls, err := localstate.New(confutil.NewConfig(dockerCli))
	if err != nil {
		return err
	}

	visited := map[*builder.Node]struct{}{}
	var clients []*client.Client
	for _, rec := range res {
		if _, ok := visited[rec.node]; ok {
			continue
		}
		visited[rec.node] = struct{}{}
		c, err := rec.node.Driver.Client(ctx)
		if err != nil {
			return err
		}
		clients = append(clients, c)
	}

	toExport := make([]*bundle.Record, 0, len(res))
	for _, rec := range res {
		var defaultPlatform string
		if p := rec.node.Platforms; len(p) > 0 {
			defaultPlatform = platforms.FormatAll(platforms.Normalize(p[0]))
		}

		var stg *localstate.StateGroup
		st, _ := ls.ReadRef(rec.node.Builder, rec.node.Name, rec.Ref)
		if st != nil && st.GroupRef != "" {
			stg, err = ls.ReadGroup(st.GroupRef)
			if err != nil {
				return err
			}
		}

		toExport = append(toExport, &bundle.Record{
			BuildHistoryRecord: rec.BuildHistoryRecord,
			DefaultPlatform:    defaultPlatform,
			LocalState:         st,
			StateGroup:         stg,
		})
	}

	var w io.Writer = os.Stdout
	if opts.output != "" {
		f, err := os.Create(opts.output)
		if err != nil {
			return errors.Wrapf(err, "failed to create output file %q", opts.output)
		}
		defer f.Close()
		w = f
	} else {
		if _, err := console.ConsoleFromFile(os.Stdout); err == nil {
			return errors.Errorf("refusing to write to console, use --output to specify a file")
		}
	}

	return bundle.Export(ctx, clients, w, toExport)
}

func exportCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
	var options exportOptions

	cmd := &cobra.Command{
		Use:   "export [OPTIONS] [REF]",
		Short: "Export a build into Docker Desktop bundle",
		RunE: func(cmd *cobra.Command, args []string) error {
			if options.all && len(args) > 0 {
				return errors.New("cannot specify refs when using --all")
			}
			options.refs = args
			options.builder = *rootOpts.Builder
			return runExport(cmd.Context(), dockerCli, options)
		},
		ValidArgsFunction: completion.Disable,
	}

	flags := cmd.Flags()
	flags.StringVarP(&options.output, "output", "o", "", "Output file path")
	flags.BoolVar(&options.all, "all", false, "Export all records for the builder")

	return cmd
}
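With the flags defined above, a typical invocation would be `docker buildx history export --all -o builds.dockerbuild` (filename illustrative); note the guard that refuses to write the bundle to a terminal, which makes `--output` effectively mandatory in interactive use.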
commands/history/import.go (new file, 135 lines)

package history

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net"
	"net/http"
	"os"
	"strings"

	remoteutil "github.com/docker/buildx/driver/remote/util"
	"github.com/docker/buildx/util/cobrautil/completion"
	"github.com/docker/buildx/util/desktop"
	"github.com/docker/cli/cli/command"
	"github.com/pkg/browser"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

type importOptions struct {
	file []string
}

func runImport(ctx context.Context, dockerCli command.Cli, opts importOptions) error {
	sock, err := desktop.BuildServerAddr()
	if err != nil {
		return err
	}

	tr := http.DefaultTransport.(*http.Transport).Clone()
	tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) {
		network, addr, ok := strings.Cut(sock, "://")
		if !ok {
			return nil, errors.Errorf("invalid endpoint address: %s", sock)
		}
		return remoteutil.DialContext(ctx, network, addr)
	}

	client := &http.Client{
		Transport: tr,
	}

	var urls []string

	if len(opts.file) == 0 {
		u, err := importFrom(ctx, client, os.Stdin)
		if err != nil {
			return err
		}
		urls = append(urls, u...)
	} else {
		for _, fn := range opts.file {
			var f *os.File
			var rdr io.Reader = os.Stdin
			if fn != "-" {
				f, err = os.Open(fn)
				if err != nil {
					return errors.Wrapf(err, "failed to open file %s", fn)
				}
				rdr = f
			}
			u, err := importFrom(ctx, client, rdr)
			if err != nil {
				return err
			}
			urls = append(urls, u...)
			if f != nil {
				f.Close()
			}
		}
	}

	if len(urls) == 0 {
		return errors.New("no build records found in the bundle")
	}

	for i, url := range urls {
		fmt.Fprintln(dockerCli.Err(), url)
		if i == 0 {
			err = browser.OpenURL(url)
		}
	}
	return err
}

func importFrom(ctx context.Context, c *http.Client, rdr io.Reader) ([]string, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, "http://docker-desktop/upload", rdr)
	if err != nil {
		return nil, errors.Wrap(err, "failed to create request")
	}

	resp, err := c.Do(req)
	if err != nil {
		return nil, errors.Wrap(err, "failed to send request, check if Docker Desktop is running")
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		return nil, errors.Errorf("failed to import build: %s", string(body))
	}

	var refs []string
	dec := json.NewDecoder(resp.Body)
	if err := dec.Decode(&refs); err != nil {
		return nil, errors.Wrap(err, "failed to decode response")
	}

	var urls []string
	for _, ref := range refs {
		urls = append(urls, desktop.BuildURL(fmt.Sprintf(".imported/_/%s", ref)))
	}
	return urls, err
}

func importCmd(dockerCli command.Cli, _ RootOptions) *cobra.Command {
	var options importOptions

	cmd := &cobra.Command{
		Use:   "import [OPTIONS] < bundle.dockerbuild",
		Short: "Import a build into Docker Desktop",
		Args:  cobra.NoArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			return runImport(cmd.Context(), dockerCli, options)
		},
		ValidArgsFunction: completion.Disable,
	}

	flags := cmd.Flags()
	flags.StringArrayVarP(&options.file, "file", "f", nil, "Import from a file path")

	return cmd
}
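runImport pins every HTTP request to Docker Desktop's local build server by overriding the transport's `DialContext`, so the request URL's host (`docker-desktop`) is purely cosmetic. A minimal standalone sketch of the same transport trick; the socket path is a placeholder, not Docker Desktop's real endpoint:

```go
package main

import (
	"context"
	"fmt"
	"net"
	"net/http"
)

func main() {
	// Every request, regardless of its URL host, is dialed to one fixed
	// local endpoint — the same shape as the tr.DialContext override above.
	tr := http.DefaultTransport.(*http.Transport).Clone()
	tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) {
		var d net.Dialer
		return d.DialContext(ctx, "unix", "/tmp/example.sock") // placeholder socket
	}
	c := &http.Client{Transport: tr}

	// The host part is only cosmetic once DialContext is pinned.
	resp, err := c.Get("http://placeholder/upload")
	if err != nil {
		fmt.Println("dial failed (expected without a listener):", err)
		return
	}
	resp.Body.Close()
}
```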
commands/history/inspect.go (new file, 893 lines)

package history

import (
	"bytes"
	"cmp"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"slices"
	"strconv"
	"strings"
	"text/tabwriter"
	"text/template"
	"time"

	"github.com/containerd/containerd/v2/core/content"
	"github.com/containerd/containerd/v2/core/content/proxy"
	"github.com/containerd/containerd/v2/core/images"
	"github.com/containerd/platforms"
	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/localstate"
	"github.com/docker/buildx/util/cobrautil/completion"
	"github.com/docker/buildx/util/confutil"
	"github.com/docker/buildx/util/desktop"
	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/command/formatter"
	"github.com/docker/cli/cli/debug"
	slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common"
	slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2"
	controlapi "github.com/moby/buildkit/api/services/control"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/solver/errdefs"
	provenancetypes "github.com/moby/buildkit/solver/llbsolver/provenance/types"
	"github.com/moby/buildkit/util/grpcerrors"
	"github.com/moby/buildkit/util/stack"
	"github.com/opencontainers/go-digest"
	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"github.com/tonistiigi/go-csvvalue"
	spb "google.golang.org/genproto/googleapis/rpc/status"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	proto "google.golang.org/protobuf/proto"
)

type statusT string

const (
	statusComplete statusT = "completed"
	statusRunning  statusT = "running"
	statusError    statusT = "failed"
	statusCanceled statusT = "canceled"
)

type inspectOptions struct {
	builder string
	ref     string
	format  string
}

type inspectOutput struct {
	Name string `json:",omitempty"`
	Ref  string

	Context       string   `json:",omitempty"`
	Dockerfile    string   `json:",omitempty"`
	VCSRepository string   `json:",omitempty"`
	VCSRevision   string   `json:",omitempty"`
	Target        string   `json:",omitempty"`
	Platform      []string `json:",omitempty"`
	KeepGitDir    bool     `json:",omitempty"`

	NamedContexts []keyValueOutput `json:",omitempty"`

	StartedAt   *time.Time    `json:",omitempty"`
	CompletedAt *time.Time    `json:",omitempty"`
	Duration    time.Duration `json:",omitempty"`
	Status      statusT       `json:",omitempty"`
	Error       *errorOutput  `json:",omitempty"`

	NumCompletedSteps int32
	NumTotalSteps     int32
	NumCachedSteps    int32

	BuildArgs []keyValueOutput `json:",omitempty"`
	Labels    []keyValueOutput `json:",omitempty"`

	Config configOutput `json:",omitempty"`

	Materials   []materialOutput   `json:",omitempty"`
	Attachments []attachmentOutput `json:",omitempty"`

	Errors []string `json:",omitempty"`
}

type configOutput struct {
	Network          string   `json:",omitempty"`
	ExtraHosts       []string `json:",omitempty"`
	Hostname         string   `json:",omitempty"`
	CgroupParent     string   `json:",omitempty"`
	ImageResolveMode string   `json:",omitempty"`
	MultiPlatform    bool     `json:",omitempty"`
	NoCache          bool     `json:",omitempty"`
	NoCacheFilter    []string `json:",omitempty"`

	ShmSize               string `json:",omitempty"`
	Ulimit                string `json:",omitempty"`
	CacheMountNS          string `json:",omitempty"`
	DockerfileCheckConfig string `json:",omitempty"`
	SourceDateEpoch       string `json:",omitempty"`
	SandboxHostname       string `json:",omitempty"`

	RestRaw []keyValueOutput `json:",omitempty"`
}

type materialOutput struct {
	URI     string   `json:",omitempty"`
	Digests []string `json:",omitempty"`
}

type attachmentOutput struct {
	Digest   string `json:",omitempty"`
	Platform string `json:",omitempty"`
	Type     string `json:",omitempty"`
}

type errorOutput struct {
	Code    int      `json:",omitempty"`
	Message string   `json:",omitempty"`
	Name    string   `json:",omitempty"`
	Logs    []string `json:",omitempty"`
	Sources []byte   `json:",omitempty"`
	Stack   []byte   `json:",omitempty"`
}

type keyValueOutput struct {
	Name  string `json:",omitempty"`
	Value string `json:",omitempty"`
}

func readAttr[T any](attrs map[string]string, k string, dest *T, f func(v string) (T, bool)) {
	if sv, ok := attrs[k]; ok {
		if f != nil {
			v, ok := f(sv)
			if ok {
				*dest = v
			}
		}
		if d, ok := any(dest).(*string); ok {
			*d = sv
		}
	}
	delete(attrs, k)
}

func runInspect(ctx context.Context, dockerCli command.Cli, opts inspectOptions) error {
	b, err := builder.New(dockerCli, builder.WithName(opts.builder))
	if err != nil {
		return err
	}

	nodes, err := b.LoadNodes(ctx)
	if err != nil {
		return err
	}
	for _, node := range nodes {
		if node.Err != nil {
			return node.Err
		}
	}

	recs, err := queryRecords(ctx, opts.ref, nodes, nil)
	if err != nil {
		return err
	}

	if len(recs) == 0 {
		if opts.ref == "" {
			return errors.New("no records found")
		}
		return errors.Errorf("no record found for ref %q", opts.ref)
	}

	rec := &recs[0]
	c, err := rec.node.Driver.Client(ctx)
	if err != nil {
		return err
	}

	store := proxy.NewContentStore(c.ContentClient())

	var defaultPlatform string
	workers, err := c.ListWorkers(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to list workers")
	}
workers0:
	for _, w := range workers {
		for _, p := range w.Platforms {
			defaultPlatform = platforms.FormatAll(platforms.Normalize(p))
			break workers0
		}
	}

	ls, err := localstate.New(confutil.NewConfig(dockerCli))
	if err != nil {
		return err
	}
	st, _ := ls.ReadRef(rec.node.Builder, rec.node.Name, rec.Ref)

	attrs := rec.FrontendAttrs
	delete(attrs, "frontend.caps")

	var out inspectOutput

	var context string
	var dockerfile string
	if st != nil {
		context = st.LocalPath
		dockerfile = st.DockerfilePath
		wd, _ := os.Getwd()

		if dockerfile != "" && dockerfile != "-" {
			if rel, err := filepath.Rel(context, dockerfile); err == nil {
				if !strings.HasPrefix(rel, ".."+string(filepath.Separator)) {
					dockerfile = rel
				}
			}
		}
		if context != "" {
			if rel, err := filepath.Rel(wd, context); err == nil {
				if !strings.HasPrefix(rel, ".."+string(filepath.Separator)) {
					context = rel
				}
			}
		}
	}

	if v, ok := attrs["context"]; ok && context == "" {
		delete(attrs, "context")
		context = v
	}
	if dockerfile == "" {
		if v, ok := attrs["filename"]; ok {
			dockerfile = v
			if dfdir, ok := attrs["vcs:localdir:dockerfile"]; ok {
				dockerfile = filepath.Join(dfdir, dockerfile)
			}
		}
	}
	delete(attrs, "filename")

	out.Name = buildName(rec.FrontendAttrs, st)
	out.Ref = rec.Ref

	out.Context = context
	out.Dockerfile = dockerfile

	if _, ok := attrs["context"]; !ok {
		if src, ok := attrs["vcs:source"]; ok {
			out.VCSRepository = src
		}
		if rev, ok := attrs["vcs:revision"]; ok {
			out.VCSRevision = rev
		}
	}

	readAttr(attrs, "target", &out.Target, nil)

	readAttr(attrs, "platform", &out.Platform, func(v string) ([]string, bool) {
		return tryParseValue(v, &out.Errors, func(v string) ([]string, error) {
			var pp []string
			for _, v := range strings.Split(v, ",") {
				p, err := platforms.Parse(v)
				if err != nil {
					return nil, err
				}
				pp = append(pp, platforms.FormatAll(platforms.Normalize(p)))
			}
			if len(pp) == 0 {
				pp = append(pp, defaultPlatform)
			}
			return pp, nil
		})
	})

	readAttr(attrs, "build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR", &out.KeepGitDir, func(v string) (bool, bool) {
		return tryParseValue(v, &out.Errors, strconv.ParseBool)
	})

	out.NamedContexts = readKeyValues(attrs, "context:")

	if rec.CreatedAt != nil {
		tm := rec.CreatedAt.AsTime().Local()
		out.StartedAt = &tm
	}
	out.Status = statusRunning

	if rec.CompletedAt != nil {
		tm := rec.CompletedAt.AsTime().Local()
		out.CompletedAt = &tm
		out.Status = statusComplete
	}

	if rec.Error != nil || rec.ExternalError != nil {
		out.Error = &errorOutput{}
		if rec.Error != nil {
			if codes.Code(rec.Error.Code) == codes.Canceled {
				out.Status = statusCanceled
			} else {
				out.Status = statusError
			}
			out.Error.Code = int(codes.Code(rec.Error.Code))
			out.Error.Message = rec.Error.Message
		}
		if rec.ExternalError != nil {
			dt, err := content.ReadBlob(ctx, store, ociDesc(rec.ExternalError))
			if err != nil {
				return errors.Wrapf(err, "failed to read external error %s", rec.ExternalError.Digest)
			}
			var st spb.Status
			if err := proto.Unmarshal(dt, &st); err != nil {
				return errors.Wrapf(err, "failed to unmarshal external error %s", rec.ExternalError.Digest)
			}
			retErr := grpcerrors.FromGRPC(status.ErrorProto(&st))
			var errsources bytes.Buffer
			for _, s := range errdefs.Sources(retErr) {
				s.Print(&errsources)
				errsources.WriteString("\n")
			}
			out.Error.Sources = errsources.Bytes()
			var ve *errdefs.VertexError
			if errors.As(retErr, &ve) {
				dgst, err := digest.Parse(ve.Vertex.Digest)
				if err != nil {
					return errors.Wrapf(err, "failed to parse vertex digest %s", ve.Vertex.Digest)
				}
				name, logs, err := loadVertexLogs(ctx, c, rec.Ref, dgst, 16)
				if err != nil {
					return errors.Wrapf(err, "failed to load vertex logs %s", dgst)
				}
				out.Error.Name = name
				out.Error.Logs = logs
			}
			out.Error.Stack = fmt.Appendf(nil, "%+v", stack.Formatter(retErr))
		}
	}

	if out.StartedAt != nil {
		if out.CompletedAt != nil {
			out.Duration = out.CompletedAt.Sub(*out.StartedAt)
		} else {
			out.Duration = rec.currentTimestamp.Sub(*out.StartedAt)
		}
	}

	out.NumCompletedSteps = rec.NumCompletedSteps
	out.NumTotalSteps = rec.NumTotalSteps
	out.NumCachedSteps = rec.NumCachedSteps

	out.BuildArgs = readKeyValues(attrs, "build-arg:")
	out.Labels = readKeyValues(attrs, "label:")

	readAttr(attrs, "force-network-mode", &out.Config.Network, nil)
	readAttr(attrs, "hostname", &out.Config.Hostname, nil)
	readAttr(attrs, "cgroup-parent", &out.Config.CgroupParent, nil)
	readAttr(attrs, "image-resolve-mode", &out.Config.ImageResolveMode, nil)
	readAttr(attrs, "build-arg:BUILDKIT_MULTI_PLATFORM", &out.Config.MultiPlatform, func(v string) (bool, bool) {
		return tryParseValue(v, &out.Errors, strconv.ParseBool)
	})
	readAttr(attrs, "multi-platform", &out.Config.MultiPlatform, func(v string) (bool, bool) {
		return tryParseValue(v, &out.Errors, strconv.ParseBool)
	})
	readAttr(attrs, "no-cache", &out.Config.NoCache, func(v string) (bool, bool) {
		if v == "" {
			return true, true
		}
		return false, false
	})
	readAttr(attrs, "no-cache", &out.Config.NoCacheFilter, func(v string) ([]string, bool) {
		if v == "" {
			return nil, false
		}
		return strings.Split(v, ","), true
	})

	readAttr(attrs, "add-hosts", &out.Config.ExtraHosts, func(v string) ([]string, bool) {
		return tryParseValue(v, &out.Errors, func(v string) ([]string, error) {
			fields, err := csvvalue.Fields(v, nil)
			if err != nil {
				return nil, err
			}
			return fields, nil
		})
	})

	readAttr(attrs, "shm-size", &out.Config.ShmSize, nil)
	readAttr(attrs, "ulimit", &out.Config.Ulimit, nil)
	readAttr(attrs, "build-arg:BUILDKIT_CACHE_MOUNT_NS", &out.Config.CacheMountNS, nil)
	readAttr(attrs, "build-arg:BUILDKIT_DOCKERFILE_CHECK", &out.Config.DockerfileCheckConfig, nil)
	readAttr(attrs, "build-arg:SOURCE_DATE_EPOCH", &out.Config.SourceDateEpoch, nil)
	readAttr(attrs, "build-arg:SANDBOX_HOSTNAME", &out.Config.SandboxHostname, nil)

	var unusedAttrs []keyValueOutput
	for k := range attrs {
		if strings.HasPrefix(k, "vcs:") || strings.HasPrefix(k, "build-arg:") || strings.HasPrefix(k, "label:") || strings.HasPrefix(k, "context:") || strings.HasPrefix(k, "attest:") {
			continue
		}
		unusedAttrs = append(unusedAttrs, keyValueOutput{
			Name:  k,
			Value: attrs[k],
		})
	}
	slices.SortFunc(unusedAttrs, func(a, b keyValueOutput) int {
		return cmp.Compare(a.Name, b.Name)
	})
	out.Config.RestRaw = unusedAttrs

	attachments, err := allAttachments(ctx, store, *rec)
	if err != nil {
		return err
	}

	provIndex := slices.IndexFunc(attachments, func(a attachment) bool {
		return descrType(a.descr) == slsa02.PredicateSLSAProvenance
	})
	if provIndex != -1 {
		prov := attachments[provIndex]
		dt, err := content.ReadBlob(ctx, store, prov.descr)
		if err != nil {
			return errors.Errorf("failed to read provenance %s: %v", prov.descr.Digest, err)
		}
		var pred provenancetypes.ProvenancePredicate
		if err := json.Unmarshal(dt, &pred); err != nil {
			return errors.Errorf("failed to unmarshal provenance %s: %v", prov.descr.Digest, err)
		}
		for _, m := range pred.Materials {
			out.Materials = append(out.Materials, materialOutput{
				URI:     m.URI,
				Digests: digestSetToDigests(m.Digest),
			})
		}
	}

	if len(attachments) > 0 {
		for _, a := range attachments {
			p := ""
			if a.platform != nil {
				p = platforms.FormatAll(*a.platform)
			}
			out.Attachments = append(out.Attachments, attachmentOutput{
				Digest:   a.descr.Digest.String(),
				Platform: p,
				Type:     descrType(a.descr),
			})
		}
	}

	if opts.format == formatter.JSONFormatKey {
		enc := json.NewEncoder(dockerCli.Out())
		enc.SetIndent("", "  ")
		return enc.Encode(out)
	} else if opts.format != formatter.PrettyFormatKey {
		tmpl, err := template.New("inspect").Parse(opts.format)
		if err != nil {
			return errors.Wrapf(err, "failed to parse format template")
		}
		var buf bytes.Buffer
		if err := tmpl.Execute(&buf, out); err != nil {
			return errors.Wrapf(err, "failed to execute format template")
		}
		fmt.Fprintln(dockerCli.Out(), buf.String())
		return nil
	}

	tw := tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)

	if out.Name != "" {
		fmt.Fprintf(tw, "Name:\t%s\n", out.Name)
	}
	if opts.ref == "" && out.Ref != "" {
		fmt.Fprintf(tw, "Ref:\t%s\n", out.Ref)
	}
	if out.Context != "" {
		fmt.Fprintf(tw, "Context:\t%s\n", out.Context)
	}
	if out.Dockerfile != "" {
		fmt.Fprintf(tw, "Dockerfile:\t%s\n", out.Dockerfile)
	}
	if out.VCSRepository != "" {
		fmt.Fprintf(tw, "VCS Repository:\t%s\n", out.VCSRepository)
	}
	if out.VCSRevision != "" {
		fmt.Fprintf(tw, "VCS Revision:\t%s\n", out.VCSRevision)
	}

	if out.Target != "" {
		fmt.Fprintf(tw, "Target:\t%s\n", out.Target)
	}

	if len(out.Platform) > 0 {
		fmt.Fprintf(tw, "Platforms:\t%s\n", strings.Join(out.Platform, ", "))
	}

	if out.KeepGitDir {
		fmt.Fprintf(tw, "Keep Git Dir:\t%s\n", strconv.FormatBool(out.KeepGitDir))
	}

	tw.Flush()

	fmt.Fprintln(dockerCli.Out())

	printTable(dockerCli.Out(), out.NamedContexts, "Named Context")

	tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)

	fmt.Fprintf(tw, "Started:\t%s\n", out.StartedAt.Format("2006-01-02 15:04:05"))
	var statusStr string
	if out.Status == statusRunning {
		statusStr = " (running)"
	}
	fmt.Fprintf(tw, "Duration:\t%s%s\n", formatDuration(out.Duration), statusStr)

	if out.Status == statusError {
		fmt.Fprintf(tw, "Error:\t%s %s\n", codes.Code(rec.Error.Code).String(), rec.Error.Message)
	} else if out.Status == statusCanceled {
		fmt.Fprintf(tw, "Status:\tCanceled\n")
	}

	fmt.Fprintf(tw, "Build Steps:\t%d/%d (%.0f%% cached)\n", out.NumCompletedSteps, out.NumTotalSteps, float64(out.NumCachedSteps)/float64(out.NumTotalSteps)*100)
	tw.Flush()

	fmt.Fprintln(dockerCli.Out())

	tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)

	if out.Config.Network != "" {
		fmt.Fprintf(tw, "Network:\t%s\n", out.Config.Network)
	}
	if out.Config.Hostname != "" {
		fmt.Fprintf(tw, "Hostname:\t%s\n", out.Config.Hostname)
	}
	if len(out.Config.ExtraHosts) > 0 {
		fmt.Fprintf(tw, "Extra Hosts:\t%s\n", strings.Join(out.Config.ExtraHosts, ", "))
	}
	if out.Config.CgroupParent != "" {
		fmt.Fprintf(tw, "Cgroup Parent:\t%s\n", out.Config.CgroupParent)
	}
	if out.Config.ImageResolveMode != "" {
		fmt.Fprintf(tw, "Image Resolve Mode:\t%s\n", out.Config.ImageResolveMode)
	}
	if out.Config.MultiPlatform {
		fmt.Fprintf(tw, "Multi-Platform:\t%s\n", strconv.FormatBool(out.Config.MultiPlatform))
	}
	if out.Config.NoCache {
		fmt.Fprintf(tw, "No Cache:\t%s\n", strconv.FormatBool(out.Config.NoCache))
	}
	if len(out.Config.NoCacheFilter) > 0 {
		fmt.Fprintf(tw, "No Cache Filter:\t%s\n", strings.Join(out.Config.NoCacheFilter, ", "))
	}

	if out.Config.ShmSize != "" {
		fmt.Fprintf(tw, "Shm Size:\t%s\n", out.Config.ShmSize)
	}
	if out.Config.Ulimit != "" {
		fmt.Fprintf(tw, "Resource Limits:\t%s\n", out.Config.Ulimit)
	}
	if out.Config.CacheMountNS != "" {
		fmt.Fprintf(tw, "Cache Mount Namespace:\t%s\n", out.Config.CacheMountNS)
	}
	if out.Config.DockerfileCheckConfig != "" {
		fmt.Fprintf(tw, "Dockerfile Check Config:\t%s\n", out.Config.DockerfileCheckConfig)
	}
	if out.Config.SourceDateEpoch != "" {
		fmt.Fprintf(tw, "Source Date Epoch:\t%s\n", out.Config.SourceDateEpoch)
	}
	if out.Config.SandboxHostname != "" {
		fmt.Fprintf(tw, "Sandbox Hostname:\t%s\n", out.Config.SandboxHostname)
	}

	for _, kv := range out.Config.RestRaw {
		fmt.Fprintf(tw, "%s:\t%s\n", kv.Name, kv.Value)
	}

	tw.Flush()

	fmt.Fprintln(dockerCli.Out())

	printTable(dockerCli.Out(), out.BuildArgs, "Build Arg")
	printTable(dockerCli.Out(), out.Labels, "Label")

	if len(out.Materials) > 0 {
		fmt.Fprintln(dockerCli.Out(), "Materials:")
		tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
		fmt.Fprintf(tw, "URI\tDIGEST\n")
		for _, m := range out.Materials {
			fmt.Fprintf(tw, "%s\t%s\n", m.URI, strings.Join(m.Digests, ", "))
		}
		tw.Flush()
		fmt.Fprintln(dockerCli.Out())
	}

	if len(out.Attachments) > 0 {
		fmt.Fprintf(tw, "Attachments:\n")
		tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
		fmt.Fprintf(tw, "DIGEST\tPLATFORM\tTYPE\n")
		for _, a := range out.Attachments {
			fmt.Fprintf(tw, "%s\t%s\t%s\n", a.Digest, a.Platform, a.Type)
		}
		tw.Flush()
		fmt.Fprintln(dockerCli.Out())
	}

	if out.Error != nil {
		if out.Error.Sources != nil {
			fmt.Fprint(dockerCli.Out(), string(out.Error.Sources))
		}
		if len(out.Error.Logs) > 0 {
			fmt.Fprintln(dockerCli.Out(), "Logs:")
			fmt.Fprintf(dockerCli.Out(), "> => %s:\n", out.Error.Name)
			for _, l := range out.Error.Logs {
				fmt.Fprintln(dockerCli.Out(), "> "+l)
			}
			fmt.Fprintln(dockerCli.Out())
		}
		if len(out.Error.Stack) > 0 {
			if debug.IsEnabled() {
				fmt.Fprintf(dockerCli.Out(), "\n%s\n", out.Error.Stack)
			} else {
				fmt.Fprintf(dockerCli.Out(), "Enable --debug to see stack traces for error\n")
			}
		}
	}

	fmt.Fprintf(dockerCli.Out(), "Print build logs: docker buildx history logs %s\n", rec.Ref)

	fmt.Fprintf(dockerCli.Out(), "View build in Docker Desktop: %s\n", desktop.BuildURL(fmt.Sprintf("%s/%s/%s", rec.node.Builder, rec.node.Name, rec.Ref)))

	return nil
}

func inspectCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
	var options inspectOptions

	cmd := &cobra.Command{
		Use:   "inspect [OPTIONS] [REF]",
		Short: "Inspect a build",
		Args:  cobra.MaximumNArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) > 0 {
				options.ref = args[0]
			}
			options.builder = *rootOpts.Builder
			return runInspect(cmd.Context(), dockerCli, options)
		},
		ValidArgsFunction: completion.Disable,
	}

	cmd.AddCommand(
		attachmentCmd(dockerCli, rootOpts),
	)

	flags := cmd.Flags()
	flags.StringVar(&options.format, "format", formatter.PrettyFormatKey, "Format the output")

	return cmd
}

func loadVertexLogs(ctx context.Context, c *client.Client, ref string, dgst digest.Digest, limit int) (string, []string, error) {
	st, err := c.ControlClient().Status(ctx, &controlapi.StatusRequest{
		Ref: ref,
	})
	if err != nil {
		return "", nil, err
	}

	var name string
	var logs []string
	lastState := map[int]int{}

loop0:
	for {
		select {
		case <-ctx.Done():
			st.CloseSend()
			return "", nil, context.Cause(ctx)
		default:
			ev, err := st.Recv()
			if err != nil {
				if errors.Is(err, io.EOF) {
					break loop0
				}
				return "", nil, err
			}
			ss := client.NewSolveStatus(ev)
			for _, v := range ss.Vertexes {
				if v.Digest == dgst {
					name = v.Name
					break
				}
			}
			for _, l := range ss.Logs {
				if l.Vertex == dgst {
					parts := bytes.Split(l.Data, []byte("\n"))
					for i, p := range parts {
						var wrote bool
						if i == 0 {
							idx, ok := lastState[l.Stream]
							if ok && idx != -1 {
								logs[idx] = logs[idx] + string(p)
								wrote = true
							}
						}
						if !wrote {
							if len(p) > 0 {
								logs = append(logs, string(p))
							}
							lastState[l.Stream] = len(logs) - 1
						}
						if i == len(parts)-1 && len(p) == 0 {
							lastState[l.Stream] = -1
						}
					}
				}
			}
		}
	}

	if limit > 0 && len(logs) > limit {
		logs = logs[len(logs)-limit:]
	}

	return name, logs, nil
}

type attachment struct {
	platform *ocispecs.Platform
	descr    ocispecs.Descriptor
}

func allAttachments(ctx context.Context, store content.Store, rec historyRecord) ([]attachment, error) {
	var attachments []attachment

	if rec.Result != nil {
		for _, a := range rec.Result.Attestations {
			attachments = append(attachments, attachment{
				descr: ociDesc(a),
			})
		}
		for _, r := range rec.Result.Results {
			attachments = append(attachments, walkAttachments(ctx, store, ociDesc(r), nil)...)
		}
	}

	for key, ri := range rec.Results {
		p, err := platforms.Parse(key)
		if err != nil {
			return nil, err
		}
		for _, a := range ri.Attestations {
			attachments = append(attachments, attachment{
				platform: &p,
				descr:    ociDesc(a),
			})
		}
		for _, r := range ri.Results {
			attachments = append(attachments, walkAttachments(ctx, store, ociDesc(r), &p)...)
		}
	}

	slices.SortFunc(attachments, func(a, b attachment) int {
		pCmp := 0
		if a.platform == nil && b.platform != nil {
			return -1
		} else if a.platform != nil && b.platform == nil {
			return 1
		} else if a.platform != nil && b.platform != nil {
			pCmp = cmp.Compare(platforms.FormatAll(*a.platform), platforms.FormatAll(*b.platform))
		}
		return cmp.Or(
			pCmp,
			cmp.Compare(descrType(a.descr), descrType(b.descr)),
		)
	})

	return attachments, nil
}

func walkAttachments(ctx context.Context, store content.Store, desc ocispecs.Descriptor, platform *ocispecs.Platform) []attachment {
	_, err := store.Info(ctx, desc.Digest)
	if err != nil {
		return nil
	}

	var out []attachment

	if desc.Annotations["vnd.docker.reference.type"] != "attestation-manifest" {
		out = append(out, attachment{platform: platform, descr: desc})
	}

	if desc.MediaType != ocispecs.MediaTypeImageIndex && desc.MediaType != images.MediaTypeDockerSchema2ManifestList {
		return out
	}

	dt, err := content.ReadBlob(ctx, store, desc)
	if err != nil {
		return out
	}

	var idx ocispecs.Index
	if err := json.Unmarshal(dt, &idx); err != nil {
		return out
	}

	for _, d := range idx.Manifests {
		p := platform
		if d.Platform != nil {
			p = d.Platform
		}
		out = append(out, walkAttachments(ctx, store, d, p)...)
	}

	return out
}

func ociDesc(in *controlapi.Descriptor) ocispecs.Descriptor {
	return ocispecs.Descriptor{
		MediaType:   in.MediaType,
		Digest:      digest.Digest(in.Digest),
		Size:        in.Size,
		Annotations: in.Annotations,
	}
}

func descrType(desc ocispecs.Descriptor) string {
	if typ, ok := desc.Annotations["in-toto.io/predicate-type"]; ok {
		return typ
	}
	return desc.MediaType
}

func tryParseValue[T any](s string, errs *[]string, f func(string) (T, error)) (T, bool) {
	v, err := f(s)
	if err != nil {
		errStr := fmt.Sprintf("failed to parse %s: (%v)", s, err)
		*errs = append(*errs, errStr)
	}
	return v, true
}

func printTable(w io.Writer, kvs []keyValueOutput, title string) {
	if len(kvs) == 0 {
		return
	}

	tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
	fmt.Fprintf(tw, "%s\tVALUE\n", strings.ToUpper(title))
	for _, k := range kvs {
		fmt.Fprintf(tw, "%s\t%s\n", k.Name, k.Value)
	}
	tw.Flush()
	fmt.Fprintln(w)
}

func readKeyValues(attrs map[string]string, prefix string) []keyValueOutput {
	var out []keyValueOutput
	for k, v := range attrs {
		if strings.HasPrefix(k, prefix) {
			out = append(out, keyValueOutput{
				Name:  strings.TrimPrefix(k, prefix),
				Value: v,
			})
		}
	}
	if len(out) == 0 {
		return nil
	}
	slices.SortFunc(out, func(a, b keyValueOutput) int {
		return cmp.Compare(a.Name, b.Name)
	})
	return out
}

func digestSetToDigests(ds slsa.DigestSet) []string {
	var out []string
	for k, v := range ds {
		out = append(out, fmt.Sprintf("%s:%s", k, v))
	}
	return out
}
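The generic readAttr above is the workhorse of runInspect: it pops a key out of the frontend-attribute map, optionally parsing it into a typed field, so whatever remains in the map can be dumped as `RestRaw`. A runnable driver around a copy of the helper:

```go
package main

import (
	"fmt"
	"strconv"
)

// readAttr mirrors the generic helper above: look up a key, optionally
// parse it into T, and always delete it from the map so the leftovers
// can later be reported as "rest" attributes.
func readAttr[T any](attrs map[string]string, k string, dest *T, f func(string) (T, bool)) {
	if sv, ok := attrs[k]; ok {
		if f != nil {
			if v, ok := f(sv); ok {
				*dest = v
			}
		}
		if d, ok := any(dest).(*string); ok {
			*d = sv // string destinations take the raw value directly
		}
	}
	delete(attrs, k)
}

func main() {
	attrs := map[string]string{"no-cache": "true", "target": "release"}
	var noCache bool
	var target string
	readAttr(attrs, "no-cache", &noCache, func(v string) (bool, bool) {
		b, err := strconv.ParseBool(v)
		return b, err == nil
	})
	readAttr(attrs, "target", &target, nil)
	fmt.Println(noCache, target, attrs) // true release map[]
}
```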
commands/history/inspect_attachment.go (new file, 145 lines)

package history

import (
	"context"
	"io"

	"github.com/containerd/containerd/v2/core/content/proxy"
	"github.com/containerd/platforms"
	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/util/cobrautil/completion"
	"github.com/docker/cli/cli/command"
	intoto "github.com/in-toto/in-toto-golang/in_toto"
	slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2"
	"github.com/opencontainers/go-digest"
	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

type attachmentOptions struct {
	builder  string
	typ      string
	platform string
	ref      string
	digest   digest.Digest
}

func runAttachment(ctx context.Context, dockerCli command.Cli, opts attachmentOptions) error {
	b, err := builder.New(dockerCli, builder.WithName(opts.builder))
	if err != nil {
		return err
	}

	nodes, err := b.LoadNodes(ctx)
	if err != nil {
		return err
	}
	for _, node := range nodes {
		if node.Err != nil {
			return node.Err
		}
	}

	recs, err := queryRecords(ctx, opts.ref, nodes, nil)
	if err != nil {
		return err
	}

	if len(recs) == 0 {
		if opts.ref == "" {
			return errors.New("no records found")
		}
		return errors.Errorf("no record found for ref %q", opts.ref)
	}

	rec := &recs[0]

	c, err := rec.node.Driver.Client(ctx)
	if err != nil {
		return err
	}

	store := proxy.NewContentStore(c.ContentClient())

	if opts.digest != "" {
		ra, err := store.ReaderAt(ctx, ocispecs.Descriptor{Digest: opts.digest})
		if err != nil {
			return err
		}
		_, err = io.Copy(dockerCli.Out(), io.NewSectionReader(ra, 0, ra.Size()))
		return err
	}

	attachments, err := allAttachments(ctx, store, *rec)
	if err != nil {
		return err
	}

	typ := opts.typ
	switch typ {
	case "index":
		typ = ocispecs.MediaTypeImageIndex
	case "manifest":
		typ = ocispecs.MediaTypeImageManifest
	case "image":
		typ = ocispecs.MediaTypeImageConfig
	case "provenance":
		typ = slsa02.PredicateSLSAProvenance
	case "sbom":
		typ = intoto.PredicateSPDX
	}

	for _, a := range attachments {
		if opts.platform != "" && (a.platform == nil || platforms.FormatAll(*a.platform) != opts.platform) {
			continue
		}
		if typ != "" && descrType(a.descr) != typ {
			continue
		}
		ra, err := store.ReaderAt(ctx, a.descr)
		if err != nil {
			return err
		}
		_, err = io.Copy(dockerCli.Out(), io.NewSectionReader(ra, 0, ra.Size()))
		return err
	}

	return errors.Errorf("no matching attachment found for ref %q", opts.ref)
}

func attachmentCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
	var options attachmentOptions

	cmd := &cobra.Command{
		Use:   "attachment [OPTIONS] REF [DIGEST]",
		Short: "Inspect a build attachment",
		Args:  cobra.RangeArgs(1, 2),
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) > 0 {
				options.ref = args[0]
			}
			if len(args) > 1 {
				dgst, err := digest.Parse(args[1])
				if err != nil {
					return errors.Wrapf(err, "invalid digest %q", args[1])
				}
				options.digest = dgst
			}

			if options.digest == "" && options.platform == "" && options.typ == "" {
				return errors.New("at least one of --type, --platform or DIGEST must be specified")
			}

			options.builder = *rootOpts.Builder
			return runAttachment(cmd.Context(), dockerCli, options)
		},
		ValidArgsFunction: completion.Disable,
	}

	flags := cmd.Flags()
	flags.StringVar(&options.typ, "type", "", "Type of attachment")
	flags.StringVar(&options.platform, "platform", "", "Platform of attachment")

	return cmd
}
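Both output paths in runAttachment stream a content-store blob with the same shape: wrap the `ReaderAt` in an `io.SectionReader` spanning its full size and `io.Copy` it out. A tiny self-contained illustration (an in-memory `ReaderAt` stands in for the store):

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	// strings.Reader implements io.ReaderAt; the SectionReader turns the
	// random-access blob into a plain io.Reader covering bytes [0, size).
	blob := "blob bytes"
	var ra io.ReaderAt = strings.NewReader(blob)
	if _, err := io.Copy(os.Stdout, io.NewSectionReader(ra, 0, int64(len(blob)))); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```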
commands/history/logs.go (new file, 117 lines)
@@ -0,0 +1,117 @@
package history

import (
	"context"
	"io"
	"os"

	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/util/cobrautil/completion"
	"github.com/docker/buildx/util/progress"
	"github.com/docker/cli/cli/command"
	controlapi "github.com/moby/buildkit/api/services/control"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/util/progress/progressui"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

type logsOptions struct {
	builder  string
	ref      string
	progress string
}

func runLogs(ctx context.Context, dockerCli command.Cli, opts logsOptions) error {
	b, err := builder.New(dockerCli, builder.WithName(opts.builder))
	if err != nil {
		return err
	}

	nodes, err := b.LoadNodes(ctx)
	if err != nil {
		return err
	}
	for _, node := range nodes {
		if node.Err != nil {
			return node.Err
		}
	}

	recs, err := queryRecords(ctx, opts.ref, nodes, nil)
	if err != nil {
		return err
	}

	if len(recs) == 0 {
		if opts.ref == "" {
			return errors.New("no records found")
		}
		return errors.Errorf("no record found for ref %q", opts.ref)
	}

	rec := &recs[0]
	c, err := rec.node.Driver.Client(ctx)
	if err != nil {
		return err
	}

	cl, err := c.ControlClient().Status(ctx, &controlapi.StatusRequest{
		Ref: rec.Ref,
	})
	if err != nil {
		return err
	}

	var mode progressui.DisplayMode = progressui.DisplayMode(opts.progress)
	if mode == progressui.AutoMode {
		mode = progressui.PlainMode
	}
	printer, err := progress.NewPrinter(context.TODO(), os.Stderr, mode)
	if err != nil {
		return err
	}

loop0:
	for {
		select {
		case <-ctx.Done():
			cl.CloseSend()
			return context.Cause(ctx)
		default:
			ev, err := cl.Recv()
			if err != nil {
				if errors.Is(err, io.EOF) {
					break loop0
				}
				return err
			}
			printer.Write(client.NewSolveStatus(ev))
		}
	}

	return printer.Wait()
}

func logsCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
	var options logsOptions

	cmd := &cobra.Command{
		Use:   "logs [OPTIONS] [REF]",
		Short: "Print the logs of a build",
		Args:  cobra.MaximumNArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) > 0 {
				options.ref = args[0]
			}
			options.builder = *rootOpts.Builder
			return runLogs(cmd.Context(), dockerCli, options)
		},
		ValidArgsFunction: completion.Disable,
	}

	flags := cmd.Flags()
	flags.StringVar(&options.progress, "progress", "plain", "Set type of progress output (plain, rawjson, tty)")

	return cmd
}
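`runLogs` drains the status stream with a labeled `for`/`select` loop: `ctx.Done()` aborts with `context.Cause`, while `io.EOF` from `Recv` ends the replay cleanly. A rough stand-alone sketch of the same loop shape, with a channel of events and an explicit EOF sentinel standing in for the gRPC stream (the `recv` helper and event strings are illustrative, not buildx API):

package main

import (
	"context"
	"errors"
	"fmt"
	"io"
)

// recv imitates a streaming Recv(): events arrive until the channel
// closes, after which io.EOF is returned, as gRPC streams do.
func recv(ch <-chan string) (string, error) {
	ev, ok := <-ch
	if !ok {
		return "", io.EOF
	}
	return ev, nil
}

func main() {
	ctx := context.Background()
	ch := make(chan string, 2)
	ch <- "#1 [internal] load build definition"
	ch <- "#1 DONE 0.1s"
	close(ch)

loop0:
	for {
		select {
		case <-ctx.Done():
			fmt.Println("canceled:", context.Cause(ctx))
			return
		default:
			ev, err := recv(ch)
			if err != nil {
				if errors.Is(err, io.EOF) {
					break loop0 // stream finished, not an error
				}
				panic(err)
			}
			fmt.Println(ev)
		}
	}
	fmt.Println("replay complete")
}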
commands/history/ls.go (new file, 264 lines)
@@ -0,0 +1,264 @@
package history

import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"path"
	"slices"
	"time"

	"github.com/containerd/console"
	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/localstate"
	"github.com/docker/buildx/util/cobrautil/completion"
	"github.com/docker/buildx/util/confutil"
	"github.com/docker/buildx/util/desktop"
	"github.com/docker/buildx/util/gitutil"
	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/command/formatter"
	"github.com/docker/go-units"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

const (
	lsHeaderBuildID  = "BUILD ID"
	lsHeaderName     = "NAME"
	lsHeaderStatus   = "STATUS"
	lsHeaderCreated  = "CREATED AT"
	lsHeaderDuration = "DURATION"
	lsHeaderLink     = ""

	lsDefaultTableFormat = "table {{.Ref}}\t{{.Name}}\t{{.Status}}\t{{.CreatedAt}}\t{{.Duration}}\t{{.Link}}"

	headerKeyTimestamp = "buildkit-current-timestamp"
)

type lsOptions struct {
	builder string
	format  string
	noTrunc bool

	filters []string
	local   bool
}

func runLs(ctx context.Context, dockerCli command.Cli, opts lsOptions) error {
	b, err := builder.New(dockerCli, builder.WithName(opts.builder))
	if err != nil {
		return err
	}

	nodes, err := b.LoadNodes(ctx)
	if err != nil {
		return err
	}
	for _, node := range nodes {
		if node.Err != nil {
			return node.Err
		}
	}

	queryOptions := &queryOptions{}

	if opts.local {
		wd, err := os.Getwd()
		if err != nil {
			return err
		}
		gitc, err := gitutil.New(gitutil.WithContext(ctx), gitutil.WithWorkingDir(wd))
		if err != nil {
			if st, err1 := os.Stat(path.Join(wd, ".git")); err1 == nil && st.IsDir() {
				return errors.Wrap(err, "git was not found in the system")
			}
			return errors.Wrapf(err, "could not find git repository for local filter")
		}
		remote, err := gitc.RemoteURL()
		if err != nil {
			return errors.Wrapf(err, "could not get remote URL for local filter")
		}
		queryOptions.Filters = append(queryOptions.Filters, fmt.Sprintf("repository=%s", remote))
	}
	queryOptions.Filters = append(queryOptions.Filters, opts.filters...)

	out, err := queryRecords(ctx, "", nodes, queryOptions)
	if err != nil {
		return err
	}

	ls, err := localstate.New(confutil.NewConfig(dockerCli))
	if err != nil {
		return err
	}

	for i, rec := range out {
		st, _ := ls.ReadRef(rec.node.Builder, rec.node.Name, rec.Ref)
		rec.name = buildName(rec.FrontendAttrs, st)
		out[i] = rec
	}

	return lsPrint(dockerCli, out, opts)
}

func lsCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
	var options lsOptions

	cmd := &cobra.Command{
		Use:   "ls",
		Short: "List build records",
		Args:  cli.NoArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			options.builder = *rootOpts.Builder
			return runLs(cmd.Context(), dockerCli, options)
		},
		ValidArgsFunction: completion.Disable,
	}

	flags := cmd.Flags()
	flags.StringVar(&options.format, "format", formatter.TableFormatKey, "Format the output")
	flags.BoolVar(&options.noTrunc, "no-trunc", false, "Don't truncate output")
	flags.StringArrayVar(&options.filters, "filter", nil, `Provide filter values (e.g., "status=error")`)
	flags.BoolVar(&options.local, "local", false, "List records for current repository only")

	return cmd
}

func lsPrint(dockerCli command.Cli, records []historyRecord, in lsOptions) error {
	if in.format == formatter.TableFormatKey {
		in.format = lsDefaultTableFormat
	}

	ctx := formatter.Context{
		Output: dockerCli.Out(),
		Format: formatter.Format(in.format),
		Trunc:  !in.noTrunc,
	}

	slices.SortFunc(records, func(a, b historyRecord) int {
		if a.CompletedAt == nil && b.CompletedAt != nil {
			return -1
		}
		if a.CompletedAt != nil && b.CompletedAt == nil {
			return 1
		}
		return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
	})

	var term bool
	if _, err := console.ConsoleFromFile(os.Stdout); err == nil {
		term = true
	}
	render := func(format func(subContext formatter.SubContext) error) error {
		for _, r := range records {
			if err := format(&lsContext{
				format: formatter.Format(in.format),
				isTerm: term,
				trunc:  !in.noTrunc,
				record: &r,
			}); err != nil {
				return err
			}
		}
		return nil
	}

	lsCtx := lsContext{
		isTerm: term,
		trunc:  !in.noTrunc,
	}
	lsCtx.Header = formatter.SubHeaderContext{
		"Ref":       lsHeaderBuildID,
		"Name":      lsHeaderName,
		"Status":    lsHeaderStatus,
		"CreatedAt": lsHeaderCreated,
		"Duration":  lsHeaderDuration,
		"Link":      lsHeaderLink,
	}

	return ctx.Write(&lsCtx, render)
}

type lsContext struct {
	formatter.HeaderContext

	isTerm bool
	trunc  bool
	format formatter.Format
	record *historyRecord
}

func (c *lsContext) MarshalJSON() ([]byte, error) {
	m := map[string]any{
		"ref":             c.FullRef(),
		"name":            c.Name(),
		"status":          c.Status(),
		"created_at":      c.record.CreatedAt.AsTime().Format(time.RFC3339Nano),
		"total_steps":     c.record.NumTotalSteps,
		"completed_steps": c.record.NumCompletedSteps,
		"cached_steps":    c.record.NumCachedSteps,
	}
	if c.record.CompletedAt != nil {
		m["completed_at"] = c.record.CompletedAt.AsTime().Format(time.RFC3339Nano)
	}
	return json.Marshal(m)
}

func (c *lsContext) Ref() string {
	return c.record.Ref
}

func (c *lsContext) FullRef() string {
	return fmt.Sprintf("%s/%s/%s", c.record.node.Builder, c.record.node.Name, c.record.Ref)
}

func (c *lsContext) Name() string {
	name := c.record.name
	if c.trunc && c.format.IsTable() {
		return trimBeginning(name, 36)
	}
	return name
}

func (c *lsContext) Status() string {
	if c.record.CompletedAt != nil {
		if c.record.Error != nil {
			return "Error"
		}
		return "Completed"
	}
	return "Running"
}

func (c *lsContext) CreatedAt() string {
	return units.HumanDuration(time.Since(c.record.CreatedAt.AsTime())) + " ago"
}

func (c *lsContext) Duration() string {
	lastTime := c.record.currentTimestamp
	if c.record.CompletedAt != nil {
		tm := c.record.CompletedAt.AsTime()
		lastTime = &tm
	}
	if lastTime == nil {
		return ""
	}
	v := formatDuration(lastTime.Sub(c.record.CreatedAt.AsTime()))
	if c.record.CompletedAt == nil {
		v += "+"
	}
	return v
}

func (c *lsContext) Link() string {
	url := desktop.BuildURL(c.FullRef())
	if c.format.IsTable() {
		if c.isTerm {
			return desktop.ANSIHyperlink(url, "Open")
		}
		return ""
	}
	return url
}
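`lsPrint` sorts running builds (nil `CompletedAt`) ahead of finished ones, then orders each group by creation time, newest first. A small sketch of that comparator on a plain struct (the `rec` type here is hypothetical; the real code sorts `historyRecord` values carrying protobuf timestamps):

package main

import (
	"fmt"
	"slices"
	"time"
)

type rec struct {
	ref         string
	createdAt   time.Time
	completedAt *time.Time // nil while the build is still running
}

func main() {
	done := time.Now()
	records := []rec{
		{ref: "old-done", createdAt: time.Now().Add(-2 * time.Hour), completedAt: &done},
		{ref: "running", createdAt: time.Now().Add(-3 * time.Hour)},
		{ref: "new-done", createdAt: time.Now().Add(-1 * time.Hour), completedAt: &done},
	}

	slices.SortFunc(records, func(a, b rec) int {
		// running builds sort before completed ones...
		if a.completedAt == nil && b.completedAt != nil {
			return -1
		}
		if a.completedAt != nil && b.completedAt == nil {
			return 1
		}
		// ...and within each group, newest first.
		return b.createdAt.Compare(a.createdAt)
	})

	for _, r := range records {
		fmt.Println(r.ref)
	}
	// Prints: running, new-done, old-done
}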
commands/history/open.go (new file, 73 lines)
@@ -0,0 +1,73 @@
package history

import (
	"context"
	"fmt"

	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/util/cobrautil/completion"
	"github.com/docker/buildx/util/desktop"
	"github.com/docker/cli/cli/command"
	"github.com/pkg/browser"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

type openOptions struct {
	builder string
	ref     string
}

func runOpen(ctx context.Context, dockerCli command.Cli, opts openOptions) error {
	b, err := builder.New(dockerCli, builder.WithName(opts.builder))
	if err != nil {
		return err
	}

	nodes, err := b.LoadNodes(ctx)
	if err != nil {
		return err
	}
	for _, node := range nodes {
		if node.Err != nil {
			return node.Err
		}
	}

	recs, err := queryRecords(ctx, opts.ref, nodes, nil)
	if err != nil {
		return err
	}

	if len(recs) == 0 {
		if opts.ref == "" {
			return errors.New("no records found")
		}
		return errors.Errorf("no record found for ref %q", opts.ref)
	}

	rec := &recs[0]

	url := desktop.BuildURL(fmt.Sprintf("%s/%s/%s", rec.node.Builder, rec.node.Name, rec.Ref))
	return browser.OpenURL(url)
}

func openCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
	var options openOptions

	cmd := &cobra.Command{
		Use:   "open [OPTIONS] [REF]",
		Short: "Open a build in Docker Desktop",
		Args:  cobra.MaximumNArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) > 0 {
				options.ref = args[0]
			}
			options.builder = *rootOpts.Builder
			return runOpen(cmd.Context(), dockerCli, options)
		},
		ValidArgsFunction: completion.Disable,
	}

	return cmd
}
commands/history/rm.go (new file, 151 lines)
@@ -0,0 +1,151 @@
package history

import (
	"context"
	"io"

	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/util/cobrautil/completion"
	"github.com/docker/cli/cli/command"
	"github.com/hashicorp/go-multierror"
	controlapi "github.com/moby/buildkit/api/services/control"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"golang.org/x/sync/errgroup"
)

type rmOptions struct {
	builder string
	refs    []string
	all     bool
}

func runRm(ctx context.Context, dockerCli command.Cli, opts rmOptions) error {
	b, err := builder.New(dockerCli, builder.WithName(opts.builder))
	if err != nil {
		return err
	}

	nodes, err := b.LoadNodes(ctx)
	if err != nil {
		return err
	}
	for _, node := range nodes {
		if node.Err != nil {
			return node.Err
		}
	}

	errs := make([][]error, len(opts.refs))
	for i := range errs {
		errs[i] = make([]error, len(nodes))
	}

	eg, ctx := errgroup.WithContext(ctx)
	for i, node := range nodes {
		node := node
		eg.Go(func() error {
			if node.Driver == nil {
				return nil
			}
			c, err := node.Driver.Client(ctx)
			if err != nil {
				return err
			}

			refs := opts.refs

			if opts.all {
				serv, err := c.ControlClient().ListenBuildHistory(ctx, &controlapi.BuildHistoryRequest{
					EarlyExit: true,
				})
				if err != nil {
					return err
				}
				defer serv.CloseSend()

				for {
					resp, err := serv.Recv()
					if err != nil {
						if errors.Is(err, io.EOF) {
							break
						}
						return err
					}
					if resp.Type == controlapi.BuildHistoryEventType_COMPLETE {
						refs = append(refs, resp.Record.Ref)
					}
				}
			}

			for j, ref := range refs {
				_, err = c.ControlClient().UpdateBuildHistory(ctx, &controlapi.UpdateBuildHistoryRequest{
					Ref:    ref,
					Delete: true,
				})
				if opts.all {
					if err != nil {
						return err
					}
				} else {
					errs[j][i] = err
				}
			}
			return nil
		})
	}

	if err := eg.Wait(); err != nil {
		return err
	}

	var out []error
loop0:
	for _, nodeErrs := range errs {
		var nodeErr error
		for _, err1 := range nodeErrs {
			if err1 == nil {
				continue loop0
			}
			if nodeErr == nil {
				nodeErr = err1
			} else {
				nodeErr = multierror.Append(nodeErr, err1)
			}
		}
		out = append(out, nodeErr)
	}
	if len(out) == 0 {
		return nil
	}
	if len(out) == 1 {
		return out[0]
	}
	return multierror.Append(out[0], out[1:]...)
}

func rmCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
	var options rmOptions

	cmd := &cobra.Command{
		Use:   "rm [OPTIONS] [REF...]",
		Short: "Remove build records",
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) == 0 && !options.all {
				return errors.New("rm requires at least one argument")
			}
			if len(args) > 0 && options.all {
				return errors.New("rm requires either --all or at least one argument")
			}
			options.refs = args
			options.builder = *rootOpts.Builder
			return runRm(cmd.Context(), dockerCli, options)
		},
		ValidArgsFunction: completion.Disable,
	}

	flags := cmd.Flags()
	flags.BoolVar(&options.all, "all", false, "Remove all build records")

	return cmd
}
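`runRm` records one error slot per (ref, node) pair, then collapses each ref's row: if any node deleted the ref (a nil slot), the ref counts as removed and the row is skipped; otherwise the row's errors are merged. A compact sketch of that collapse using stdlib `errors.Join` in place of `go-multierror` (an assumption made to keep the example dependency-free; the real code keeps multierror semantics):

package main

import (
	"errors"
	"fmt"
)

// collapse keeps one merged error per ref, skipping refs that at
// least one node handled successfully (a nil slot in the row).
func collapse(rows [][]error) error {
	var out []error
loop0:
	for _, nodeErrs := range rows {
		var merged error
		for _, err := range nodeErrs {
			if err == nil {
				continue loop0 // some node succeeded for this ref
			}
			merged = errors.Join(merged, err)
		}
		out = append(out, merged)
	}
	return errors.Join(out...)
}

func main() {
	errA := errors.New("node1: record not found")
	errB := errors.New("node2: record not found")
	rows := [][]error{
		{errA, nil},  // ref deleted on node2, so the row is ignored
		{errA, errB}, // ref failed everywhere, so the row is reported
	}
	fmt.Println(collapse(rows))
}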
commands/history/root.go (new file, 33 lines)
@@ -0,0 +1,33 @@
package history

import (
	"github.com/docker/buildx/util/cobrautil/completion"
	"github.com/docker/cli/cli/command"
	"github.com/spf13/cobra"
)

type RootOptions struct {
	Builder *string
}

func RootCmd(rootcmd *cobra.Command, dockerCli command.Cli, opts RootOptions) *cobra.Command {
	cmd := &cobra.Command{
		Use:               "history",
		Short:             "Commands to work on build records",
		ValidArgsFunction: completion.Disable,
		RunE:              rootcmd.RunE,
	}

	cmd.AddCommand(
		lsCmd(dockerCli, opts),
		rmCmd(dockerCli, opts),
		logsCmd(dockerCli, opts),
		inspectCmd(dockerCli, opts),
		openCmd(dockerCli, opts),
		traceCmd(dockerCli, opts),
		importCmd(dockerCli, opts),
		exportCmd(dockerCli, opts),
	)

	return cmd
}
commands/history/trace.go (new file, 228 lines)
@@ -0,0 +1,228 @@
package history

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net"
	"os"
	"time"

	"github.com/containerd/console"
	"github.com/containerd/containerd/v2/core/content/proxy"
	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/util/cobrautil/completion"
	"github.com/docker/buildx/util/otelutil"
	"github.com/docker/buildx/util/otelutil/jaeger"
	"github.com/docker/cli/cli/command"
	controlapi "github.com/moby/buildkit/api/services/control"
	"github.com/opencontainers/go-digest"
	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/browser"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	jaegerui "github.com/tonistiigi/jaeger-ui-rest"
)

type traceOptions struct {
	builder string
	ref     string
	addr    string
	compare string
}

func loadTrace(ctx context.Context, ref string, nodes []builder.Node) (string, []byte, error) {
	recs, err := queryRecords(ctx, ref, nodes, &queryOptions{
		CompletedOnly: true,
	})
	if err != nil {
		return "", nil, err
	}

	if len(recs) == 0 {
		if ref == "" {
			return "", nil, errors.New("no records found")
		}
		return "", nil, errors.Errorf("no record found for ref %q", ref)
	}
	rec := &recs[0]

	if rec.CompletedAt == nil {
		return "", nil, errors.Errorf("build %q is not completed, only completed builds can be traced", rec.Ref)
	}

	if rec.Trace == nil {
		// build is complete but no trace yet. try to finalize the trace
		time.Sleep(1 * time.Second) // give some extra time for last parts of trace to be written

		c, err := rec.node.Driver.Client(ctx)
		if err != nil {
			return "", nil, err
		}
		_, err = c.ControlClient().UpdateBuildHistory(ctx, &controlapi.UpdateBuildHistoryRequest{
			Ref:      rec.Ref,
			Finalize: true,
		})
		if err != nil {
			return "", nil, err
		}

		recs, err := queryRecords(ctx, rec.Ref, []builder.Node{*rec.node}, &queryOptions{
			CompletedOnly: true,
		})
		if err != nil {
			return "", nil, err
		}

		if len(recs) == 0 {
			return "", nil, errors.Errorf("build record %q was deleted", rec.Ref)
		}

		rec = &recs[0]
		if rec.Trace == nil {
			return "", nil, errors.Errorf("build record %q is missing a trace", rec.Ref)
		}
	}

	c, err := rec.node.Driver.Client(ctx)
	if err != nil {
		return "", nil, err
	}

	store := proxy.NewContentStore(c.ContentClient())

	ra, err := store.ReaderAt(ctx, ocispecs.Descriptor{
		Digest:    digest.Digest(rec.Trace.Digest),
		MediaType: rec.Trace.MediaType,
		Size:      rec.Trace.Size,
	})
	if err != nil {
		return "", nil, err
	}

	spans, err := otelutil.ParseSpanStubs(io.NewSectionReader(ra, 0, ra.Size()))
	if err != nil {
		return "", nil, err
	}

	wrapper := struct {
		Data []jaeger.Trace `json:"data"`
	}{
		Data: spans.JaegerData().Data,
	}

	if len(wrapper.Data) == 0 {
		return "", nil, errors.New("no trace data")
	}

	buf := &bytes.Buffer{}
	enc := json.NewEncoder(buf)
	enc.SetIndent("", " ")
	if err := enc.Encode(wrapper); err != nil {
		return "", nil, err
	}

	return string(wrapper.Data[0].TraceID), buf.Bytes(), nil
}

func runTrace(ctx context.Context, dockerCli command.Cli, opts traceOptions) error {
	b, err := builder.New(dockerCli, builder.WithName(opts.builder))
	if err != nil {
		return err
	}

	nodes, err := b.LoadNodes(ctx)
	if err != nil {
		return err
	}
	for _, node := range nodes {
		if node.Err != nil {
			return node.Err
		}
	}

	traceID, data, err := loadTrace(ctx, opts.ref, nodes)
	if err != nil {
		return err
	}
	srv := jaegerui.NewServer(jaegerui.Config{})
	if err := srv.AddTrace(traceID, bytes.NewReader(data)); err != nil {
		return err
	}
	url := "/trace/" + traceID

	if opts.compare != "" {
		traceIDcomp, data, err := loadTrace(ctx, opts.compare, nodes)
		if err != nil {
			return errors.Wrapf(err, "failed to load trace for %s", opts.compare)
		}
		if err := srv.AddTrace(traceIDcomp, bytes.NewReader(data)); err != nil {
			return err
		}
		url = "/trace/" + traceIDcomp + "..." + traceID
	}

	var term bool
	if _, err := console.ConsoleFromFile(os.Stdout); err == nil {
		term = true
	}

	if !term && opts.compare == "" {
		fmt.Fprintln(dockerCli.Out(), string(data))
		return nil
	}

	ln, err := net.Listen("tcp", opts.addr)
	if err != nil {
		return err
	}

	go func() {
		time.Sleep(100 * time.Millisecond)
		browser.OpenURL(url)
	}()

	url = "http://" + ln.Addr().String() + url
	fmt.Fprintf(dockerCli.Err(), "Trace available at %s\n", url)

	go func() {
		<-ctx.Done()
		ln.Close()
	}()

	err = srv.Serve(ln)
	if err != nil {
		select {
		case <-ctx.Done():
			return nil
		default:
		}
	}
	return err
}

func traceCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
	var options traceOptions

	cmd := &cobra.Command{
		Use:   "trace [OPTIONS] [REF]",
		Short: "Show the OpenTelemetry trace of a build record",
		Args:  cobra.MaximumNArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) > 0 {
				options.ref = args[0]
			}
			options.builder = *rootOpts.Builder
			return runTrace(cmd.Context(), dockerCli, options)
		},
		ValidArgsFunction: completion.Disable,
	}

	flags := cmd.Flags()
	flags.StringVar(&options.addr, "addr", "127.0.0.1:0", "Address to bind the UI server")
	flags.StringVar(&options.compare, "compare", "", "Compare with another build reference")

	return cmd
}
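`runTrace` shuts the embedded Jaeger UI server down by closing its listener from a goroutine once the context is done, then swallows the resulting `Serve` error if cancellation was the cause. The same pattern in a self-contained sketch with `net/http` as a stdlib stand-in for the jaeger-ui-rest server (an assumption for illustration):

package main

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"time"
)

func serve(ctx context.Context, addr string) error {
	ln, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}

	// Closing the listener from the outside makes Serve return with
	// an error; checking ctx afterwards tells an intentional shutdown
	// apart from a genuine failure.
	go func() {
		<-ctx.Done()
		ln.Close()
	}()

	err = http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	}))
	if err != nil {
		select {
		case <-ctx.Done():
			return nil // listener closed on purpose
		default:
		}
	}
	return err
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	fmt.Println("serve returned:", serve(ctx, "127.0.0.1:0"))
}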
commands/history/utils.go (new file, 403 lines)
@@ -0,0 +1,403 @@
package history

import (
	"bytes"
	"context"
	"encoding/csv"
	"fmt"
	"io"
	"path/filepath"
	"slices"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/docker/buildx/build"
	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/localstate"
	controlapi "github.com/moby/buildkit/api/services/control"
	"github.com/moby/buildkit/util/gitutil"
	"github.com/pkg/errors"
	"golang.org/x/sync/errgroup"
)

const recordsLimit = 50

func buildName(fattrs map[string]string, ls *localstate.State) string {
	var res string

	var target, contextPath, dockerfilePath, vcsSource string
	if v, ok := fattrs["target"]; ok {
		target = v
	}
	if v, ok := fattrs["context"]; ok {
		contextPath = filepath.ToSlash(v)
	} else if v, ok := fattrs["vcs:localdir:context"]; ok && v != "." {
		contextPath = filepath.ToSlash(v)
	}
	if v, ok := fattrs["vcs:source"]; ok {
		vcsSource = v
	}
	if v, ok := fattrs["filename"]; ok && v != "Dockerfile" {
		dockerfilePath = filepath.ToSlash(v)
	}
	if v, ok := fattrs["vcs:localdir:dockerfile"]; ok && v != "." {
		dockerfilePath = filepath.ToSlash(filepath.Join(v, dockerfilePath))
	}

	var localPath string
	if ls != nil && !build.IsRemoteURL(ls.LocalPath) {
		if ls.LocalPath != "" && ls.LocalPath != "-" {
			localPath = filepath.ToSlash(ls.LocalPath)
		}
		if ls.DockerfilePath != "" && ls.DockerfilePath != "-" && ls.DockerfilePath != "Dockerfile" {
			dockerfilePath = filepath.ToSlash(ls.DockerfilePath)
		}
	}

	// remove default dockerfile name
	const defaultFilename = "/Dockerfile"
	hasDefaultFileName := strings.HasSuffix(dockerfilePath, defaultFilename) || dockerfilePath == ""
	dockerfilePath = strings.TrimSuffix(dockerfilePath, defaultFilename)

	// dockerfile is a subpath of context
	if strings.HasPrefix(dockerfilePath, localPath) && len(dockerfilePath) > len(localPath) {
		res = dockerfilePath[strings.LastIndex(localPath, "/")+1:]
	} else {
		// Otherwise, use basename
		bpath := localPath
		if len(dockerfilePath) > 0 {
			bpath = dockerfilePath
		}
		if len(bpath) > 0 {
			lidx := strings.LastIndex(bpath, "/")
			res = bpath[lidx+1:]
			if !hasDefaultFileName {
				if lidx != -1 {
					res = filepath.ToSlash(filepath.Join(filepath.Base(bpath[:lidx]), res))
				} else {
					res = filepath.ToSlash(filepath.Join(filepath.Base(bpath), res))
				}
			}
		}
	}

	if len(contextPath) > 0 {
		res = contextPath
	}
	if len(target) > 0 {
		if len(res) > 0 {
			res = res + " (" + target + ")"
		} else {
			res = target
		}
	}
	if res == "" && vcsSource != "" {
		return vcsSource
	}
	return res
}

func trimBeginning(s string, n int) string {
	if len(s) <= n {
		return s
	}
	return ".." + s[len(s)-n+2:]
}

type historyRecord struct {
	*controlapi.BuildHistoryRecord
	currentTimestamp *time.Time
	node             *builder.Node
	name             string
}

type queryOptions struct {
	CompletedOnly bool
	Filters       []string
}

func queryRecords(ctx context.Context, ref string, nodes []builder.Node, opts *queryOptions) ([]historyRecord, error) {
	var mu sync.Mutex
	var out []historyRecord

	var offset *int
	if strings.HasPrefix(ref, "^") {
		off, err := strconv.Atoi(ref[1:])
		if err != nil {
			return nil, errors.Wrapf(err, "invalid offset %q", ref)
		}
		offset = &off
		ref = ""
	}

	var filters []string
	if opts != nil {
		filters = opts.Filters
	}

	eg, ctx := errgroup.WithContext(ctx)
	for _, node := range nodes {
		node := node
		eg.Go(func() error {
			if node.Driver == nil {
				return nil
			}
			var records []historyRecord
			c, err := node.Driver.Client(ctx)
			if err != nil {
				return err
			}

			var matchers []matchFunc
			if len(filters) > 0 {
				filters, matchers, err = dockerFiltersToBuildkit(filters)
				if err != nil {
					return err
				}
				sb := bytes.NewBuffer(nil)
				w := csv.NewWriter(sb)
				w.Write(filters)
				w.Flush()
				filters = []string{strings.TrimSuffix(sb.String(), "\n")}
			}

			serv, err := c.ControlClient().ListenBuildHistory(ctx, &controlapi.BuildHistoryRequest{
				EarlyExit: true,
				Ref:       ref,
				Limit:     recordsLimit,
				Filter:    filters,
			})
			if err != nil {
				return err
			}
			md, err := serv.Header()
			if err != nil {
				return err
			}
			var ts *time.Time
			if v, ok := md[headerKeyTimestamp]; ok {
				t, err := time.Parse(time.RFC3339Nano, v[0])
				if err != nil {
					return err
				}
				ts = &t
			}
			defer serv.CloseSend()
		loop0:
			for {
				he, err := serv.Recv()
				if err != nil {
					if errors.Is(err, io.EOF) {
						break
					}
					return err
				}
				if he.Type == controlapi.BuildHistoryEventType_DELETED || he.Record == nil {
					continue
				}
				if opts != nil && opts.CompletedOnly && he.Type != controlapi.BuildHistoryEventType_COMPLETE {
					continue
				}

				// for older buildkit that don't support filters apply local filters
				for _, matcher := range matchers {
					if !matcher(he.Record) {
						continue loop0
					}
				}

				records = append(records, historyRecord{
					BuildHistoryRecord: he.Record,
					currentTimestamp:   ts,
					node:               &node,
				})
			}
			mu.Lock()
			out = append(out, records...)
			mu.Unlock()
			return nil
		})
	}

	if err := eg.Wait(); err != nil {
		return nil, err
	}

	slices.SortFunc(out, func(a, b historyRecord) int {
		return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
	})

	if offset != nil {
		var filtered []historyRecord
		for _, r := range out {
			if *offset > 0 {
				*offset--
				continue
			}
			filtered = append(filtered, r)
			break
		}
		if *offset > 0 {
			return nil, errors.Errorf("no completed build found with offset %d", *offset)
		}
		out = filtered
	}

	return out, nil
}

func formatDuration(d time.Duration) string {
	if d < time.Minute {
		return fmt.Sprintf("%.1fs", d.Seconds())
	}
	return fmt.Sprintf("%dm %2ds", int(d.Minutes()), int(d.Seconds())%60)
}

type matchFunc func(*controlapi.BuildHistoryRecord) bool

func dockerFiltersToBuildkit(in []string) ([]string, []matchFunc, error) {
	out := []string{}
	matchers := []matchFunc{}
	for _, f := range in {
		key, value, sep, found := cutAny(f, "!=", "=", "<=", "<", ">=", ">")
		if !found {
			return nil, nil, errors.Errorf("invalid filter %q", f)
		}
		switch key {
		case "ref", "repository", "status":
			if sep != "=" && sep != "!=" {
				return nil, nil, errors.Errorf("invalid separator for %q, expected = or !=", f)
			}
			matchers = append(matchers, valueFiler(key, value, sep))
			if sep == "=" {
				if key == "status" {
					sep = "=="
				} else {
					sep = "~="
				}
			}
		case "startedAt", "completedAt", "duration":
			if sep == "=" || sep == "!=" {
				return nil, nil, errors.Errorf("invalid separator for %q, expected <=, <, >= or >", f)
			}
			matcher, err := timeBasedFilter(key, value, sep)
			if err != nil {
				return nil, nil, err
			}
			matchers = append(matchers, matcher)
		default:
			return nil, nil, errors.Errorf("unsupported filter %q", f)
		}
		out = append(out, key+sep+value)
	}
	return out, matchers, nil
}

func valueFiler(key, value, sep string) matchFunc {
	return func(rec *controlapi.BuildHistoryRecord) bool {
		var recValue string
		switch key {
		case "ref":
			recValue = rec.Ref
		case "repository":
			v, ok := rec.FrontendAttrs["vcs:source"]
			if ok {
				recValue = v
			} else {
				if context, ok := rec.FrontendAttrs["context"]; ok {
					if ref, err := gitutil.ParseGitRef(context); err == nil {
						recValue = ref.Remote
					}
				}
			}
		case "status":
			if rec.CompletedAt != nil {
				if rec.Error != nil {
					if strings.Contains(rec.Error.Message, "context canceled") {
						recValue = "canceled"
					} else {
						recValue = "error"
					}
				} else {
					recValue = "completed"
				}
			} else {
				recValue = "running"
			}
		}
		switch sep {
		case "=":
			if key == "status" {
				return recValue == value
			}
			return strings.Contains(recValue, value)
		case "!=":
			return recValue != value
		default:
			return false
		}
	}
}

func timeBasedFilter(key, value, sep string) (matchFunc, error) {
	var cmp int64
	switch key {
	case "startedAt", "completedAt":
		v, err := time.ParseDuration(value)
		if err == nil {
			tm := time.Now().Add(-v)
			cmp = tm.Unix()
		} else {
			tm, err := time.Parse(time.RFC3339, value)
			if err != nil {
				return nil, errors.Errorf("invalid time %s", value)
			}
			cmp = tm.Unix()
		}
	case "duration":
		v, err := time.ParseDuration(value)
		if err != nil {
			return nil, errors.Errorf("invalid duration %s", value)
		}
		cmp = int64(v)
	default:
		return nil, nil
	}

	return func(rec *controlapi.BuildHistoryRecord) bool {
		var val int64
		switch key {
		case "startedAt":
			val = rec.CreatedAt.AsTime().Unix()
		case "completedAt":
			if rec.CompletedAt != nil {
				val = rec.CompletedAt.AsTime().Unix()
			}
		case "duration":
			if rec.CompletedAt != nil {
				val = int64(rec.CompletedAt.AsTime().Sub(rec.CreatedAt.AsTime()))
			}
		}
		switch sep {
		case ">=":
			return val >= cmp
		case "<=":
			return val <= cmp
		case ">":
			return val > cmp
		default:
			return val < cmp
		}
	}, nil
}

func cutAny(s string, seps ...string) (before, after, sep string, found bool) {
	for _, sep := range seps {
		if idx := strings.Index(s, sep); idx != -1 {
			return s[:idx], s[idx+len(sep):], sep, true
		}
	}
	return s, "", "", false
}
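`queryRecords` accepts a `^N` ref syntax: after sorting records newest-first, it skips N entries and returns the next one, so `^1` names the previous build. A tiny sketch of that offset convention on plain strings (the data and helper name are hypothetical; the real code walks `historyRecord`s):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// pickByOffset implements the "^N" ref convention: skip N records in
// the (already newest-first) list and return the next one.
func pickByOffset(ref string, sorted []string) (string, error) {
	if !strings.HasPrefix(ref, "^") {
		return "", fmt.Errorf("not an offset ref: %q", ref)
	}
	off, err := strconv.Atoi(ref[1:])
	if err != nil {
		return "", fmt.Errorf("invalid offset %q: %w", ref, err)
	}
	if off >= len(sorted) {
		return "", fmt.Errorf("no completed build found with offset %d", off)
	}
	return sorted[off], nil
}

func main() {
	newestFirst := []string{"build-c", "build-b", "build-a"}
	r, err := pickByOffset("^1", newestFirst)
	fmt.Println(r, err) // build-b <nil>
}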
@@ -42,7 +42,7 @@ func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, arg
 		return errors.Errorf("can't push with no tags specified, please set --tag or --dry-run")
 	}
 
-	fileArgs := make([]string, len(in.files))
+	fileArgs := make([]string, len(in.files), len(in.files)+len(args))
 	for i, f := range in.files {
 		dt, err := os.ReadFile(f)
 		if err != nil {
@@ -173,8 +173,8 @@ func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, arg
 	// new resolver cause need new auth
 	r = imagetools.New(imageopt)
 
-	ctx2, cancel := context.WithCancel(context.TODO())
-	defer cancel()
+	ctx2, cancel := context.WithCancelCause(context.TODO())
+	defer func() { cancel(errors.WithStack(context.Canceled)) }()
 	printer, err := progress.NewPrinter(ctx2, os.Stderr, progressui.DisplayMode(in.progress))
 	if err != nil {
 		return err
@@ -194,7 +194,7 @@ func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, arg
 			}
 			s := s
 			eg2.Go(func() error {
-				sub.Log(1, []byte(fmt.Sprintf("copying %s from %s to %s\n", s.Desc.Digest.String(), s.Ref.String(), t.String())))
+				sub.Log(1, fmt.Appendf(nil, "copying %s from %s to %s\n", s.Desc.Digest.String(), s.Ref.String(), t.String()))
 				return r.Copy(ctx, s, t)
 			})
 		}
@@ -202,7 +202,7 @@ func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, arg
 		if err := eg2.Wait(); err != nil {
 			return err
 		}
-		sub.Log(1, []byte(fmt.Sprintf("pushing %s to %s\n", desc.Digest.String(), t.String())))
+		sub.Log(1, fmt.Appendf(nil, "pushing %s to %s\n", desc.Digest.String(), t.String()))
 		return r.Push(ctx, t, desc, dt)
 	})
 })

@@ -10,11 +10,12 @@ type RootOptions
 	Builder *string
 }
 
-func RootCmd(dockerCli command.Cli, opts RootOptions) *cobra.Command {
+func RootCmd(rootcmd *cobra.Command, dockerCli command.Cli, opts RootOptions) *cobra.Command {
 	cmd := &cobra.Command{
 		Use:               "imagetools",
 		Short:             "Commands to work on images in registry",
 		ValidArgsFunction: completion.Disable,
+		RunE:              rootcmd.RunE,
 	}
 
 	cmd.AddCommand(

@@ -17,6 +17,7 @@ import (
 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/cli/debug"
 	"github.com/docker/go-units"
+	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 )
 
@@ -34,8 +35,9 @@ func runInspect(ctx context.Context, dockerCli command.Cli, in inspectOptions) e
 		return err
 	}
 
-	timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
-	defer cancel()
+	timeoutCtx, cancel := context.WithCancelCause(ctx)
+	timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
+	defer func() { cancel(errors.WithStack(context.Canceled)) }()
 
 	nodes, err := b.LoadNodes(timeoutCtx, builder.WithData())
 	if in.bootstrap {
@@ -113,6 +115,25 @@ func runInspect(ctx context.Context, dockerCli command.Cli, in inspectOptions) e
 			fmt.Fprintf(w, "\t%s:\t%s\n", k, v)
 		}
 	}
+
+	if len(nodes[i].CDIDevices) > 0 {
+		fmt.Fprintf(w, "Devices:\n")
+		for _, dev := range nodes[i].CDIDevices {
+			fmt.Fprintf(w, "\tName:\t%s\n", dev.Name)
+			if dev.OnDemand {
+				fmt.Fprintf(w, "\tOn-Demand:\t%v\n", dev.OnDemand)
+			} else {
+				fmt.Fprintf(w, "\tAutomatically allowed:\t%v\n", dev.AutoAllow)
+			}
+			if len(dev.Annotations) > 0 {
+				fmt.Fprintf(w, "\tAnnotations:\n")
+				for k, v := range dev.Annotations {
+					fmt.Fprintf(w, "\t\t%s:\t%s\n", k, v)
+				}
+			}
+		}
+	}
+
 	for ri, rule := range nodes[i].GCPolicy {
 		fmt.Fprintf(w, "GC Policy rule#%d:\n", ri)
 		fmt.Fprintf(w, "\tAll:\t%v\n", rule.All)
@@ -122,8 +143,20 @@ func runInspect(ctx context.Context, dockerCli command.Cli, in inspectOptions) e
 		if rule.KeepDuration > 0 {
 			fmt.Fprintf(w, "\tKeep Duration:\t%v\n", rule.KeepDuration.String())
 		}
-		if rule.KeepBytes > 0 {
-			fmt.Fprintf(w, "\tKeep Bytes:\t%s\n", units.BytesSize(float64(rule.KeepBytes)))
-		}
+		if rule.ReservedSpace > 0 {
+			fmt.Fprintf(w, "\tReserved Space:\t%s\n", units.BytesSize(float64(rule.ReservedSpace)))
+		}
+		if rule.MaxUsedSpace > 0 {
+			fmt.Fprintf(w, "\tMax Used Space:\t%s\n", units.BytesSize(float64(rule.MaxUsedSpace)))
+		}
+		if rule.MinFreeSpace > 0 {
+			fmt.Fprintf(w, "\tMin Free Space:\t%s\n", units.BytesSize(float64(rule.MinFreeSpace)))
+		}
+	}
+	for f, dt := range nodes[i].Files {
+		fmt.Fprintf(w, "File#%s:\n", f)
+		for _, line := range strings.Split(string(dt), "\n") {
+			fmt.Fprintf(w, "\t> %s\n", line)
+		}
 	}
 }
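The hunks above migrate plain `context.WithCancel`/`WithTimeout` calls to the cause-carrying variants so that a later `context.Cause` lookup reports why the context ended instead of the generic sentinel. A minimal sketch of the difference:

package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	// WithCancelCause lets the canceller attach a reason; Cause
	// retrieves it, whereas ctx.Err() only reports context.Canceled.
	ctx, cancel := context.WithCancelCause(context.Background())
	cancel(errors.New("user pressed Ctrl-C"))

	fmt.Println(ctx.Err())          // context canceled
	fmt.Println(context.Cause(ctx)) // user pressed Ctrl-C
}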
163
commands/ls.go
163
commands/ls.go
@@ -4,10 +4,12 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"maps"
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/containerd/platforms"
|
||||||
"github.com/docker/buildx/builder"
|
"github.com/docker/buildx/builder"
|
||||||
"github.com/docker/buildx/store"
|
"github.com/docker/buildx/store"
|
||||||
"github.com/docker/buildx/store/storeutil"
|
"github.com/docker/buildx/store/storeutil"
|
||||||
@@ -17,6 +19,7 @@ import (
|
|||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/docker/cli/cli/command/formatter"
|
"github.com/docker/cli/cli/command/formatter"
|
||||||
|
"github.com/pkg/errors"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
)
|
)
|
||||||
@@ -36,6 +39,7 @@ const (
|
|||||||
|
|
||||||
type lsOptions struct {
|
type lsOptions struct {
|
||||||
format string
|
format string
|
||||||
|
noTrunc bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func runLs(ctx context.Context, dockerCli command.Cli, in lsOptions) error {
|
func runLs(ctx context.Context, dockerCli command.Cli, in lsOptions) error {
|
||||||
@@ -55,8 +59,9 @@ func runLs(ctx context.Context, dockerCli command.Cli, in lsOptions) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
|
timeoutCtx, cancel := context.WithCancelCause(ctx)
|
||||||
defer cancel()
|
timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
|
||||||
|
defer func() { cancel(errors.WithStack(context.Canceled)) }()
|
||||||
|
|
||||||
eg, _ := errgroup.WithContext(timeoutCtx)
|
eg, _ := errgroup.WithContext(timeoutCtx)
|
||||||
for _, b := range builders {
|
for _, b := range builders {
|
||||||
@@ -72,7 +77,7 @@ func runLs(ctx context.Context, dockerCli command.Cli, in lsOptions) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if hasErrors, err := lsPrint(dockerCli, current, builders, in.format); err != nil {
|
if hasErrors, err := lsPrint(dockerCli, current, builders, in); err != nil {
|
||||||
return err
|
return err
|
||||||
} else if hasErrors {
|
} else if hasErrors {
|
||||||
_, _ = fmt.Fprintf(dockerCli.Err(), "\n")
|
_, _ = fmt.Fprintf(dockerCli.Err(), "\n")
|
||||||
@@ -107,6 +112,7 @@ func lsCmd(dockerCli command.Cli) *cobra.Command {
|
|||||||
|
|
||||||
flags := cmd.Flags()
|
flags := cmd.Flags()
|
||||||
flags.StringVar(&options.format, "format", formatter.TableFormatKey, "Format the output")
|
flags.StringVar(&options.format, "format", formatter.TableFormatKey, "Format the output")
|
||||||
|
flags.BoolVar(&options.noTrunc, "no-trunc", false, "Don't truncate output")
|
||||||
|
|
||||||
// hide builder persistent flag for this command
|
// hide builder persistent flag for this command
|
||||||
cobrautil.HideInheritedFlags(cmd, "builder")
|
cobrautil.HideInheritedFlags(cmd, "builder")
|
||||||
@@ -114,14 +120,15 @@ func lsCmd(dockerCli command.Cli) *cobra.Command {
|
|||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|
||||||
func lsPrint(dockerCli command.Cli, current *store.NodeGroup, builders []*builder.Builder, format string) (hasErrors bool, _ error) {
|
func lsPrint(dockerCli command.Cli, current *store.NodeGroup, builders []*builder.Builder, in lsOptions) (hasErrors bool, _ error) {
|
||||||
if format == formatter.TableFormatKey {
|
if in.format == formatter.TableFormatKey {
|
||||||
format = lsDefaultTableFormat
|
in.format = lsDefaultTableFormat
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx := formatter.Context{
|
ctx := formatter.Context{
|
||||||
Output: dockerCli.Out(),
|
Output: dockerCli.Out(),
|
||||||
Format: formatter.Format(format),
|
Format: formatter.Format(in.format),
|
||||||
|
Trunc: !in.noTrunc,
|
||||||
}
|
}
|
||||||
|
|
||||||
sort.SliceStable(builders, func(i, j int) bool {
|
sort.SliceStable(builders, func(i, j int) bool {
|
||||||
@@ -138,11 +145,12 @@ func lsPrint(dockerCli command.Cli, current *store.NodeGroup, builders []*builde
|
|||||||
render := func(format func(subContext formatter.SubContext) error) error {
|
render := func(format func(subContext formatter.SubContext) error) error {
|
||||||
for _, b := range builders {
|
for _, b := range builders {
|
||||||
if err := format(&lsContext{
|
if err := format(&lsContext{
|
||||||
|
format: ctx.Format,
|
||||||
|
trunc: ctx.Trunc,
|
||||||
Builder: &lsBuilder{
|
Builder: &lsBuilder{
|
||||||
Builder: b,
|
Builder: b,
|
||||||
Current: b.Name == current.Name,
|
Current: b.Name == current.Name,
|
||||||
},
|
},
|
||||||
format: ctx.Format,
|
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -152,6 +160,9 @@ func lsPrint(dockerCli command.Cli, current *store.NodeGroup, builders []*builde
|
|||||||
}
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
if ctx.Format.IsJSON() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
for _, n := range b.Nodes() {
|
for _, n := range b.Nodes() {
|
||||||
if n.Err != nil {
|
if n.Err != nil {
|
||||||
if ctx.Format.IsTable() {
|
if ctx.Format.IsTable() {
|
||||||
@@ -160,6 +171,7 @@ func lsPrint(dockerCli command.Cli, current *store.NodeGroup, builders []*builde
|
|||||||
}
|
}
|
||||||
if err := format(&lsContext{
|
if err := format(&lsContext{
|
||||||
format: ctx.Format,
|
format: ctx.Format,
|
||||||
|
trunc: ctx.Trunc,
|
||||||
Builder: &lsBuilder{
|
Builder: &lsBuilder{
|
||||||
Builder: b,
|
Builder: b,
|
||||||
Current: b.Name == current.Name,
|
Current: b.Name == current.Name,
|
||||||
@@ -196,6 +208,7 @@ type lsContext struct {
|
|||||||
Builder *lsBuilder
|
Builder *lsBuilder
|
||||||
|
|
||||||
format formatter.Format
|
format formatter.Format
|
||||||
|
trunc bool
|
||||||
node builder.Node
|
node builder.Node
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -261,7 +274,11 @@ func (c *lsContext) Platforms() string {
|
|||||||
if c.node.Name == "" {
|
if c.node.Name == "" {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
return strings.Join(platformutil.FormatInGroups(c.node.Node.Platforms, c.node.Platforms), ", ")
|
pfs := platformutil.FormatInGroups(c.node.Node.Platforms, c.node.Platforms)
|
||||||
|
if c.trunc && c.format.IsTable() {
|
||||||
|
return truncPlatforms(pfs, 4).String()
|
||||||
|
}
|
||||||
|
return strings.Join(pfs, ", ")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *lsContext) Error() string {
|
func (c *lsContext) Error() string {
|
||||||
@@ -272,3 +289,131 @@ func (c *lsContext) Error() string {
|
|||||||
}
|
}
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var truncMajorPlatforms = []string{
|
||||||
|
"linux/amd64",
|
||||||
|
"linux/arm64",
|
||||||
|
"linux/arm",
|
||||||
|
"linux/ppc64le",
|
||||||
|
"linux/s390x",
|
||||||
|
"linux/riscv64",
|
||||||
|
"linux/mips64",
|
||||||
|
}
|
||||||
|
|
||||||
|
type truncatedPlatforms struct {
|
||||||
|
res map[string][]string
|
||||||
|
input []string
|
||||||
|
max int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tp truncatedPlatforms) List() map[string][]string {
|
||||||
|
return tp.res
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tp truncatedPlatforms) String() string {
|
||||||
|
var out []string
|
||||||
|
var count int
|
||||||
|
|
||||||
|
var keys []string
|
||||||
|
for k := range tp.res {
|
||||||
|
keys = append(keys, k)
|
||||||
|
}
|
||||||
|
sort.Strings(keys)
|
||||||
|
|
||||||
|
seen := make(map[string]struct{})
|
||||||
|
for _, mpf := range truncMajorPlatforms {
|
||||||
|
if tpf, ok := tp.res[mpf]; ok {
|
||||||
|
seen[mpf] = struct{}{}
|
||||||
|
if len(tpf) == 1 {
|
||||||
|
out = append(out, tpf[0])
|
||||||
|
count++
|
||||||
|
} else {
|
||||||
|
hasPreferredPlatform := false
|
||||||
|
for _, pf := range tpf {
|
||||||
|
if strings.HasSuffix(pf, "*") {
|
||||||
|
hasPreferredPlatform = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
mainpf := mpf
|
||||||
|
if hasPreferredPlatform {
|
||||||
|
mainpf += "*"
|
||||||
|
}
|
||||||
|
out = append(out, fmt.Sprintf("%s (+%d)", mainpf, len(tpf)))
|
||||||
|
count += len(tpf)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, mpf := range keys {
|
||||||
|
if len(out) >= tp.max {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if _, ok := seen[mpf]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if len(tp.res[mpf]) == 1 {
|
||||||
|
out = append(out, tp.res[mpf][0])
|
||||||
|
count++
|
||||||
|
} else {
|
||||||
|
hasPreferredPlatform := false
|
||||||
|
for _, pf := range tp.res[mpf] {
|
||||||
|
if strings.HasSuffix(pf, "*") {
|
||||||
|
hasPreferredPlatform = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
mainpf := mpf
|
||||||
|
if hasPreferredPlatform {
|
||||||
|
mainpf += "*"
|
||||||
|
}
|
||||||
|
out = append(out, fmt.Sprintf("%s (+%d)", mainpf, len(tp.res[mpf])))
|
||||||
|
count += len(tp.res[mpf])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
left := len(tp.input) - count
|
||||||
|
if left > 0 {
|
||||||
|
out = append(out, fmt.Sprintf("(%d more)", left))
|
||||||
|
}
|
||||||
|
|
||||||
|
return strings.Join(out, ", ")
|
||||||
|
}
|
||||||
|
|
||||||
|
func truncPlatforms(pfs []string, max int) truncatedPlatforms {
|
||||||
|
res := make(map[string][]string)
|
||||||
|
for _, mpf := range truncMajorPlatforms {
|
||||||
|
for _, pf := range pfs {
|
||||||
|
if len(res) >= max {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
pp, err := platforms.Parse(strings.TrimSuffix(pf, "*"))
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if pp.OS+"/"+pp.Architecture == mpf {
|
||||||
|
res[mpf] = append(res[mpf], pf)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
left := make(map[string][]string)
|
||||||
|
for _, pf := range pfs {
|
||||||
|
if len(res) >= max {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
pp, err := platforms.Parse(strings.TrimSuffix(pf, "*"))
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
ppf := strings.TrimSuffix(pp.OS+"/"+pp.Architecture, "*")
|
||||||
|
if _, ok := res[ppf]; !ok {
|
||||||
|
left[ppf] = append(left[ppf], pf)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
maps.Copy(res, left)
|
||||||
|
return truncatedPlatforms{
|
||||||
|
res: res,
|
||||||
|
input: pfs,
|
||||||
|
max: max,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
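For orientation, a minimal sketch of the helper in use; the inputs are illustrative, and `*` marks a preferred platform, as in the tests below:

	// Condense a builder's platform list into at most four table groups.
	tpfs := truncPlatforms([]string{"linux/arm64*", "linux/amd64", "linux/amd64/v2", "linux/arm/v7"}, 4)
	fmt.Println(tpfs.String()) // linux/amd64 (+2), linux/arm64*, linux/arm/v7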
commands/ls_test.go (new file, 174 lines)
@@ -0,0 +1,174 @@
+package commands
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestTruncPlatforms(t *testing.T) {
+	tests := []struct {
+		name         string
+		platforms    []string
+		max          int
+		expectedList map[string][]string
+		expectedOut  string
+	}{
+		{
+			name:      "arm64 preferred and emulated",
+			platforms: []string{"linux/arm64*", "linux/amd64", "linux/amd64/v2", "linux/riscv64", "linux/ppc64le", "linux/s390x", "linux/386", "linux/mips64le", "linux/mips64", "linux/arm/v7", "linux/arm/v6"},
+			max:       4,
+			expectedList: map[string][]string{
+				"linux/amd64": {
+					"linux/amd64",
+					"linux/amd64/v2",
+				},
+				"linux/arm": {
+					"linux/arm/v7",
+					"linux/arm/v6",
+				},
+				"linux/arm64": {
+					"linux/arm64*",
+				},
+				"linux/ppc64le": {
+					"linux/ppc64le",
+				},
+			},
+			expectedOut: "linux/amd64 (+2), linux/arm64*, linux/arm (+2), linux/ppc64le, (5 more)",
+		},
+		{
+			name:      "riscv64 preferred only",
+			platforms: []string{"linux/riscv64*"},
+			max:       4,
+			expectedList: map[string][]string{
+				"linux/riscv64": {
+					"linux/riscv64*",
+				},
+			},
+			expectedOut: "linux/riscv64*",
+		},
+		{
+			name:      "amd64 no preferred and emulated",
+			platforms: []string{"linux/amd64", "linux/amd64/v2", "linux/amd64/v3", "linux/386", "linux/arm64", "linux/riscv64", "linux/ppc64le", "linux/s390x", "linux/mips64le", "linux/mips64", "linux/arm/v7", "linux/arm/v6"},
+			max:       4,
+			expectedList: map[string][]string{
+				"linux/amd64": {
+					"linux/amd64",
+					"linux/amd64/v2",
+					"linux/amd64/v3",
+				},
+				"linux/arm": {
+					"linux/arm/v7",
+					"linux/arm/v6",
+				},
+				"linux/arm64": {
+					"linux/arm64",
+				},
+				"linux/ppc64le": {
+					"linux/ppc64le",
+				},
+			},
+			expectedOut: "linux/amd64 (+3), linux/arm64, linux/arm (+2), linux/ppc64le, (5 more)",
+		},
+		{
+			name:      "amd64 no preferred",
+			platforms: []string{"linux/amd64", "linux/386"},
+			max:       4,
+			expectedList: map[string][]string{
+				"linux/386": {
+					"linux/386",
+				},
+				"linux/amd64": {
+					"linux/amd64",
+				},
+			},
+			expectedOut: "linux/amd64, linux/386",
+		},
+		{
+			name:      "arm64 no preferred",
+			platforms: []string{"linux/arm64", "linux/arm/v7", "linux/arm/v6"},
+			max:       4,
+			expectedList: map[string][]string{
+				"linux/arm": {
+					"linux/arm/v7",
+					"linux/arm/v6",
+				},
+				"linux/arm64": {
+					"linux/arm64",
+				},
+			},
+			expectedOut: "linux/arm64, linux/arm (+2)",
+		},
+		{
+			name:      "all preferred",
+			platforms: []string{"darwin/arm64*", "linux/arm64*", "linux/arm/v5*", "linux/arm/v6*", "linux/arm/v7*", "windows/arm64*"},
+			max:       4,
+			expectedList: map[string][]string{
+				"darwin/arm64": {
+					"darwin/arm64*",
+				},
+				"linux/arm": {
+					"linux/arm/v5*",
+					"linux/arm/v6*",
+					"linux/arm/v7*",
+				},
+				"linux/arm64": {
+					"linux/arm64*",
+				},
+				"windows/arm64": {
+					"windows/arm64*",
+				},
+			},
+			expectedOut: "linux/arm64*, linux/arm* (+3), darwin/arm64*, windows/arm64*",
+		},
+		{
+			name:      "no major preferred",
+			platforms: []string{"linux/amd64/v2*", "linux/arm/v6*", "linux/mips64le*", "linux/amd64", "linux/amd64/v3", "linux/386", "linux/arm64", "linux/riscv64", "linux/ppc64le", "linux/s390x", "linux/mips64", "linux/arm/v7"},
+			max:       4,
+			expectedList: map[string][]string{
+				"linux/amd64": {
+					"linux/amd64/v2*",
+					"linux/amd64",
+					"linux/amd64/v3",
+				},
+				"linux/arm": {
+					"linux/arm/v6*",
+					"linux/arm/v7",
+				},
+				"linux/arm64": {
+					"linux/arm64",
+				},
+				"linux/ppc64le": {
+					"linux/ppc64le",
+				},
+			},
+			expectedOut: "linux/amd64* (+3), linux/arm64, linux/arm* (+2), linux/ppc64le, (5 more)",
+		},
+		{
+			name:      "no major with multiple variants",
+			platforms: []string{"linux/arm64", "linux/arm/v7", "linux/arm/v6", "linux/mips64le/softfloat", "linux/mips64le/hardfloat"},
+			max:       4,
+			expectedList: map[string][]string{
+				"linux/arm": {
+					"linux/arm/v7",
+					"linux/arm/v6",
+				},
+				"linux/arm64": {
+					"linux/arm64",
+				},
+				"linux/mips64le": {
+					"linux/mips64le/softfloat",
+					"linux/mips64le/hardfloat",
+				},
+			},
+			expectedOut: "linux/arm64, linux/arm (+2), linux/mips64le (+2)",
+		},
+	}
+	for _, tt := range tests {
+		tt := tt
+		t.Run(tt.name, func(t *testing.T) {
+			tpfs := truncPlatforms(tt.platforms, tt.max)
+			assert.Equal(t, tt.expectedList, tpfs.List())
+			assert.Equal(t, tt.expectedOut, tpfs.String())
+		})
+	}
+}
@@ -16,6 +16,9 @@ import (
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/go-units"
 	"github.com/moby/buildkit/client"
+	gateway "github.com/moby/buildkit/frontend/gateway/client"
+	pb "github.com/moby/buildkit/solver/pb"
+	"github.com/moby/buildkit/util/apicaps"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 	"golang.org/x/sync/errgroup"
@@ -25,7 +28,9 @@ type pruneOptions struct {
 	builder       string
 	all           bool
 	filter        opts.FilterOpt
-	keepStorage   opts.MemBytes
+	reservedSpace opts.MemBytes
+	maxUsedSpace  opts.MemBytes
+	minFreeSpace  opts.MemBytes
 	force         bool
 	verbose       bool
 }
@@ -105,8 +110,19 @@ func runPrune(ctx context.Context, dockerCli command.Cli, opts pruneOptions) err
 	if err != nil {
 		return err
 	}
+	// check if the client supports newer prune options
+	if opts.maxUsedSpace.Value() != 0 || opts.minFreeSpace.Value() != 0 {
+		caps, err := loadLLBCaps(ctx, c)
+		if err != nil {
+			return errors.Wrap(err, "failed to load buildkit capabilities for prune")
+		}
+		if caps.Supports(pb.CapGCFreeSpaceFilter) != nil {
+			return errors.New("buildkit v0.17.0+ is required for max-used-space and min-free-space filters")
+		}
+	}
+
 	popts := []client.PruneOption{
-		client.WithKeepOpt(pi.KeepDuration, opts.keepStorage.Value()),
+		client.WithKeepOpt(pi.KeepDuration, opts.reservedSpace.Value(), opts.maxUsedSpace.Value(), opts.minFreeSpace.Value()),
 		client.WithFilter(pi.Filter),
 	}
 	if opts.all {
@@ -131,6 +147,17 @@ func runPrune(ctx context.Context, dockerCli command.Cli, opts pruneOptions) err
 	return nil
 }
 
+func loadLLBCaps(ctx context.Context, c *client.Client) (apicaps.CapSet, error) {
+	var caps apicaps.CapSet
+	_, err := c.Build(ctx, client.SolveOpt{
+		Internal: true,
+	}, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+		caps = c.BuildOpts().LLBCaps
+		return nil, nil
+	}, nil)
+	return caps, err
+}
+
 func pruneCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
 	options := pruneOptions{filter: opts.NewFilterOpt()}
 
@@ -148,10 +175,15 @@ func pruneCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
 	flags := cmd.Flags()
 	flags.BoolVarP(&options.all, "all", "a", false, "Include internal/frontend images")
 	flags.Var(&options.filter, "filter", `Provide filter values (e.g., "until=24h")`)
-	flags.Var(&options.keepStorage, "keep-storage", "Amount of disk space to keep for cache")
+	flags.Var(&options.reservedSpace, "reserved-space", "Amount of disk space always allowed to keep for cache")
+	flags.Var(&options.minFreeSpace, "min-free-space", "Target amount of free disk space after pruning")
+	flags.Var(&options.maxUsedSpace, "max-used-space", "Maximum amount of disk space allowed to keep for cache")
 	flags.BoolVar(&options.verbose, "verbose", false, "Provide a more verbose output")
 	flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation")
 
+	flags.Var(&options.reservedSpace, "keep-storage", "Amount of disk space to keep for cache")
+	flags.MarkDeprecated("keep-storage", "keep-storage flag has been changed to max-storage")
+
 	return cmd
 }
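The three size flags map directly onto BuildKit's GC knobs through `client.WithKeepOpt`, whose four-argument form is the one used in the hunk above. A hedged sketch of the equivalent direct call (byte values illustrative):

	// reservedSpace=10GB always kept, maxUsedSpace=50GB cap,
	// minFreeSpace=20GB target, with no duration filter.
	popt := client.WithKeepOpt(0, 10_000_000_000, 50_000_000_000, 20_000_000_000)
	_ = popt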
@@ -150,8 +150,9 @@ func rmAllInactive(ctx context.Context, txn *store.Txn, dockerCli command.Cli, i
 		return err
 	}
 
-	timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
-	defer cancel()
+	timeoutCtx, cancel := context.WithCancelCause(ctx)
+	timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
+	defer func() { cancel(errors.WithStack(context.Canceled)) }()
 
 	eg, _ := errgroup.WithContext(timeoutCtx)
 	for _, b := range builders {
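The switch to the cause-aware context variants means callers that inspect the context can tell a deliberate cancellation from the deadline firing. A self-contained sketch using only the standard library (durations and messages illustrative):

	ctx, cancel := context.WithCancelCause(context.Background())
	ctx, cancelT := context.WithTimeoutCause(ctx, 10*time.Millisecond, errors.New("cleanup timed out"))
	defer cancelT()
	defer cancel(errors.New("caller stopped waiting"))

	<-ctx.Done()
	// context.Cause reports the recorded cause instead of a bare
	// context.DeadlineExceeded or context.Canceled.
	fmt.Println(context.Cause(ctx)) // "cleanup timed out"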
@@ -1,9 +1,11 @@
 package commands
 
 import (
+	"fmt"
 	"os"
 
 	debugcmd "github.com/docker/buildx/commands/debug"
+	historycmd "github.com/docker/buildx/commands/history"
 	imagetoolscmd "github.com/docker/buildx/commands/imagetools"
 	"github.com/docker/buildx/controller/remote"
 	"github.com/docker/buildx/util/cobrautil/completion"
@@ -14,13 +16,14 @@ import (
 	"github.com/docker/cli/cli-plugins/plugin"
 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/cli/debug"
+	cliflags "github.com/docker/cli/cli/flags"
 	"github.com/moby/buildkit/util/appcontext"
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
 )
 
-func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Command {
+func NewRootCmd(name string, isPlugin bool, dockerCli *command.DockerCli) *cobra.Command {
 	var opt rootOptions
 	cmd := &cobra.Command{
 		Short: "Docker Buildx",
@@ -36,13 +39,32 @@ func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Comman
 			if opt.debug {
 				debug.Enable()
 			}
 
 			cmd.SetContext(appcontext.Context())
 			if !isPlugin {
-				return nil
+				// InstallFlags and SetDefaultOptions are necessary to match
+				// the plugin mode behavior to handle env vars such as
+				// DOCKER_TLS, DOCKER_TLS_VERIFY, ... and we also need to use a
+				// new flagset to avoid conflict with the global debug flag
+				// that we already handle in the root command otherwise it
+				// would panic.
+				nflags := pflag.NewFlagSet(cmd.DisplayName(), pflag.ContinueOnError)
+				options := cliflags.NewClientOptions()
+				options.InstallFlags(nflags)
+				options.SetDefaultOptions(nflags)
+				return dockerCli.Initialize(options)
 			}
 			return plugin.PersistentPreRunE(cmd, args)
 		},
+		RunE: func(cmd *cobra.Command, args []string) error {
+			if len(args) == 0 {
+				return cmd.Help()
+			}
+			_ = cmd.Help()
+			return cli.StatusError{
+				StatusCode: 1,
+				Status:     fmt.Sprintf("ERROR: unknown command: %q", args[0]),
+			}
+		},
 	}
 	if !isPlugin {
 		// match plugin behavior for standalone mode
@@ -95,7 +117,8 @@ func addCommands(cmd *cobra.Command, opts *rootOptions, dockerCli command.Cli) {
 		versionCmd(dockerCli),
 		pruneCmd(dockerCli, opts),
 		duCmd(dockerCli, opts),
-		imagetoolscmd.RootCmd(dockerCli, imagetoolscmd.RootOptions{Builder: &opts.builder}),
+		imagetoolscmd.RootCmd(cmd, dockerCli, imagetoolscmd.RootOptions{Builder: &opts.builder}),
+		historycmd.RootCmd(cmd, dockerCli, historycmd.RootOptions{Builder: &opts.builder}),
 	)
 	if confutil.IsExperimental() {
 		cmd.AddCommand(debugcmd.RootCmd(dockerCli,
@@ -46,7 +46,6 @@ func runUse(dockerCli command.Cli, in useOptions) error {
 				return errors.Errorf("run `docker context use %s` to switch to context %s", in.builder, in.builder)
 			}
 		}
-
 	}
 	return errors.Wrapf(err, "failed to find instance %q", in.builder)
 }
@@ -34,9 +34,9 @@ const defaultTargetName = "default"
 // NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultHandle,
 // this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can
 // inspect the result and debug the cause of that error.
-func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.BuildOptions, inStream io.Reader, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultHandle, error) {
+func RunBuild(ctx context.Context, dockerCli command.Cli, in *controllerapi.BuildOptions, inStream io.Reader, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultHandle, *build.Inputs, error) {
 	if in.NoCache && len(in.NoCacheFilter) > 0 {
-		return nil, nil, errors.Errorf("--no-cache and --no-cache-filter cannot currently be used together")
+		return nil, nil, nil, errors.Errorf("--no-cache and --no-cache-filter cannot currently be used together")
 	}
 
 	contexts := map[string]build.NamedContext{}
@@ -70,16 +70,18 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
 
 	platforms, err := platformutil.Parse(in.Platforms)
 	if err != nil {
-		return nil, nil, err
+		return nil, nil, nil, err
 	}
 	opts.Platforms = platforms
 
 	dockerConfig := dockerCli.ConfigFile()
-	opts.Session = append(opts.Session, authprovider.NewDockerAuthProvider(dockerConfig, nil))
+	opts.Session = append(opts.Session, authprovider.NewDockerAuthProvider(authprovider.DockerAuthProviderConfig{
+		ConfigFile: dockerConfig,
+	}))
 
 	secrets, err := controllerapi.CreateSecrets(in.Secrets)
 	if err != nil {
-		return nil, nil, err
+		return nil, nil, nil, err
 	}
 	opts.Session = append(opts.Session, secrets)
 
@@ -89,13 +91,13 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
 	}
 	ssh, err := controllerapi.CreateSSH(sshSpecs)
 	if err != nil {
-		return nil, nil, err
+		return nil, nil, nil, err
 	}
 	opts.Session = append(opts.Session, ssh)
 
-	outputs, err := controllerapi.CreateExports(in.Exports)
+	outputs, _, err := controllerapi.CreateExports(in.Exports)
 	if err != nil {
-		return nil, nil, err
+		return nil, nil, nil, err
 	}
 	if in.ExportPush {
 		var pushUsed bool
@@ -134,7 +136,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
 
 	annotations, err := buildflags.ParseAnnotations(in.Annotations)
 	if err != nil {
-		return nil, nil, errors.Wrap(err, "parse annotations")
+		return nil, nil, nil, errors.Wrap(err, "parse annotations")
 	}
 
 	for _, o := range outputs {
@@ -154,7 +156,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
 
 	allow, err := buildflags.ParseEntitlements(in.Allow)
 	if err != nil {
-		return nil, nil, err
+		return nil, nil, nil, err
 	}
 	opts.Allow = allow
 
@@ -178,23 +180,28 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
 		builder.WithContextPathHash(contextPathHash),
 	)
 	if err != nil {
-		return nil, nil, err
+		return nil, nil, nil, err
 	}
 	if err = updateLastActivity(dockerCli, b.NodeGroup); err != nil {
-		return nil, nil, errors.Wrapf(err, "failed to update builder last activity time")
+		return nil, nil, nil, errors.Wrapf(err, "failed to update builder last activity time")
 	}
 	nodes, err := b.LoadNodes(ctx)
 	if err != nil {
-		return nil, nil, err
+		return nil, nil, nil, err
 	}
 
-	resp, res, err := buildTargets(ctx, dockerCli, nodes, map[string]build.Options{defaultTargetName: opts}, progress, generateResult)
+	var inputs *build.Inputs
+	buildOptions := map[string]build.Options{defaultTargetName: opts}
+	resp, res, err := buildTargets(ctx, dockerCli, nodes, buildOptions, progress, generateResult)
 	err = wrapBuildError(err, false)
 	if err != nil {
 		// NOTE: buildTargets can return *build.ResultHandle even on error.
-		return nil, res, err
+		return nil, res, nil, err
 	}
-	return resp, res, nil
+	if i, ok := buildOptions[defaultTargetName]; ok {
+		inputs = &i.Inputs
+	}
+	return resp, res, inputs, nil
 }
 
 // buildTargets runs the specified build and returns the result.
@@ -209,7 +216,7 @@ func buildTargets(ctx context.Context, dockerCli command.Cli, nodes []builder.No
 	if generateResult {
 		var mu sync.Mutex
 		var idx int
-		resp, err = build.BuildWithResultHandler(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress, func(driverIndex int, gotRes *build.ResultHandle) {
+		resp, err = build.BuildWithResultHandler(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.NewConfig(dockerCli), progress, func(driverIndex int, gotRes *build.ResultHandle) {
 			mu.Lock()
 			defer mu.Unlock()
 			if res == nil || driverIndex < idx {
@@ -217,7 +224,7 @@ func buildTargets(ctx context.Context, dockerCli command.Cli, nodes []builder.No
 		})
 	} else {
-		resp, err = build.Build(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress)
+		resp, err = build.Build(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.NewConfig(dockerCli), progress)
 	}
 	if err != nil {
 		return nil, res, err
@@ -4,18 +4,19 @@ import (
 	"context"
 	"io"
 
+	"github.com/docker/buildx/build"
 	controllerapi "github.com/docker/buildx/controller/pb"
 	"github.com/docker/buildx/util/progress"
 	"github.com/moby/buildkit/client"
 )
 
 type BuildxController interface {
-	Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (ref string, resp *client.SolveResponse, err error)
+	Build(ctx context.Context, options *controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (ref string, resp *client.SolveResponse, inputs *build.Inputs, err error)
 	// Invoke starts an IO session into the specified process.
-	// If pid doesn't matche to any running processes, it starts a new process with the specified config.
-	// If there is no container running or InvokeConfig.Rollback is speicfied, the process will start in a newly created container.
+	// If pid doesn't match to any running processes, it starts a new process with the specified config.
+	// If there is no container running or InvokeConfig.Rollback is specified, the process will start in a newly created container.
 	// NOTE: If needed, in the future, we can split this API into three APIs (NewContainer, NewProcess and Attach).
-	Invoke(ctx context.Context, ref, pid string, options controllerapi.InvokeConfig, ioIn io.ReadCloser, ioOut io.WriteCloser, ioErr io.WriteCloser) error
+	Invoke(ctx context.Context, ref, pid string, options *controllerapi.InvokeConfig, ioIn io.ReadCloser, ioOut io.WriteCloser, ioErr io.WriteCloser) error
 	Kill(ctx context.Context) error
 	Close() error
 	List(ctx context.Context) (refs []string, _ error)
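A hedged sketch of a caller driving the updated interface: `Build` now also hands back the resolved `*build.Inputs`, and the returned ref gates follow-up calls. Here `ctrl`, `opts`, `pw`, and the process ID are placeholders, and the `ContextPath` and `Tty` fields are assumptions about the surrounding types rather than anything this diff guarantees:

	ref, resp, inputs, err := ctrl.Build(ctx, opts, os.Stdin, pw)
	if err != nil {
		return err
	}
	_ = resp // solve metadata (digest, exporter response, ...)
	if inputs != nil {
		fmt.Println("built from:", inputs.ContextPath)
	}
	// Re-enter the same session, e.g. for an interactive debug process.
	return ctrl.Invoke(ctx, ref, "shell-1", &controllerapi.InvokeConfig{Tty: true},
		os.Stdin, os.Stdout, os.Stderr)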
@@ -1,7 +1,10 @@
 package errdefs
 
 import (
+	"io"
+
 	"github.com/containerd/typeurl/v2"
+	"github.com/docker/buildx/util/desktop"
 	"github.com/moby/buildkit/util/grpcerrors"
 )
 
@@ -10,7 +13,7 @@ func init() {
 }
 
 type BuildError struct {
-	Build
+	*Build
 	error
 }
 
@@ -19,16 +22,27 @@ func (e *BuildError) Unwrap() error {
 }
 
 func (e *BuildError) ToProto() grpcerrors.TypedErrorProto {
-	return &e.Build
+	return e.Build
 }
 
-func WrapBuild(err error, ref string) error {
+func (e *BuildError) PrintBuildDetails(w io.Writer) error {
+	if e.Ref == "" {
+		return nil
+	}
+	ebr := &desktop.ErrorWithBuildRef{
+		Ref: e.Ref,
+		Err: e.error,
+	}
+	return ebr.Print(w)
+}
+
+func WrapBuild(err error, sessionID string, ref string) error {
 	if err == nil {
 		return nil
 	}
-	return &BuildError{Build: Build{Ref: ref}, error: err}
+	return &BuildError{Build: &Build{SessionID: sessionID, Ref: ref}, error: err}
 }
 
 func (b *Build) WrapError(err error) error {
-	return &BuildError{error: err, Build: *b}
+	return &BuildError{error: err, Build: b}
 }
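On the consuming side, the enriched error is meant to be unwrapped with `errors.As`; a minimal sketch (the writer choice and the `buildErr` value are illustrative):

	var be *errdefs.BuildError
	if errors.As(buildErr, &be) {
		// Prints the "build details" reference when a Ref was recorded.
		_ = be.PrintBuildDetails(os.Stderr)
	}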
@@ -1,77 +1,157 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: errdefs.proto
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.34.1
+// 	protoc        v3.11.4
+// source: github.com/docker/buildx/controller/errdefs/errdefs.proto
 
 package errdefs
 
 import (
-	fmt "fmt"
-	proto "github.com/gogo/protobuf/proto"
-	_ "github.com/moby/buildkit/solver/pb"
-	math "math"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
 )
 
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
 
 type Build struct {
-	Ref                  string   `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
 
-func (m *Build) Reset()         { *m = Build{} }
-func (m *Build) String() string { return proto.CompactTextString(m) }
-func (*Build) ProtoMessage()    {}
-func (*Build) Descriptor() ([]byte, []int) {
-	return fileDescriptor_689dc58a5060aff5, []int{0}
-}
-func (m *Build) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Build.Unmarshal(m, b)
-}
-func (m *Build) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Build.Marshal(b, m, deterministic)
-}
-func (m *Build) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Build.Merge(m, src)
-}
-func (m *Build) XXX_Size() int {
-	return xxx_messageInfo_Build.Size(m)
-}
-func (m *Build) XXX_DiscardUnknown() {
-	xxx_messageInfo_Build.DiscardUnknown(m)
-}
+	SessionID string `protobuf:"bytes,1,opt,name=SessionID,proto3" json:"SessionID,omitempty"`
+	Ref       string `protobuf:"bytes,2,opt,name=Ref,proto3" json:"Ref,omitempty"`
+}
 
-var xxx_messageInfo_Build proto.InternalMessageInfo
+func (x *Build) Reset() {
+	*x = Build{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_github_com_docker_buildx_controller_errdefs_errdefs_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
 
-func (m *Build) GetRef() string {
-	if m != nil {
-		return m.Ref
-	}
-	return ""
-}
+func (x *Build) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
 
-func init() {
-	proto.RegisterType((*Build)(nil), "errdefs.Build")
-}
+func (*Build) ProtoMessage() {}
 
-func init() { proto.RegisterFile("errdefs.proto", fileDescriptor_689dc58a5060aff5) }
+func (x *Build) ProtoReflect() protoreflect.Message {
+	mi := &file_github_com_docker_buildx_controller_errdefs_errdefs_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
 
-var fileDescriptor_689dc58a5060aff5 = []byte{
-	// 111 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4d, 0x2d, 0x2a, 0x4a,
-	0x49, 0x4d, 0x2b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x87, 0x72, 0xa5, 0x74, 0xd2,
-	0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0x73, 0xf3, 0x93, 0x2a, 0xf5, 0x93,
-	0x4a, 0x33, 0x73, 0x52, 0xb2, 0x33, 0x4b, 0xf4, 0x8b, 0xf3, 0x73, 0xca, 0x52, 0x8b, 0xf4, 0x0b,
-	0x92, 0xf4, 0xf3, 0x0b, 0xa0, 0xda, 0x94, 0x24, 0xb9, 0x58, 0x9d, 0x40, 0xf2, 0x42, 0x02, 0x5c,
-	0xcc, 0x41, 0xa9, 0x69, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x20, 0x66, 0x12, 0x1b, 0x58,
-	0x85, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x56, 0x52, 0x41, 0x91, 0x69, 0x00, 0x00, 0x00,
-}
+// Deprecated: Use Build.ProtoReflect.Descriptor instead.
+func (*Build) Descriptor() ([]byte, []int) {
+	return file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Build) GetSessionID() string {
+	if x != nil {
+		return x.SessionID
+	}
+	return ""
+}
+
+func (x *Build) GetRef() string {
+	if x != nil {
+		return x.Ref
+	}
+	return ""
+}
+
+var File_github_com_docker_buildx_controller_errdefs_errdefs_proto protoreflect.FileDescriptor
+
+var file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDesc = []byte{
+	0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x6f, 0x63,
+	0x6b, 0x65, 0x72, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x78, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+	0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x66, 0x73, 0x2f, 0x65, 0x72,
+	0x72, 0x64, 0x65, 0x66, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x64, 0x6f, 0x63,
+	0x6b, 0x65, 0x72, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x78, 0x2e, 0x65, 0x72, 0x72, 0x64, 0x65,
+	0x66, 0x73, 0x22, 0x37, 0x0a, 0x05, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x53,
+	0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
+	0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x10, 0x0a, 0x03, 0x52, 0x65, 0x66,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x42, 0x2d, 0x5a, 0x2b, 0x67,
+	0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72,
+	0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x78, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c,
+	0x65, 0x72, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x66, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x33,
+}
+
+var (
+	file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescOnce sync.Once
+	file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescData = file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDesc
+)
+
+func file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescGZIP() []byte {
+	file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescOnce.Do(func() {
+		file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescData)
+	})
+	return file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescData
+}
+
+var file_github_com_docker_buildx_controller_errdefs_errdefs_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_github_com_docker_buildx_controller_errdefs_errdefs_proto_goTypes = []interface{}{
+	(*Build)(nil), // 0: docker.buildx.errdefs.Build
+}
+var file_github_com_docker_buildx_controller_errdefs_errdefs_proto_depIdxs = []int32{
+	0, // [0:0] is the sub-list for method output_type
+	0, // [0:0] is the sub-list for method input_type
+	0, // [0:0] is the sub-list for extension type_name
+	0, // [0:0] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_github_com_docker_buildx_controller_errdefs_errdefs_proto_init() }
+func file_github_com_docker_buildx_controller_errdefs_errdefs_proto_init() {
+	if File_github_com_docker_buildx_controller_errdefs_errdefs_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_github_com_docker_buildx_controller_errdefs_errdefs_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Build); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   1,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_github_com_docker_buildx_controller_errdefs_errdefs_proto_goTypes,
+		DependencyIndexes: file_github_com_docker_buildx_controller_errdefs_errdefs_proto_depIdxs,
+		MessageInfos:      file_github_com_docker_buildx_controller_errdefs_errdefs_proto_msgTypes,
+	}.Build()
+	File_github_com_docker_buildx_controller_errdefs_errdefs_proto = out.File
+	file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDesc = nil
+	file_github_com_docker_buildx_controller_errdefs_errdefs_proto_goTypes = nil
+	file_github_com_docker_buildx_controller_errdefs_errdefs_proto_depIdxs = nil
+}
@@ -1,9 +1,10 @@
 syntax = "proto3";
 
-package errdefs;
+package docker.buildx.errdefs;
 
-import "github.com/moby/buildkit/solver/pb/ops.proto";
+option go_package = "github.com/docker/buildx/controller/errdefs";
 
 message Build {
-	string Ref = 1;
+	string SessionID = 1;
+	string Ref = 2;
 }
controller/errdefs/errdefs_vtproto.pb.go (new file, 241 lines)
@@ -0,0 +1,241 @@
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// protoc-gen-go-vtproto version: v0.6.1-0.20240319094008-0393e58bdf10
+// source: github.com/docker/buildx/controller/errdefs/errdefs.proto
+
+package errdefs
+
+import (
+	fmt "fmt"
+	protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+	proto "google.golang.org/protobuf/proto"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	io "io"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *Build) CloneVT() *Build {
+	if m == nil {
+		return (*Build)(nil)
+	}
+	r := new(Build)
+	r.SessionID = m.SessionID
+	r.Ref = m.Ref
+	if len(m.unknownFields) > 0 {
+		r.unknownFields = make([]byte, len(m.unknownFields))
+		copy(r.unknownFields, m.unknownFields)
+	}
+	return r
+}
+
+func (m *Build) CloneMessageVT() proto.Message {
+	return m.CloneVT()
+}
+
+func (this *Build) EqualVT(that *Build) bool {
+	if this == that {
+		return true
+	} else if this == nil || that == nil {
+		return false
+	}
+	if this.SessionID != that.SessionID {
+		return false
+	}
+	if this.Ref != that.Ref {
+		return false
+	}
+	return string(this.unknownFields) == string(that.unknownFields)
+}
+
+func (this *Build) EqualMessageVT(thatMsg proto.Message) bool {
+	that, ok := thatMsg.(*Build)
+	if !ok {
+		return false
+	}
+	return this.EqualVT(that)
+}
+func (m *Build) MarshalVT() (dAtA []byte, err error) {
+	if m == nil {
+		return nil, nil
+	}
+	size := m.SizeVT()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Build) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *Build) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	if m == nil {
+		return 0, nil
+	}
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.unknownFields != nil {
+		i -= len(m.unknownFields)
+		copy(dAtA[i:], m.unknownFields)
+	}
+	if len(m.Ref) > 0 {
+		i -= len(m.Ref)
+		copy(dAtA[i:], m.Ref)
+		i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Ref)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.SessionID) > 0 {
+		i -= len(m.SessionID)
+		copy(dAtA[i:], m.SessionID)
+		i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SessionID)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Build) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.SessionID)
+	if l > 0 {
+		n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+	}
+	l = len(m.Ref)
+	if l > 0 {
+		n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+	}
+	n += len(m.unknownFields)
+	return n
+}
+
+func (m *Build) UnmarshalVT(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return protohelpers.ErrIntOverflow
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Build: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Build: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return protohelpers.ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return protohelpers.ErrInvalidLength
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return protohelpers.ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SessionID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return protohelpers.ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return protohelpers.ErrInvalidLength
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return protohelpers.ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Ref = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := protohelpers.Skip(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return protohelpers.ErrInvalidLength
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
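The vtproto code path above gives `Build` reflection-free marshalling; a hedged round-trip sketch using only the methods generated in this file (values illustrative):

	b := &errdefs.Build{SessionID: "sess-1", Ref: "ref-1"}
	data, err := b.MarshalVT()
	if err != nil {
		panic(err)
	}
	var out errdefs.Build
	if err := out.UnmarshalVT(data); err != nil {
		panic(err)
	}
	fmt.Println(out.GetRef(), out.EqualVT(b)) // ref-1 true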
@@ -1,3 +0,0 @@
-package errdefs
-
-//go:generate protoc -I=. -I=../../vendor/ --gogo_out=plugins=grpc:. errdefs.proto
@@ -11,6 +11,7 @@ import (
 	controllererrors "github.com/docker/buildx/controller/errdefs"
 	controllerapi "github.com/docker/buildx/controller/pb"
 	"github.com/docker/buildx/controller/processes"
+	"github.com/docker/buildx/util/desktop"
 	"github.com/docker/buildx/util/ioset"
 	"github.com/docker/buildx/util/progress"
 	"github.com/docker/cli/cli/command"
@@ -21,7 +22,7 @@ import (
 func NewLocalBuildxController(ctx context.Context, dockerCli command.Cli, logger progress.SubLogger) control.BuildxController {
 	return &localController{
 		dockerCli: dockerCli,
-		ref:       "local",
+		sessionID: "local",
 		processes: processes.NewManager(),
 	}
 }
@@ -35,46 +36,51 @@ type buildConfig struct {
 
 type localController struct {
 	dockerCli   command.Cli
-	ref         string
+	sessionID   string
 	buildConfig buildConfig
 	processes   *processes.Manager
 
 	buildOnGoing atomic.Bool
 }
 
-func (b *localController) Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, error) {
+func (b *localController) Build(ctx context.Context, options *controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, *build.Inputs, error) {
 	if !b.buildOnGoing.CompareAndSwap(false, true) {
-		return "", nil, errors.New("build ongoing")
+		return "", nil, nil, errors.New("build ongoing")
 	}
 	defer b.buildOnGoing.Store(false)
 
-	resp, res, buildErr := cbuild.RunBuild(ctx, b.dockerCli, options, in, progress, true)
+	resp, res, dockerfileMappings, buildErr := cbuild.RunBuild(ctx, b.dockerCli, options, in, progress, true)
 	// NOTE: RunBuild can return *build.ResultHandle even on error.
 	if res != nil {
 		b.buildConfig = buildConfig{
 			resultCtx:    res,
-			buildOptions: &options,
+			buildOptions: options,
 		}
 		if buildErr != nil {
-			buildErr = controllererrors.WrapBuild(buildErr, b.ref)
+			var ref string
+			var ebr *desktop.ErrorWithBuildRef
+			if errors.As(buildErr, &ebr) {
+				ref = ebr.Ref
+			}
+			buildErr = controllererrors.WrapBuild(buildErr, b.sessionID, ref)
 		}
 	}
 	if buildErr != nil {
-		return "", nil, buildErr
+		return "", nil, nil, buildErr
 	}
-	return b.ref, resp, nil
+	return b.sessionID, resp, dockerfileMappings, nil
 }
 
-func (b *localController) ListProcesses(ctx context.Context, ref string) (infos []*controllerapi.ProcessInfo, retErr error) {
-	if ref != b.ref {
-		return nil, errors.Errorf("unknown ref %q", ref)
+func (b *localController) ListProcesses(ctx context.Context, sessionID string) (infos []*controllerapi.ProcessInfo, retErr error) {
+	if sessionID != b.sessionID {
+		return nil, errors.Errorf("unknown session ID %q", sessionID)
 	}
 	return b.processes.ListProcesses(), nil
 }
 
-func (b *localController) DisconnectProcess(ctx context.Context, ref, pid string) error {
-	if ref != b.ref {
-		return errors.Errorf("unknown ref %q", ref)
+func (b *localController) DisconnectProcess(ctx context.Context, sessionID, pid string) error {
+	if sessionID != b.sessionID {
+		return errors.Errorf("unknown session ID %q", sessionID)
 	}
 	return b.processes.DeleteProcess(pid)
 }
@@ -83,9 +89,9 @@ func (b *localController) cancelRunningProcesses() {
 	b.processes.CancelRunningProcesses()
 }
 
-func (b *localController) Invoke(ctx context.Context, ref string, pid string, cfg controllerapi.InvokeConfig, ioIn io.ReadCloser, ioOut io.WriteCloser, ioErr io.WriteCloser) error {
-	if ref != b.ref {
-		return errors.Errorf("unknown ref %q", ref)
+func (b *localController) Invoke(ctx context.Context, sessionID string, pid string, cfg *controllerapi.InvokeConfig, ioIn io.ReadCloser, ioOut io.WriteCloser, ioErr io.WriteCloser) error {
+	if sessionID != b.sessionID {
+		return errors.Errorf("unknown session ID %q", sessionID)
 	}
 
 	proc, ok := b.processes.Get(pid)
@@ -95,7 +101,7 @@ func (b *localController) Invoke(ctx context.Context, ref string, pid string, cf
 			return errors.New("no build result is registered")
 		}
 		var err error
-		proc, err = b.processes.StartProcess(pid, b.buildConfig.resultCtx, &cfg)
+		proc, err = b.processes.StartProcess(pid, b.buildConfig.resultCtx, cfg)
 		if err != nil {
 			return err
 		}
@@ -103,7 +109,7 @@ func (b *localController) Invoke(ctx context.Context, ref string, pid string, cf
 
 	// Attach containerIn to this process
 	ioCancelledCh := make(chan struct{})
-	proc.ForwardIO(&ioset.In{Stdin: ioIn, Stdout: ioOut, Stderr: ioErr}, func() { close(ioCancelledCh) })
+	proc.ForwardIO(&ioset.In{Stdin: ioIn, Stdout: ioOut, Stderr: ioErr}, func(error) { close(ioCancelledCh) })
 
 	select {
 	case <-ioCancelledCh:
@@ -111,7 +117,7 @@ func (b *localController) Invoke(ctx context.Context, ref string, pid string, cf
 	case err := <-proc.Done():
 		return err
 	case <-ctx.Done():
-		return ctx.Err()
+		return context.Cause(ctx)
 	}
 }
 
@@ -130,7 +136,7 @@ func (b *localController) Close() error {
 }
 
 func (b *localController) List(ctx context.Context) (res []string, _ error) {
-	return []string{b.ref}, nil
+	return []string{b.sessionID}, nil
 }
 
 func (b *localController) Disconnect(ctx context.Context, key string) error {
@@ -138,9 +144,9 @@ func (b *localController) Disconnect(ctx context.Context, key string) error {
 	return nil
 }
 
-func (b *localController) Inspect(ctx context.Context, ref string) (*controllerapi.InspectResponse, error) {
-	if ref != b.ref {
-		return nil, errors.Errorf("unknown ref %q", ref)
+func (b *localController) Inspect(ctx context.Context, sessionID string) (*controllerapi.InspectResponse, error) {
+	if sessionID != b.sessionID {
+		return nil, errors.Errorf("unknown session ID %q", sessionID)
 	}
 	return &controllerapi.InspectResponse{Options: b.buildConfig.buildOptions}, nil
 }
@@ -1,6 +1,10 @@
 package pb
 
-import "github.com/moby/buildkit/client"
+import (
+	"maps"
+
+	"github.com/moby/buildkit/client"
+)
 
 func CreateCaches(entries []*CacheOptionsEntry) []client.CacheOptionsEntry {
 	var outs []client.CacheOptionsEntry
@@ -12,9 +16,7 @@ func CreateCaches(entries []*CacheOptionsEntry) []client.CacheOptionsEntry {
 			Type:  entry.Type,
 			Attrs: map[string]string{},
 		}
-		for k, v := range entry.Attrs {
-			out.Attrs[k] = v
-		}
+		maps.Copy(out.Attrs, entry.Attrs)
 		outs = append(outs, out)
 	}
 	return outs
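The standard-library `maps.Copy` (Go 1.21) is a drop-in replacement for the deleted loop; a minimal sketch with illustrative values:

	src := map[string]string{"mode": "max", "compression": "zstd"}
	dst := map[string]string{}
	maps.Copy(dst, src) // same effect as: for k, v := range src { dst[k] = v }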
File diff suppressed because it is too large
@@ -5,7 +5,7 @@ package buildx.controller.v1;
 import "github.com/moby/buildkit/api/services/control/control.proto";
 import "github.com/moby/buildkit/sourcepolicy/pb/policy.proto";
 
-option go_package = "pb";
+option go_package = "github.com/docker/buildx/controller/pb";
 
 service Controller {
   rpc Build(BuildRequest) returns (BuildResponse);
@@ -21,7 +21,7 @@ service Controller {
 }
 
 message ListProcessesRequest {
-  string Ref = 1;
+  string SessionID = 1;
 }
 
 message ListProcessesResponse {
@@ -34,7 +34,7 @@ message ProcessInfo {
 }
 
 message DisconnectProcessRequest {
-  string Ref = 1;
+  string SessionID = 1;
   string ProcessID = 2;
 }
 
@@ -42,7 +42,7 @@ message DisconnectProcessResponse {
 }
 
 message BuildRequest {
-  string Ref = 1;
+  string SessionID = 1;
   BuildOptions Options = 2;
 }
 
@@ -118,7 +118,7 @@ message CallFunc {
 }
 
 message InspectRequest {
-  string Ref = 1;
+  string SessionID = 1;
 }
 
 message InspectResponse {
@@ -140,13 +140,13 @@ message BuildResponse {
 }
 
 message DisconnectRequest {
-  string Ref = 1;
+  string SessionID = 1;
 }
 
 message DisconnectResponse {}
 
 message ListRequest {
-  string Ref = 1;
+  string SessionID = 1;
 }
 
 message ListResponse {
@@ -161,7 +161,7 @@ message InputMessage {
 }
 
 message InputInitMessage {
-  string Ref = 1;
+  string SessionID = 1;
 }
 
 message DataMessage {
@@ -186,7 +186,7 @@ message Message {
 }
 
 message InitMessage {
-  string Ref = 1;
+  string SessionID = 1;
 
   // If ProcessID already exists in the server, it tries to connect to it
   // instead of invoking the new one. In this case, InvokeConfig will be ignored.
@@ -227,7 +227,7 @@ message SignalMessage {
 }
 
 message StatusRequest {
-  string Ref = 1;
+  string SessionID = 1;
 }
 
 message StatusResponse {
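Renaming the proto field from `Ref` to `SessionID` while keeping field number 1 is wire-compatible: protobuf's binary encoding identifies fields by number only, so old and new binaries can still exchange these messages (only the generated Go identifiers and the JSON field names change). A standalone sketch of why, hand-encoding field 1 the way protobuf would:

```go
package main

import "fmt"

// Field 1 with wire type 2 (length-delimited) encodes as tag byte 0x0A
// regardless of whether the field is named Ref or SessionID: the name
// never appears on the wire.
func main() {
	sessionID := "abc"
	// tag (field 1, wire type 2), then length, then the string bytes
	wire := append([]byte{0x0A, byte(len(sessionID))}, sessionID...)
	fmt.Printf("% x\n", wire) // 0a 03 61 62 63 — identical under either name
}
```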
controller/pb/controller_grpc.pb.go (new file, 452 lines)
@@ -0,0 +1,452 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.5.1
+// - protoc             v3.11.4
+// source: github.com/docker/buildx/controller/pb/controller.proto
+
+package pb
+
+import (
+    context "context"
+    grpc "google.golang.org/grpc"
+    codes "google.golang.org/grpc/codes"
+    status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.64.0 or later.
+const _ = grpc.SupportPackageIsVersion9
+
+const (
+    Controller_Build_FullMethodName             = "/buildx.controller.v1.Controller/Build"
+    Controller_Inspect_FullMethodName           = "/buildx.controller.v1.Controller/Inspect"
+    Controller_Status_FullMethodName            = "/buildx.controller.v1.Controller/Status"
+    Controller_Input_FullMethodName             = "/buildx.controller.v1.Controller/Input"
+    Controller_Invoke_FullMethodName            = "/buildx.controller.v1.Controller/Invoke"
+    Controller_List_FullMethodName              = "/buildx.controller.v1.Controller/List"
+    Controller_Disconnect_FullMethodName        = "/buildx.controller.v1.Controller/Disconnect"
+    Controller_Info_FullMethodName              = "/buildx.controller.v1.Controller/Info"
+    Controller_ListProcesses_FullMethodName     = "/buildx.controller.v1.Controller/ListProcesses"
+    Controller_DisconnectProcess_FullMethodName = "/buildx.controller.v1.Controller/DisconnectProcess"
+)
+
+// ControllerClient is the client API for Controller service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type ControllerClient interface {
+    Build(ctx context.Context, in *BuildRequest, opts ...grpc.CallOption) (*BuildResponse, error)
+    Inspect(ctx context.Context, in *InspectRequest, opts ...grpc.CallOption) (*InspectResponse, error)
+    Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[StatusResponse], error)
+    Input(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[InputMessage, InputResponse], error)
+    Invoke(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[Message, Message], error)
+    List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error)
+    Disconnect(ctx context.Context, in *DisconnectRequest, opts ...grpc.CallOption) (*DisconnectResponse, error)
+    Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error)
+    ListProcesses(ctx context.Context, in *ListProcessesRequest, opts ...grpc.CallOption) (*ListProcessesResponse, error)
+    DisconnectProcess(ctx context.Context, in *DisconnectProcessRequest, opts ...grpc.CallOption) (*DisconnectProcessResponse, error)
+}
+
+type controllerClient struct {
+    cc grpc.ClientConnInterface
+}
+
+func NewControllerClient(cc grpc.ClientConnInterface) ControllerClient {
+    return &controllerClient{cc}
+}
+
+func (c *controllerClient) Build(ctx context.Context, in *BuildRequest, opts ...grpc.CallOption) (*BuildResponse, error) {
+    cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+    out := new(BuildResponse)
+    err := c.cc.Invoke(ctx, Controller_Build_FullMethodName, in, out, cOpts...)
+    if err != nil {
+        return nil, err
+    }
+    return out, nil
+}
+
+func (c *controllerClient) Inspect(ctx context.Context, in *InspectRequest, opts ...grpc.CallOption) (*InspectResponse, error) {
+    cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+    out := new(InspectResponse)
+    err := c.cc.Invoke(ctx, Controller_Inspect_FullMethodName, in, out, cOpts...)
+    if err != nil {
+        return nil, err
+    }
+    return out, nil
+}
+
+func (c *controllerClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[StatusResponse], error) {
+    cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+    stream, err := c.cc.NewStream(ctx, &Controller_ServiceDesc.Streams[0], Controller_Status_FullMethodName, cOpts...)
+    if err != nil {
+        return nil, err
+    }
+    x := &grpc.GenericClientStream[StatusRequest, StatusResponse]{ClientStream: stream}
+    if err := x.ClientStream.SendMsg(in); err != nil {
+        return nil, err
+    }
+    if err := x.ClientStream.CloseSend(); err != nil {
+        return nil, err
+    }
+    return x, nil
+}
+
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type Controller_StatusClient = grpc.ServerStreamingClient[StatusResponse]
+
+func (c *controllerClient) Input(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[InputMessage, InputResponse], error) {
+    cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+    stream, err := c.cc.NewStream(ctx, &Controller_ServiceDesc.Streams[1], Controller_Input_FullMethodName, cOpts...)
+    if err != nil {
+        return nil, err
+    }
+    x := &grpc.GenericClientStream[InputMessage, InputResponse]{ClientStream: stream}
+    return x, nil
+}
+
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type Controller_InputClient = grpc.ClientStreamingClient[InputMessage, InputResponse]
+
+func (c *controllerClient) Invoke(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[Message, Message], error) {
+    cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+    stream, err := c.cc.NewStream(ctx, &Controller_ServiceDesc.Streams[2], Controller_Invoke_FullMethodName, cOpts...)
+    if err != nil {
+        return nil, err
+    }
+    x := &grpc.GenericClientStream[Message, Message]{ClientStream: stream}
+    return x, nil
+}
+
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type Controller_InvokeClient = grpc.BidiStreamingClient[Message, Message]
+
+func (c *controllerClient) List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error) {
+    cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+    out := new(ListResponse)
+    err := c.cc.Invoke(ctx, Controller_List_FullMethodName, in, out, cOpts...)
+    if err != nil {
+        return nil, err
+    }
+    return out, nil
+}
+
+func (c *controllerClient) Disconnect(ctx context.Context, in *DisconnectRequest, opts ...grpc.CallOption) (*DisconnectResponse, error) {
+    cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+    out := new(DisconnectResponse)
+    err := c.cc.Invoke(ctx, Controller_Disconnect_FullMethodName, in, out, cOpts...)
+    if err != nil {
+        return nil, err
+    }
+    return out, nil
+}
+
+func (c *controllerClient) Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) {
+    cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+    out := new(InfoResponse)
+    err := c.cc.Invoke(ctx, Controller_Info_FullMethodName, in, out, cOpts...)
+    if err != nil {
+        return nil, err
+    }
+    return out, nil
+}
+
+func (c *controllerClient) ListProcesses(ctx context.Context, in *ListProcessesRequest, opts ...grpc.CallOption) (*ListProcessesResponse, error) {
+    cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+    out := new(ListProcessesResponse)
+    err := c.cc.Invoke(ctx, Controller_ListProcesses_FullMethodName, in, out, cOpts...)
+    if err != nil {
+        return nil, err
+    }
+    return out, nil
+}
+
+func (c *controllerClient) DisconnectProcess(ctx context.Context, in *DisconnectProcessRequest, opts ...grpc.CallOption) (*DisconnectProcessResponse, error) {
+    cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+    out := new(DisconnectProcessResponse)
+    err := c.cc.Invoke(ctx, Controller_DisconnectProcess_FullMethodName, in, out, cOpts...)
+    if err != nil {
+        return nil, err
+    }
+    return out, nil
+}
+
+// ControllerServer is the server API for Controller service.
+// All implementations should embed UnimplementedControllerServer
+// for forward compatibility.
+type ControllerServer interface {
+    Build(context.Context, *BuildRequest) (*BuildResponse, error)
+    Inspect(context.Context, *InspectRequest) (*InspectResponse, error)
+    Status(*StatusRequest, grpc.ServerStreamingServer[StatusResponse]) error
+    Input(grpc.ClientStreamingServer[InputMessage, InputResponse]) error
+    Invoke(grpc.BidiStreamingServer[Message, Message]) error
+    List(context.Context, *ListRequest) (*ListResponse, error)
+    Disconnect(context.Context, *DisconnectRequest) (*DisconnectResponse, error)
+    Info(context.Context, *InfoRequest) (*InfoResponse, error)
+    ListProcesses(context.Context, *ListProcessesRequest) (*ListProcessesResponse, error)
+    DisconnectProcess(context.Context, *DisconnectProcessRequest) (*DisconnectProcessResponse, error)
+}
+
+// UnimplementedControllerServer should be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedControllerServer struct{}
+
+func (UnimplementedControllerServer) Build(context.Context, *BuildRequest) (*BuildResponse, error) {
+    return nil, status.Errorf(codes.Unimplemented, "method Build not implemented")
+}
+func (UnimplementedControllerServer) Inspect(context.Context, *InspectRequest) (*InspectResponse, error) {
+    return nil, status.Errorf(codes.Unimplemented, "method Inspect not implemented")
+}
+func (UnimplementedControllerServer) Status(*StatusRequest, grpc.ServerStreamingServer[StatusResponse]) error {
+    return status.Errorf(codes.Unimplemented, "method Status not implemented")
+}
+func (UnimplementedControllerServer) Input(grpc.ClientStreamingServer[InputMessage, InputResponse]) error {
+    return status.Errorf(codes.Unimplemented, "method Input not implemented")
+}
+func (UnimplementedControllerServer) Invoke(grpc.BidiStreamingServer[Message, Message]) error {
+    return status.Errorf(codes.Unimplemented, "method Invoke not implemented")
+}
+func (UnimplementedControllerServer) List(context.Context, *ListRequest) (*ListResponse, error) {
+    return nil, status.Errorf(codes.Unimplemented, "method List not implemented")
+}
+func (UnimplementedControllerServer) Disconnect(context.Context, *DisconnectRequest) (*DisconnectResponse, error) {
+    return nil, status.Errorf(codes.Unimplemented, "method Disconnect not implemented")
+}
+func (UnimplementedControllerServer) Info(context.Context, *InfoRequest) (*InfoResponse, error) {
+    return nil, status.Errorf(codes.Unimplemented, "method Info not implemented")
+}
+func (UnimplementedControllerServer) ListProcesses(context.Context, *ListProcessesRequest) (*ListProcessesResponse, error) {
+    return nil, status.Errorf(codes.Unimplemented, "method ListProcesses not implemented")
+}
+func (UnimplementedControllerServer) DisconnectProcess(context.Context, *DisconnectProcessRequest) (*DisconnectProcessResponse, error) {
+    return nil, status.Errorf(codes.Unimplemented, "method DisconnectProcess not implemented")
+}
+func (UnimplementedControllerServer) testEmbeddedByValue() {}
+
+// UnsafeControllerServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to ControllerServer will
+// result in compilation errors.
+type UnsafeControllerServer interface {
+    mustEmbedUnimplementedControllerServer()
+}
+
+func RegisterControllerServer(s grpc.ServiceRegistrar, srv ControllerServer) {
+    // If the following call pancis, it indicates UnimplementedControllerServer was
+    // embedded by pointer and is nil. This will cause panics if an
+    // unimplemented method is ever invoked, so we test this at initialization
+    // time to prevent it from happening at runtime later due to I/O.
+    if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+        t.testEmbeddedByValue()
+    }
+    s.RegisterService(&Controller_ServiceDesc, srv)
+}
+
+func _Controller_Build_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+    in := new(BuildRequest)
+    if err := dec(in); err != nil {
+        return nil, err
+    }
+    if interceptor == nil {
+        return srv.(ControllerServer).Build(ctx, in)
+    }
+    info := &grpc.UnaryServerInfo{
+        Server:     srv,
+        FullMethod: Controller_Build_FullMethodName,
+    }
+    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+        return srv.(ControllerServer).Build(ctx, req.(*BuildRequest))
+    }
+    return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_Inspect_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+    in := new(InspectRequest)
+    if err := dec(in); err != nil {
+        return nil, err
+    }
+    if interceptor == nil {
+        return srv.(ControllerServer).Inspect(ctx, in)
+    }
+    info := &grpc.UnaryServerInfo{
+        Server:     srv,
+        FullMethod: Controller_Inspect_FullMethodName,
+    }
+    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+        return srv.(ControllerServer).Inspect(ctx, req.(*InspectRequest))
+    }
+    return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_Status_Handler(srv interface{}, stream grpc.ServerStream) error {
+    m := new(StatusRequest)
+    if err := stream.RecvMsg(m); err != nil {
+        return err
+    }
+    return srv.(ControllerServer).Status(m, &grpc.GenericServerStream[StatusRequest, StatusResponse]{ServerStream: stream})
+}
+
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type Controller_StatusServer = grpc.ServerStreamingServer[StatusResponse]
+
+func _Controller_Input_Handler(srv interface{}, stream grpc.ServerStream) error {
+    return srv.(ControllerServer).Input(&grpc.GenericServerStream[InputMessage, InputResponse]{ServerStream: stream})
+}
+
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type Controller_InputServer = grpc.ClientStreamingServer[InputMessage, InputResponse]
+
+func _Controller_Invoke_Handler(srv interface{}, stream grpc.ServerStream) error {
+    return srv.(ControllerServer).Invoke(&grpc.GenericServerStream[Message, Message]{ServerStream: stream})
+}
+
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type Controller_InvokeServer = grpc.BidiStreamingServer[Message, Message]
+
+func _Controller_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+    in := new(ListRequest)
+    if err := dec(in); err != nil {
+        return nil, err
+    }
+    if interceptor == nil {
+        return srv.(ControllerServer).List(ctx, in)
+    }
+    info := &grpc.UnaryServerInfo{
+        Server:     srv,
+        FullMethod: Controller_List_FullMethodName,
+    }
+    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+        return srv.(ControllerServer).List(ctx, req.(*ListRequest))
+    }
+    return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_Disconnect_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+    in := new(DisconnectRequest)
+    if err := dec(in); err != nil {
+        return nil, err
+    }
+    if interceptor == nil {
+        return srv.(ControllerServer).Disconnect(ctx, in)
+    }
+    info := &grpc.UnaryServerInfo{
+        Server:     srv,
+        FullMethod: Controller_Disconnect_FullMethodName,
+    }
+    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+        return srv.(ControllerServer).Disconnect(ctx, req.(*DisconnectRequest))
+    }
+    return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+    in := new(InfoRequest)
+    if err := dec(in); err != nil {
+        return nil, err
+    }
+    if interceptor == nil {
+        return srv.(ControllerServer).Info(ctx, in)
+    }
+    info := &grpc.UnaryServerInfo{
+        Server:     srv,
+        FullMethod: Controller_Info_FullMethodName,
+    }
+    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+        return srv.(ControllerServer).Info(ctx, req.(*InfoRequest))
+    }
+    return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_ListProcesses_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+    in := new(ListProcessesRequest)
+    if err := dec(in); err != nil {
+        return nil, err
+    }
+    if interceptor == nil {
+        return srv.(ControllerServer).ListProcesses(ctx, in)
+    }
+    info := &grpc.UnaryServerInfo{
+        Server:     srv,
+        FullMethod: Controller_ListProcesses_FullMethodName,
+    }
+    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+        return srv.(ControllerServer).ListProcesses(ctx, req.(*ListProcessesRequest))
+    }
+    return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_DisconnectProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+    in := new(DisconnectProcessRequest)
+    if err := dec(in); err != nil {
+        return nil, err
+    }
+    if interceptor == nil {
+        return srv.(ControllerServer).DisconnectProcess(ctx, in)
+    }
+    info := &grpc.UnaryServerInfo{
+        Server:     srv,
+        FullMethod: Controller_DisconnectProcess_FullMethodName,
+    }
+    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+        return srv.(ControllerServer).DisconnectProcess(ctx, req.(*DisconnectProcessRequest))
+    }
+    return interceptor(ctx, in, info, handler)
+}
+
+// Controller_ServiceDesc is the grpc.ServiceDesc for Controller service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var Controller_ServiceDesc = grpc.ServiceDesc{
+    ServiceName: "buildx.controller.v1.Controller",
+    HandlerType: (*ControllerServer)(nil),
+    Methods: []grpc.MethodDesc{
+        {
+            MethodName: "Build",
+            Handler:    _Controller_Build_Handler,
+        },
+        {
+            MethodName: "Inspect",
+            Handler:    _Controller_Inspect_Handler,
+        },
+        {
+            MethodName: "List",
+            Handler:    _Controller_List_Handler,
+        },
+        {
+            MethodName: "Disconnect",
+            Handler:    _Controller_Disconnect_Handler,
+        },
+        {
+            MethodName: "Info",
+            Handler:    _Controller_Info_Handler,
+        },
+        {
+            MethodName: "ListProcesses",
+            Handler:    _Controller_ListProcesses_Handler,
+        },
+        {
+            MethodName: "DisconnectProcess",
+            Handler:    _Controller_DisconnectProcess_Handler,
+        },
+    },
+    Streams: []grpc.StreamDesc{
+        {
+            StreamName:    "Status",
+            Handler:       _Controller_Status_Handler,
+            ServerStreams: true,
+        },
+        {
+            StreamName:    "Input",
+            Handler:       _Controller_Input_Handler,
+            ClientStreams: true,
+        },
+        {
+            StreamName:    "Invoke",
+            Handler:       _Controller_Invoke_Handler,
+            ServerStreams: true,
+            ClientStreams: true,
+        },
+    },
+    Metadata: "github.com/docker/buildx/controller/pb/controller.proto",
+}
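For orientation, a hedged sketch of how a caller might drive this regenerated client; the socket address is a made-up placeholder and error handling is reduced to `log.Fatal`:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	pb "github.com/docker/buildx/controller/pb" // import path as in the generated file
)

func main() {
	// Address is illustrative; the real remote controller listens on a
	// socket chosen at runtime.
	conn, err := grpc.NewClient("unix:///tmp/buildx-controller.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	c := pb.NewControllerClient(conn)
	res, err := c.List(context.Background(), &pb.ListRequest{})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("sessions:", res.Keys)
}
```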
controller/pb/controller_vtproto.pb.go (new file, 11430 lines)
File diff suppressed because it is too large
@@ -2,6 +2,7 @@ package pb
 
 import (
     "io"
+    "maps"
     "os"
     "strconv"
 
@@ -10,24 +11,23 @@ import (
     "github.com/pkg/errors"
 )
 
-func CreateExports(entries []*ExportEntry) ([]client.ExportEntry, error) {
+func CreateExports(entries []*ExportEntry) ([]client.ExportEntry, []string, error) {
     var outs []client.ExportEntry
+    var localPaths []string
     if len(entries) == 0 {
-        return nil, nil
+        return nil, nil, nil
     }
     var stdoutUsed bool
     for _, entry := range entries {
         if entry.Type == "" {
-            return nil, errors.Errorf("type is required for output")
+            return nil, nil, errors.Errorf("type is required for output")
         }
 
         out := client.ExportEntry{
             Type:  entry.Type,
             Attrs: map[string]string{},
         }
-        for k, v := range entry.Attrs {
-            out.Attrs[k] = v
-        }
+        maps.Copy(out.Attrs, entry.Attrs)
 
         supportFile := false
         supportDir := false
@@ -45,24 +45,26 @@ func CreateExports(entries []*ExportEntry) ([]client.ExportEntry, error) {
             supportDir = !tar
         case "registry":
             out.Type = client.ExporterImage
+            out.Attrs["push"] = "true"
         }
 
         if supportDir {
             if entry.Destination == "" {
-                return nil, errors.Errorf("dest is required for %s exporter", out.Type)
+                return nil, nil, errors.Errorf("dest is required for %s exporter", out.Type)
             }
             if entry.Destination == "-" {
-                return nil, errors.Errorf("dest cannot be stdout for %s exporter", out.Type)
+                return nil, nil, errors.Errorf("dest cannot be stdout for %s exporter", out.Type)
             }
 
             fi, err := os.Stat(entry.Destination)
             if err != nil && !os.IsNotExist(err) {
-                return nil, errors.Wrapf(err, "invalid destination directory: %s", entry.Destination)
+                return nil, nil, errors.Wrapf(err, "invalid destination directory: %s", entry.Destination)
             }
             if err == nil && !fi.IsDir() {
-                return nil, errors.Errorf("destination directory %s is a file", entry.Destination)
+                return nil, nil, errors.Errorf("destination directory %s is a file", entry.Destination)
             }
             out.OutputDir = entry.Destination
+            localPaths = append(localPaths, entry.Destination)
         }
         if supportFile {
             if entry.Destination == "" && out.Type != client.ExporterDocker {
@@ -70,32 +72,33 @@ func CreateExports(entries []*ExportEntry) ([]client.ExportEntry, error) {
             }
             if entry.Destination == "-" {
                 if stdoutUsed {
-                    return nil, errors.Errorf("multiple outputs configured to write to stdout")
+                    return nil, nil, errors.Errorf("multiple outputs configured to write to stdout")
                 }
                 if _, err := console.ConsoleFromFile(os.Stdout); err == nil {
-                    return nil, errors.Errorf("dest file is required for %s exporter. refusing to write to console", out.Type)
+                    return nil, nil, errors.Errorf("dest file is required for %s exporter. refusing to write to console", out.Type)
                 }
                 out.Output = wrapWriteCloser(os.Stdout)
                 stdoutUsed = true
             } else if entry.Destination != "" {
                 fi, err := os.Stat(entry.Destination)
                 if err != nil && !os.IsNotExist(err) {
-                    return nil, errors.Wrapf(err, "invalid destination file: %s", entry.Destination)
+                    return nil, nil, errors.Wrapf(err, "invalid destination file: %s", entry.Destination)
                 }
                 if err == nil && fi.IsDir() {
-                    return nil, errors.Errorf("destination file %s is a directory", entry.Destination)
+                    return nil, nil, errors.Errorf("destination file %s is a directory", entry.Destination)
                 }
                 f, err := os.Create(entry.Destination)
                 if err != nil {
-                    return nil, errors.Errorf("failed to open %s", err)
+                    return nil, nil, errors.Errorf("failed to open %s", err)
                 }
                 out.Output = wrapWriteCloser(f)
+                localPaths = append(localPaths, entry.Destination)
             }
         }
 
         outs = append(outs, out)
     }
-    return outs, nil
+    return outs, localPaths, nil
 }
 
 func wrapWriteCloser(wc io.WriteCloser) func(map[string]string) (io.WriteCloser, error) {
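Callers of `CreateExports` now receive the local destination paths alongside the BuildKit export entries. A hedged sketch of adapting a call site to the three-value signature (the entry literal and logging are illustrative):

```go
package main

import (
	"log"

	"github.com/docker/buildx/controller/pb" // buildx-internal package shown in this diff
)

func main() {
	// With the new signature the second result lists host paths the
	// build will write to, so the caller can track or validate them.
	outs, localPaths, err := pb.CreateExports([]*pb.ExportEntry{
		{Type: "local", Destination: "./out"},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(len(outs), "export entries; local paths:", localPaths)
}
```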
@@ -1,3 +0,0 @@
-package pb
-
-//go:generate protoc -I=. -I=../../vendor/ --gogo_out=plugins=grpc:. controller.proto
@@ -153,7 +153,6 @@ func ResolveOptionPaths(options *BuildOptions) (_ *BuildOptions, err error) {
             }
         }
         ps = append(ps, p)
-
     }
     s.Paths = ps
     ssh = append(ssh, s)
@@ -3,10 +3,10 @@ package pb
 import (
     "os"
     "path/filepath"
-    "reflect"
     "testing"
 
     "github.com/stretchr/testify/require"
+    "google.golang.org/protobuf/proto"
 )
 
 func TestResolvePaths(t *testing.T) {
@@ -16,54 +16,58 @@ func TestResolvePaths(t *testing.T) {
     require.NoError(t, os.Chdir(tmpwd))
     tests := []struct {
         name    string
-        options BuildOptions
-        want    BuildOptions
+        options *BuildOptions
+        want    *BuildOptions
     }{
         {
             name:    "contextpath",
-            options: BuildOptions{ContextPath: "test"},
-            want:    BuildOptions{ContextPath: filepath.Join(tmpwd, "test")},
+            options: &BuildOptions{ContextPath: "test"},
+            want:    &BuildOptions{ContextPath: filepath.Join(tmpwd, "test")},
         },
         {
             name:    "contextpath-cwd",
-            options: BuildOptions{ContextPath: "."},
-            want:    BuildOptions{ContextPath: tmpwd},
+            options: &BuildOptions{ContextPath: "."},
+            want:    &BuildOptions{ContextPath: tmpwd},
         },
         {
             name:    "contextpath-dash",
-            options: BuildOptions{ContextPath: "-"},
-            want:    BuildOptions{ContextPath: "-"},
+            options: &BuildOptions{ContextPath: "-"},
+            want:    &BuildOptions{ContextPath: "-"},
         },
         {
             name:    "contextpath-ssh",
-            options: BuildOptions{ContextPath: "git@github.com:docker/buildx.git"},
-            want:    BuildOptions{ContextPath: "git@github.com:docker/buildx.git"},
+            options: &BuildOptions{ContextPath: "git@github.com:docker/buildx.git"},
+            want:    &BuildOptions{ContextPath: "git@github.com:docker/buildx.git"},
         },
         {
             name:    "dockerfilename",
-            options: BuildOptions{DockerfileName: "test", ContextPath: "."},
-            want:    BuildOptions{DockerfileName: filepath.Join(tmpwd, "test"), ContextPath: tmpwd},
+            options: &BuildOptions{DockerfileName: "test", ContextPath: "."},
+            want:    &BuildOptions{DockerfileName: filepath.Join(tmpwd, "test"), ContextPath: tmpwd},
         },
         {
             name:    "dockerfilename-dash",
-            options: BuildOptions{DockerfileName: "-", ContextPath: "."},
-            want:    BuildOptions{DockerfileName: "-", ContextPath: tmpwd},
+            options: &BuildOptions{DockerfileName: "-", ContextPath: "."},
+            want:    &BuildOptions{DockerfileName: "-", ContextPath: tmpwd},
         },
         {
             name:    "dockerfilename-remote",
-            options: BuildOptions{DockerfileName: "test", ContextPath: "git@github.com:docker/buildx.git"},
-            want:    BuildOptions{DockerfileName: "test", ContextPath: "git@github.com:docker/buildx.git"},
+            options: &BuildOptions{DockerfileName: "test", ContextPath: "git@github.com:docker/buildx.git"},
+            want:    &BuildOptions{DockerfileName: "test", ContextPath: "git@github.com:docker/buildx.git"},
         },
         {
             name: "contexts",
-            options: BuildOptions{NamedContexts: map[string]string{"a": "test1", "b": "test2",
-                "alpine": "docker-image://alpine@sha256:0123456789", "project": "https://github.com/myuser/project.git"}},
-            want: BuildOptions{NamedContexts: map[string]string{"a": filepath.Join(tmpwd, "test1"), "b": filepath.Join(tmpwd, "test2"),
-                "alpine": "docker-image://alpine@sha256:0123456789", "project": "https://github.com/myuser/project.git"}},
+            options: &BuildOptions{NamedContexts: map[string]string{
+                "a": "test1", "b": "test2",
+                "alpine": "docker-image://alpine@sha256:0123456789", "project": "https://github.com/myuser/project.git",
+            }},
+            want: &BuildOptions{NamedContexts: map[string]string{
+                "a": filepath.Join(tmpwd, "test1"), "b": filepath.Join(tmpwd, "test2"),
+                "alpine": "docker-image://alpine@sha256:0123456789", "project": "https://github.com/myuser/project.git",
+            }},
         },
         {
             name: "cache-from",
-            options: BuildOptions{
+            options: &BuildOptions{
                 CacheFrom: []*CacheOptionsEntry{
                     {
                         Type: "local",
@@ -75,7 +79,7 @@ func TestResolvePaths(t *testing.T) {
                 },
             },
-            want: BuildOptions{
+            want: &BuildOptions{
                 CacheFrom: []*CacheOptionsEntry{
                     {
                         Type: "local",
@@ -90,7 +94,7 @@ func TestResolvePaths(t *testing.T) {
         },
         {
             name: "cache-to",
-            options: BuildOptions{
+            options: &BuildOptions{
                 CacheTo: []*CacheOptionsEntry{
                     {
                         Type: "local",
@@ -102,7 +106,7 @@ func TestResolvePaths(t *testing.T) {
                 },
             },
-            want: BuildOptions{
+            want: &BuildOptions{
                 CacheTo: []*CacheOptionsEntry{
                     {
                         Type: "local",
@@ -117,7 +121,7 @@ func TestResolvePaths(t *testing.T) {
         },
         {
             name: "exports",
-            options: BuildOptions{
+            options: &BuildOptions{
                 Exports: []*ExportEntry{
                     {
                         Type: "local",
@@ -145,7 +149,7 @@ func TestResolvePaths(t *testing.T) {
                 },
             },
-            want: BuildOptions{
+            want: &BuildOptions{
                 Exports: []*ExportEntry{
                     {
                         Type: "local",
@@ -176,7 +180,7 @@ func TestResolvePaths(t *testing.T) {
         },
         {
             name: "secrets",
-            options: BuildOptions{
+            options: &BuildOptions{
                 Secrets: []*Secret{
                     {
                         FilePath: "test1",
@@ -191,7 +195,7 @@ func TestResolvePaths(t *testing.T) {
                 },
             },
-            want: BuildOptions{
+            want: &BuildOptions{
                 Secrets: []*Secret{
                     {
                         FilePath: filepath.Join(tmpwd, "test1"),
@@ -209,7 +213,7 @@ func TestResolvePaths(t *testing.T) {
         },
         {
             name: "ssh",
-            options: BuildOptions{
+            options: &BuildOptions{
                 SSH: []*SSH{
                     {
                         ID: "default",
@@ -221,7 +225,7 @@ func TestResolvePaths(t *testing.T) {
                 },
             },
-            want: BuildOptions{
+            want: &BuildOptions{
                 SSH: []*SSH{
                     {
                         ID: "default",
@@ -238,10 +242,10 @@ func TestResolvePaths(t *testing.T) {
     for _, tt := range tests {
         tt := tt
         t.Run(tt.name, func(t *testing.T) {
-            got, err := ResolveOptionPaths(&tt.options)
+            got, err := ResolveOptionPaths(tt.options)
             require.NoError(t, err)
-            if !reflect.DeepEqual(tt.want, *got) {
-                t.Fatalf("expected %#v, got %#v", tt.want, *got)
+            if !proto.Equal(tt.want, got) {
+                t.Fatalf("expected %#v, got %#v", tt.want, got)
             }
         })
     }
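The move from `reflect.DeepEqual` to `proto.Equal` above matters because protobuf-generated structs carry unexported bookkeeping (message state, size cache, unknown fields), which `reflect.DeepEqual` also compares; `proto.Equal` compares only field values. A standalone sketch using a stock generated type:

```go
package main

import (
	"fmt"
	"reflect"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	a := timestamppb.New(time.Unix(0, 0))
	b := timestamppb.New(time.Unix(0, 0))

	// Using a message (marshalling, sizing) can touch its internal state
	// without changing any field values.
	_, _ = proto.Marshal(a)

	fmt.Println("proto.Equal:", proto.Equal(a, b))      // true: same field values
	fmt.Println("DeepEqual:  ", reflect.DeepEqual(a, b)) // unreliable: also compares internals
}
```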
@@ -1,10 +1,13 @@
 package pb
 
 import (
+    "time"
+
     "github.com/docker/buildx/util/progress"
     control "github.com/moby/buildkit/api/services/control"
     "github.com/moby/buildkit/client"
     "github.com/opencontainers/go-digest"
+    "google.golang.org/protobuf/types/known/timestamppb"
 )
 
 type writer struct {
@@ -19,25 +22,23 @@ func (w *writer) Write(status *client.SolveStatus) {
     w.ch <- ToControlStatus(status)
 }
 
-func (w *writer) WriteBuildRef(target string, ref string) {
-    return
-}
+func (w *writer) WriteBuildRef(target string, ref string) {}
 
-func (w *writer) ValidateLogSource(digest.Digest, interface{}) bool {
+func (w *writer) ValidateLogSource(digest.Digest, any) bool {
     return true
 }
 
-func (w *writer) ClearLogSource(interface{}) {}
+func (w *writer) ClearLogSource(any) {}
 
 func ToControlStatus(s *client.SolveStatus) *StatusResponse {
     resp := StatusResponse{}
     for _, v := range s.Vertexes {
         resp.Vertexes = append(resp.Vertexes, &control.Vertex{
-            Digest:        v.Digest,
-            Inputs:        v.Inputs,
+            Digest:        string(v.Digest),
+            Inputs:        digestSliceToPB(v.Inputs),
             Name:          v.Name,
-            Started:       v.Started,
-            Completed:     v.Completed,
+            Started:       timestampToPB(v.Started),
+            Completed:     timestampToPB(v.Completed),
             Error:         v.Error,
             Cached:        v.Cached,
             ProgressGroup: v.ProgressGroup,
@@ -46,26 +47,26 @@ func ToControlStatus(s *client.SolveStatus) *StatusResponse {
     for _, v := range s.Statuses {
         resp.Statuses = append(resp.Statuses, &control.VertexStatus{
             ID:        v.ID,
-            Vertex:    v.Vertex,
+            Vertex:    string(v.Vertex),
             Name:      v.Name,
             Total:     v.Total,
             Current:   v.Current,
-            Timestamp: v.Timestamp,
-            Started:   v.Started,
-            Completed: v.Completed,
+            Timestamp: timestamppb.New(v.Timestamp),
+            Started:   timestampToPB(v.Started),
+            Completed: timestampToPB(v.Completed),
         })
     }
     for _, v := range s.Logs {
         resp.Logs = append(resp.Logs, &control.VertexLog{
-            Vertex:    v.Vertex,
+            Vertex:    string(v.Vertex),
             Stream:    int64(v.Stream),
             Msg:       v.Data,
-            Timestamp: v.Timestamp,
+            Timestamp: timestamppb.New(v.Timestamp),
         })
     }
     for _, v := range s.Warnings {
         resp.Warnings = append(resp.Warnings, &control.VertexWarning{
-            Vertex: v.Vertex,
+            Vertex: string(v.Vertex),
             Level:  int64(v.Level),
             Short:  v.Short,
             Detail: v.Detail,
@@ -81,11 +82,11 @@ func FromControlStatus(resp *StatusResponse) *client.SolveStatus {
     s := client.SolveStatus{}
     for _, v := range resp.Vertexes {
         s.Vertexes = append(s.Vertexes, &client.Vertex{
-            Digest:        v.Digest,
-            Inputs:        v.Inputs,
+            Digest:        digest.Digest(v.Digest),
+            Inputs:        digestSliceFromPB(v.Inputs),
             Name:          v.Name,
-            Started:       v.Started,
-            Completed:     v.Completed,
+            Started:       timestampFromPB(v.Started),
+            Completed:     timestampFromPB(v.Completed),
             Error:         v.Error,
             Cached:        v.Cached,
             ProgressGroup: v.ProgressGroup,
@@ -94,26 +95,26 @@ func FromControlStatus(resp *StatusResponse) *client.SolveStatus {
     for _, v := range resp.Statuses {
         s.Statuses = append(s.Statuses, &client.VertexStatus{
             ID:        v.ID,
-            Vertex:    v.Vertex,
+            Vertex:    digest.Digest(v.Vertex),
             Name:      v.Name,
             Total:     v.Total,
             Current:   v.Current,
-            Timestamp: v.Timestamp,
-            Started:   v.Started,
-            Completed: v.Completed,
+            Timestamp: v.Timestamp.AsTime(),
+            Started:   timestampFromPB(v.Started),
+            Completed: timestampFromPB(v.Completed),
         })
     }
     for _, v := range resp.Logs {
         s.Logs = append(s.Logs, &client.VertexLog{
-            Vertex:    v.Vertex,
+            Vertex:    digest.Digest(v.Vertex),
             Stream:    int(v.Stream),
             Data:      v.Msg,
-            Timestamp: v.Timestamp,
+            Timestamp: v.Timestamp.AsTime(),
         })
     }
     for _, v := range resp.Warnings {
         s.Warnings = append(s.Warnings, &client.VertexWarning{
-            Vertex: v.Vertex,
+            Vertex: digest.Digest(v.Vertex),
             Level:  int(v.Level),
             Short:  v.Short,
             Detail: v.Detail,
@@ -124,3 +125,38 @@ func FromControlStatus(resp *StatusResponse) *client.SolveStatus {
     }
     return &s
 }
+
+func timestampFromPB(ts *timestamppb.Timestamp) *time.Time {
+    if ts == nil {
+        return nil
+    }
+
+    t := ts.AsTime()
+    if t.IsZero() {
+        return nil
+    }
+    return &t
+}
+
+func timestampToPB(ts *time.Time) *timestamppb.Timestamp {
+    if ts == nil {
+        return nil
+    }
+    return timestamppb.New(*ts)
+}
+
+func digestSliceFromPB(elems []string) []digest.Digest {
+    clone := make([]digest.Digest, len(elems))
+    for i, e := range elems {
+        clone[i] = digest.Digest(e)
+    }
+    return clone
+}
+
+func digestSliceToPB(elems []digest.Digest) []string {
+    clone := make([]string, len(elems))
+    for i, e := range elems {
+        clone[i] = string(e)
+    }
+    return clone
+}
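The helpers added above bridge BuildKit's `*time.Time` fields and the protobuf API's `*timestamppb.Timestamp`, treating nil (and the zero time) as "unset". A standalone sketch of the round trip and the nil edge case:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	now := time.Now()

	// time.Time -> protobuf Timestamp and back; the round trip preserves
	// the instant (nanosecond precision, normalized to UTC).
	pbts := timestamppb.New(now)
	back := pbts.AsTime()
	fmt.Println(now.UTC().Equal(back)) // true

	// Calling AsTime on a nil *timestamppb.Timestamp is safe and yields
	// the Unix epoch, not Go's zero time -- one reason the diff's
	// timestampFromPB checks nil and IsZero explicitly.
	var unset *timestamppb.Timestamp
	fmt.Println(unset.AsTime().IsZero()) // false: epoch (1970), not year 1
}
```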
@@ -1,6 +1,8 @@
 package pb
 
 import (
+    "slices"
+
     "github.com/moby/buildkit/session"
     "github.com/moby/buildkit/session/sshforward/sshprovider"
 )
@@ -10,7 +12,7 @@ func CreateSSH(ssh []*SSH) (session.Attachable, error) {
     for _, ssh := range ssh {
         cfg := sshprovider.AgentConfig{
             ID:    ssh.ID,
-            Paths: append([]string{}, ssh.Paths...),
+            Paths: slices.Clone(ssh.Paths),
         }
         configs = append(configs, cfg)
     }
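`slices.Clone` (Go 1.21) is a drop-in replacement for the `append([]string{}, ...)` cloning idiom; both produce an independent copy. A small sketch:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	paths := []string{"/run/ssh-agent.sock"}

	// Both forms copy the slice; slices.Clone states the intent directly.
	a := append([]string{}, paths...)
	b := slices.Clone(paths)

	paths[0] = "changed"
	fmt.Println(a[0], b[0]) // /run/ssh-agent.sock /run/ssh-agent.sock
}
```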
@@ -18,16 +18,16 @@ type Process struct {
     invokeConfig  *pb.InvokeConfig
     errCh         chan error
     processCancel func()
-    serveIOCancel func()
+    serveIOCancel func(error)
 }
 
 // ForwardIO forwards process's io to the specified reader/writer.
 // Optionally specify ioCancelCallback which will be called when
 // the process closes the specified IO. This will be useful for additional cleanup.
-func (p *Process) ForwardIO(in *ioset.In, ioCancelCallback func()) {
+func (p *Process) ForwardIO(in *ioset.In, ioCancelCallback func(error)) {
     p.inEnd.SetIn(in)
     if f := p.serveIOCancel; f != nil {
-        f()
+        f(errors.WithStack(context.Canceled))
     }
     p.serveIOCancel = ioCancelCallback
 }
@@ -39,7 +39,7 @@ func (p *Process) Done() <-chan error {
     return p.errCh
 }
 
-// Manager manages a set of proceses.
+// Manager manages a set of processes.
 type Manager struct {
     container atomic.Value
     processes sync.Map
@@ -124,9 +124,16 @@ func (m *Manager) StartProcess(pid string, resultCtx *build.ResultHandle, cfg *p
     f.SetOut(&out)
 
     // Register process
-    ctx, cancel := context.WithCancel(context.TODO())
+    ctx, cancel := context.WithCancelCause(context.TODO())
     var cancelOnce sync.Once
-    processCancelFunc := func() { cancelOnce.Do(func() { cancel(); f.Close(); in.Close(); out.Close() }) }
+    processCancelFunc := func() {
+        cancelOnce.Do(func() {
+            cancel(errors.WithStack(context.Canceled))
+            f.Close()
+            in.Close()
+            out.Close()
+        })
+    }
     p := &Process{
         inEnd:        f,
         invokeConfig: cfg,
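`context.WithCancelCause` pairs with the `sync.Once` guard above to make teardown idempotent while recording why the process context ended. A standalone sketch of the pattern (the cleanup body is a stand-in for the pipe `Close` calls):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

func main() {
	ctx, cancel := context.WithCancelCause(context.Background())

	var once sync.Once
	// Idempotent teardown: safe to call from multiple goroutines,
	// runs the cleanup exactly once and keeps the first cause.
	stop := func(reason error) {
		once.Do(func() {
			cancel(reason)
			fmt.Println("closed pipes")
		})
	}

	stop(errors.New("user detached"))
	stop(errors.New("second call is a no-op"))

	fmt.Println(context.Cause(ctx)) // user detached
}
```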
@@ -6,8 +6,9 @@ import (
     "sync"
     "time"
 
-    "github.com/containerd/containerd/defaults"
-    "github.com/containerd/containerd/pkg/dialer"
+    "github.com/containerd/containerd/v2/defaults"
+    "github.com/containerd/containerd/v2/pkg/dialer"
+    "github.com/docker/buildx/build"
     "github.com/docker/buildx/controller/pb"
     "github.com/docker/buildx/util/progress"
     "github.com/moby/buildkit/client"
@@ -27,6 +28,7 @@ func NewClient(ctx context.Context, addr string) (*Client, error) {
         Backoff: backoffConfig,
     }
     gopts := []grpc.DialOption{
+        //nolint:staticcheck // ignore SA1019: WithBlock is deprecated and does not work with NewClient.
         grpc.WithBlock(),
         grpc.WithTransportCredentials(insecure.NewCredentials()),
         grpc.WithConnectParams(connParams),
@@ -36,6 +38,7 @@ func NewClient(ctx context.Context, addr string) (*Client, error) {
         grpc.WithUnaryInterceptor(grpcerrors.UnaryClientInterceptor),
         grpc.WithStreamInterceptor(grpcerrors.StreamClientInterceptor),
     }
+    //nolint:staticcheck // ignore SA1019: Recommended NewClient has different behavior from DialContext.
     conn, err := grpc.DialContext(ctx, dialer.DialAddress(addr), gopts...)
     if err != nil {
         return nil, err
@@ -72,36 +75,36 @@ func (c *Client) List(ctx context.Context) (keys []string, retErr error) {
     return res.Keys, nil
 }
 
-func (c *Client) Disconnect(ctx context.Context, key string) error {
-    if key == "" {
+func (c *Client) Disconnect(ctx context.Context, sessionID string) error {
+    if sessionID == "" {
         return nil
     }
-    _, err := c.client().Disconnect(ctx, &pb.DisconnectRequest{Ref: key})
+    _, err := c.client().Disconnect(ctx, &pb.DisconnectRequest{SessionID: sessionID})
     return err
 }
 
-func (c *Client) ListProcesses(ctx context.Context, ref string) (infos []*pb.ProcessInfo, retErr error) {
-    res, err := c.client().ListProcesses(ctx, &pb.ListProcessesRequest{Ref: ref})
+func (c *Client) ListProcesses(ctx context.Context, sessionID string) (infos []*pb.ProcessInfo, retErr error) {
+    res, err := c.client().ListProcesses(ctx, &pb.ListProcessesRequest{SessionID: sessionID})
     if err != nil {
         return nil, err
     }
     return res.Infos, nil
 }
 
-func (c *Client) DisconnectProcess(ctx context.Context, ref, pid string) error {
-    _, err := c.client().DisconnectProcess(ctx, &pb.DisconnectProcessRequest{Ref: ref, ProcessID: pid})
+func (c *Client) DisconnectProcess(ctx context.Context, sessionID, pid string) error {
+    _, err := c.client().DisconnectProcess(ctx, &pb.DisconnectProcessRequest{SessionID: sessionID, ProcessID: pid})
     return err
 }
 
-func (c *Client) Invoke(ctx context.Context, ref string, pid string, invokeConfig pb.InvokeConfig, in io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) error {
-    if ref == "" || pid == "" {
-        return errors.New("build reference must be specified")
+func (c *Client) Invoke(ctx context.Context, sessionID string, pid string, invokeConfig *pb.InvokeConfig, in io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) error {
+    if sessionID == "" || pid == "" {
+        return errors.New("build session ID must be specified")
    }
     stream, err := c.client().Invoke(ctx)
     if err != nil {
         return err
     }
-    return attachIO(ctx, stream, &pb.InitMessage{Ref: ref, ProcessID: pid, InvokeConfig: &invokeConfig}, ioAttachConfig{
+    return attachIO(ctx, stream, &pb.InitMessage{SessionID: sessionID, ProcessID: pid, InvokeConfig: invokeConfig}, ioAttachConfig{
         stdin:  in,
         stdout: stdout,
         stderr: stderr,
@@ -109,11 +112,11 @@ func (c *Client) Invoke(ctx context.Context, ref string, pid string, invokeConfi
     })
 }
 
-func (c *Client) Inspect(ctx context.Context, ref string) (*pb.InspectResponse, error) {
-    return c.client().Inspect(ctx, &pb.InspectRequest{Ref: ref})
+func (c *Client) Inspect(ctx context.Context, sessionID string) (*pb.InspectResponse, error) {
+    return c.client().Inspect(ctx, &pb.InspectRequest{SessionID: sessionID})
 }
 
-func (c *Client) Build(ctx context.Context, options pb.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, error) {
+func (c *Client) Build(ctx context.Context, options *pb.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, *build.Inputs, error) {
     ref := identity.NewID()
     statusChan := make(chan *client.SolveStatus)
     eg, egCtx := errgroup.WithContext(ctx)
@@ -131,10 +134,10 @@ func (c *Client) Build(ctx context.Context, options pb.BuildOptions, in io.ReadC
         }
         return nil
     })
-    return ref, resp, eg.Wait()
+    return ref, resp, nil, eg.Wait()
 }
 
-func (c *Client) build(ctx context.Context, ref string, options pb.BuildOptions, in io.ReadCloser, statusChan chan *client.SolveStatus) (*client.SolveResponse, error) {
+func (c *Client) build(ctx context.Context, sessionID string, options *pb.BuildOptions, in io.ReadCloser, statusChan chan *client.SolveStatus) (*client.SolveResponse, error) {
     eg, egCtx := errgroup.WithContext(ctx)
     done := make(chan struct{})
 
@@ -143,8 +146,8 @@ func (c *Client) build(ctx context.Context, ref string, options pb.BuildOptions,
     eg.Go(func() error {
         defer close(done)
         pbResp, err := c.client().Build(egCtx, &pb.BuildRequest{
-            Ref:     ref,
-            Options: &options,
+            SessionID: sessionID,
+            Options:   options,
         })
         if err != nil {
             return err
@@ -156,7 +159,7 @@ func (c *Client) build(ctx context.Context, ref string, options pb.BuildOptions,
     })
     eg.Go(func() error {
         stream, err := c.client().Status(egCtx, &pb.StatusRequest{
-            Ref: ref,
+            SessionID: sessionID,
         })
         if err != nil {
             return err
@@ -181,7 +184,7 @@ func (c *Client) build(ctx context.Context, ref string, options pb.BuildOptions,
         if err := stream.Send(&pb.InputMessage{
             Input: &pb.InputMessage_Init{
                 Init: &pb.InputInitMessage{
-                    Ref: ref,
+                    SessionID: sessionID,
                 },
             },
         }); err != nil {
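The `build`/`Status` coordination above runs two goroutines under one errgroup: the Build RPC signals completion over a channel, and the status drainer stops on that signal or on a sibling's failure. A reduced standalone sketch of the pattern (illustrative, not the buildx code):

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	eg, ctx := errgroup.WithContext(context.Background())
	done := make(chan struct{})

	eg.Go(func() error { // stands in for the Build RPC
		defer close(done)
		return nil
	})
	eg.Go(func() error { // stands in for draining the Status stream
		select {
		case <-done: // build finished; stop streaming
			return nil
		case <-ctx.Done(): // a sibling goroutine failed
			return context.Cause(ctx)
		}
	})

	fmt.Println(eg.Wait()) // <nil>
}
```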
Some files were not shown because too many files have changed in this diff.