Mirror of https://gitea.com/Lydanne/buildx.git (synced 2025-08-29 23:19:10 +08:00)
Compare commits: v0.9.0-rc2 ... v0.23 (2132 commits)
[Commit table omitted: the compare view lists the 2132 commit SHAs between v0.9.0-rc2 and v0.23; only bare SHAs survived in this capture, with empty Author and Date columns.]
@@ -1,3 +1 @@
 bin/
-cross-out/
-release-out/
.github/CONTRIBUTING.md | 137
@@ -116,6 +116,60 @@ commit automatically with `git commit -s`.
 
 ### Run the unit- and integration-tests
 
+Running tests:
+
+```bash
+make test
+```
+
+This runs all unit and integration tests, in a containerized environment.
+Locally, every package can be tested separately with standard Go tools, but
+integration tests are skipped if local user doesn't have enough permissions or
+worker binaries are not installed.
+
+```bash
+# run unit tests only
+make test-unit
+
+# run integration tests only
+make test-integration
+
+# test a specific package
+TESTPKGS=./bake make test
+
+# run all integration tests with a specific worker
+TESTFLAGS="--run=//worker=remote -v" make test-integration
+
+# run a specific integration test
+TESTFLAGS="--run /TestBuild/worker=remote/ -v" make test-integration
+
+# run a selection of integration tests using a regexp
+TESTFLAGS="--run /TestBuild.*/worker=remote/ -v" make test-integration
+```
+
+> **Note**
+>
+> Set `TEST_KEEP_CACHE=1` for the test framework to keep external dependant
+> images in a docker volume if you are repeatedly calling `make test`. This
+> helps to avoid rate limiting on the remote registry side.
+
+> **Note**
+>
+> Set `TEST_DOCKERD=1` for the test framework to enable the docker workers,
+> specifically the `docker` and `docker-container` drivers.
+>
+> The docker tests cannot be run in parallel, so require passing `--parallel=1`
+> in `TESTFLAGS`.
+
+> **Note**
+>
+> If you are working behind a proxy, you can set some of or all
+> `HTTP_PROXY=http://ip:port`, `HTTPS_PROXY=http://ip:port`, `NO_PROXY=http://ip:port`
+> for the test framework to specify the proxy build args.
+
+
+### Run the helper commands
+
 To enter a demo container environment and experiment, you may run:
 
 ```
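A note on the `TESTFLAGS` run patterns added above: they rely on Go's standard `-run` flag, which splits the pattern on `/` and applies one regular expression per subtest level. A minimal sketch of the shape such an integration test takes (the worker list and test body are illustrative assumptions, not buildx's actual harness):

```go
package integration

import "testing"

// Each worker is registered as a subtest, so full test names look like
// "TestBuild/worker=docker-container" and "TestBuild/worker=remote".
// "--run /TestBuild/worker=remote/" therefore selects only the remote
// variant, and "--run=//worker=remote" matches the remote subtest of
// every top-level test (an empty level in the pattern matches anything).
func TestBuild(t *testing.T) {
	for _, worker := range []string{"docker-container", "remote"} {
		t.Run("worker="+worker, func(t *testing.T) {
			t.Logf("would run the build against the %q worker here", worker)
		})
	}
}
```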
@@ -134,6 +188,89 @@ To generate new vendored files with go modules run:
 $ make vendor
 ```
+
+### Generate profiling data
+
+You can configure Buildx to generate [`pprof`](https://github.com/google/pprof)
+memory and CPU profiles to analyze and optimize your builds. These profiles are
+useful for identifying performance bottlenecks, detecting memory
+inefficiencies, and ensuring the program (Buildx) runs efficiently.
+
+The following environment variables control whether Buildx generates profiling
+data for builds:
+
+```console
+$ export BUILDX_CPU_PROFILE=buildx_cpu.prof
+$ export BUILDX_MEM_PROFILE=buildx_mem.prof
+```
+
+When set, Buildx emits profiling samples for the builds to the location
+specified by the environment variable.
+
+To analyze and visualize profiling samples, you need `pprof` from the Go
+toolchain, and (optionally) GraphViz for visualization in a graphical format.
+
+To inspect profiling data with `pprof`:
+
+1. Build a local binary of Buildx from source.
+
+   ```console
+   $ docker buildx bake
+   ```
+
+   The binary gets exported to `./bin/build/buildx`.
+
+2. Run a build and with the environment variables set to generate profiling data.
+
+   ```console
+   $ export BUILDX_CPU_PROFILE=buildx_cpu.prof
+   $ export BUILDX_MEM_PROFILE=buildx_mem.prof
+   $ ./bin/build/buildx bake
+   ```
+
+   This creates `buildx_cpu.prof` and `buildx_mem.prof` for the build.
+
+3. Start `pprof` and specify the filename of the profile that you want to
+   analyze.
+
+   ```console
+   $ go tool pprof buildx_cpu.prof
+   ```
+
+   This opens the `pprof` interactive console. From here, you can inspect the
+   profiling sample using various commands. For example, use `top 10` command
+   to view the top 10 most time-consuming entries.
+
+   ```plaintext
+   (pprof) top 10
+   Showing nodes accounting for 3.04s, 91.02% of 3.34s total
+   Dropped 123 nodes (cum <= 0.02s)
+   Showing top 10 nodes out of 159
+         flat  flat%   sum%        cum   cum%
+        1.14s 34.13% 34.13%      1.14s 34.13%  syscall.syscall
+        0.91s 27.25% 61.38%      0.91s 27.25%  runtime.kevent
+        0.35s 10.48% 71.86%      0.35s 10.48%  runtime.pthread_cond_wait
+        0.22s  6.59% 78.44%      0.22s  6.59%  runtime.pthread_cond_signal
+        0.15s  4.49% 82.93%      0.15s  4.49%  runtime.usleep
+        0.10s  2.99% 85.93%      0.10s  2.99%  runtime.memclrNoHeapPointers
+        0.10s  2.99% 88.92%      0.10s  2.99%  runtime.memmove
+        0.03s   0.9% 89.82%      0.03s   0.9%  runtime.madvise
+        0.02s   0.6% 90.42%      0.02s   0.6%  runtime.(*mspan).typePointersOfUnchecked
+        0.02s   0.6% 91.02%      0.02s   0.6%  runtime.pcvalue
+   ```
+
+To view the call graph in a GUI, run `go tool pprof -http=:8081 <sample>`.
+
+> [!NOTE]
+> Requires [GraphViz](https://www.graphviz.org/) to be installed.
+
+```console
+$ go tool pprof -http=:8081 buildx_cpu.prof
+Serving web UI on http://127.0.0.1:8081
+http://127.0.0.1:8081
+```
+
+For more information about using `pprof` and how to interpret the call graph,
+refer to the [`pprof` README](https://github.com/google/pprof/blob/main/doc/README.md).
+
 ### Conventions
 
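As background for the profiling section above: env-var-gated profiling of this kind is usually a thin wrapper around Go's standard `runtime/pprof` package. The following is a minimal, self-contained sketch of that general pattern, not buildx's actual implementation; only the variable names are borrowed from the docs above.

```go
package main

import (
	"log"
	"os"
	"runtime"
	"runtime/pprof"
)

func main() {
	// Start CPU profiling if BUILDX_CPU_PROFILE names a destination file.
	if path := os.Getenv("BUILDX_CPU_PROFILE"); path != "" {
		f, err := os.Create(path)
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()
		if err := pprof.StartCPUProfile(f); err != nil {
			log.Fatal(err)
		}
		defer pprof.StopCPUProfile()
	}

	doWork() // stand-in for running the actual build

	// Write a heap profile on exit if BUILDX_MEM_PROFILE is set.
	if path := os.Getenv("BUILDX_MEM_PROFILE"); path != "" {
		f, err := os.Create(path)
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()
		runtime.GC() // flush recent allocations into the heap statistics
		if err := pprof.WriteHeapProfile(f); err != nil {
			log.Fatal(err)
		}
	}
}

// doWork allocates enough to show up in a heap profile.
func doWork() {
	s := make([]int, 0, 1)
	for i := 0; i < 1_000_000; i++ {
		s = append(s, i*i)
	}
	_ = s
}
```

Running the resulting binary with the environment variables exported produces the same kind of `.prof` files that `go tool pprof` consumes.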
.github/ISSUE_TEMPLATE/bug.yml | 124 (new file)
@@ -0,0 +1,124 @@
+# https://docs.github.com/en/communities/using-templates-to-encourage-useful-issues-and-pull-requests/syntax-for-githubs-form-schema
+name: Bug Report
+description: Report a bug
+labels:
+  - status/triage
+
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Thank you for taking the time to report a bug!
+        If this is a security issue please report it to the [Docker Security team](mailto:security@docker.com).
+
+  - type: checkboxes
+    attributes:
+      label: Contributing guidelines
+      description: |
+        Please read the contributing guidelines before proceeding.
+      options:
+        - label: I've read the [contributing guidelines](https://github.com/docker/buildx/blob/master/.github/CONTRIBUTING.md) and wholeheartedly agree
+          required: true
+
+  - type: checkboxes
+    attributes:
+      label: I've found a bug and checked that ...
+      description: |
+        Make sure that your request fulfills all of the following requirements.
+        If one requirement cannot be satisfied, explain in detail why.
+      options:
+        - label: ... the documentation does not mention anything about my problem
+        - label: ... there are no open or closed issues that are related to my problem
+
+  - type: textarea
+    attributes:
+      label: Description
+      description: |
+        Please provide a brief description of the bug in 1-2 sentences.
+    validations:
+      required: true
+
+  - type: textarea
+    attributes:
+      label: Expected behaviour
+      description: |
+        Please describe precisely what you'd expect to happen.
+    validations:
+      required: true
+
+  - type: textarea
+    attributes:
+      label: Actual behaviour
+      description: |
+        Please describe precisely what is actually happening.
+    validations:
+      required: true
+
+  - type: input
+    attributes:
+      label: Buildx version
+      description: |
+        Output of `docker buildx version` command.
+        Example: `github.com/docker/buildx v0.8.1 5fac64c2c49dae1320f2b51f1a899ca451935554`
+    validations:
+      required: true
+
+  - type: textarea
+    attributes:
+      label: Docker info
+      description: |
+        Output of `docker info` command.
+      render: text
+
+  - type: textarea
+    attributes:
+      label: Builders list
+      description: |
+        Output of `docker buildx ls` command.
+      render: text
+    validations:
+      required: true
+
+  - type: textarea
+    attributes:
+      label: Configuration
+      description: >
+        Please provide a minimal Dockerfile, bake definition (if applicable) and
+        invoked commands to help reproducing your issue.
+      placeholder: |
+        ```dockerfile
+        FROM alpine
+        echo hello
+        ```
+
+        ```hcl
+        group "default" {
+          targets = ["app"]
+        }
+        target "app" {
+          dockerfile = "Dockerfile"
+          target = "build"
+        }
+        ```
+
+        ```console
+        $ docker buildx build .
+        $ docker buildx bake
+        ```
+    validations:
+      required: true
+
+  - type: textarea
+    attributes:
+      label: Build logs
+      description: |
+        Please provide logs output (and/or BuildKit logs if applicable).
+      render: text
+    validations:
+      required: false
+
+  - type: textarea
+    attributes:
+      label: Additional info
+      description: |
+        Please provide any additional information that could be useful.
.github/ISSUE_TEMPLATE/config.yml | 12 (new file)
@@ -0,0 +1,12 @@
+# https://docs.github.com/en/communities/using-templates-to-encourage-useful-issues-and-pull-requests/configuring-issue-templates-for-your-repository#configuring-the-template-chooser
+blank_issues_enabled: true
+contact_links:
+  - name: Questions and Discussions
+    url: https://github.com/docker/buildx/discussions/new
+    about: Use Github Discussions to ask questions and/or open discussion topics.
+  - name: Command line reference
+    url: https://docs.docker.com/engine/reference/commandline/buildx/
+    about: Read the command line reference.
+  - name: Documentation
+    url: https://docs.docker.com/build/
+    about: Read the documentation.
.github/ISSUE_TEMPLATE/feature.yml | 15 (new file)
@@ -0,0 +1,15 @@
+# https://docs.github.com/en/communities/using-templates-to-encourage-useful-issues-and-pull-requests/syntax-for-githubs-form-schema
+name: Feature request
+description: Missing functionality? Come tell us about it!
+labels:
+  - kind/enhancement
+  - status/triage
+
+body:
+  - type: textarea
+    id: description
+    attributes:
+      label: Description
+      description: What is the feature you want to see?
+    validations:
+      required: true
.github/SECURITY.md | 44 (new file)
@@ -0,0 +1,44 @@
+# Security Policy
+
+The maintainers of Docker Buildx take security seriously. If you discover
+a security issue, please bring it to their attention right away!
+
+## Reporting a Vulnerability
+
+Please **DO NOT** file a public issue, instead send your report privately
+to [security@docker.com](mailto:security@docker.com).
+
+Reporter(s) can expect a response within 72 hours, acknowledging the issue was
+received.
+
+## Review Process
+
+After receiving the report, an initial triage and technical analysis is
+performed to confirm the report and determine its scope. We may request
+additional information in this stage of the process.
+
+Once a reviewer has confirmed the relevance of the report, a draft security
+advisory will be created on GitHub. The draft advisory will be used to discuss
+the issue with maintainers, the reporter(s), and where applicable, other
+affected parties under embargo.
+
+If the vulnerability is accepted, a timeline for developing a patch, public
+disclosure, and patch release will be determined. If there is an embargo period
+on public disclosure before the patch release, the reporter(s) are expected to
+participate in the discussion of the timeline and abide by agreed upon dates
+for public disclosure.
+
+## Accreditation
+
+Security reports are greatly appreciated and we will publicly thank you,
+although we will keep your name confidential if you request it. We also like to
+send gifts - if you're into swag, make sure to let us know. We do not currently
+offer a paid security bounty program at this time.
+
+## Supported Versions
+
+Once a new feature release is cut, support for the previous feature release is
+discontinued. An exception may be made for urgent security releases that occur
+shortly after a new feature release. Buildx does not offer LTS (Long-Term Support)
+releases. Refer to the [Support Policy](https://github.com/docker/buildx/blob/master/PROJECT.md#support-policy)
+for further details.
7
.github/dependabot.yml
vendored
7
.github/dependabot.yml
vendored
@@ -5,6 +5,11 @@ updates:
|
|||||||
directory: "/"
|
directory: "/"
|
||||||
schedule:
|
schedule:
|
||||||
interval: "daily"
|
interval: "daily"
|
||||||
|
ignore:
|
||||||
|
# ignore this dependency
|
||||||
|
# it seems a bug with dependabot as pining to commit sha should not
|
||||||
|
# trigger a new version: https://github.com/docker/buildx/pull/2222#issuecomment-1919092153
|
||||||
|
- dependency-name: "docker/docs"
|
||||||
labels:
|
labels:
|
||||||
- "dependencies"
|
- "area/dependencies"
|
||||||
- "bot"
|
- "bot"
|
||||||
|
|||||||
109
.github/labeler.yml
vendored
Normal file
109
.github/labeler.yml
vendored
Normal file
@@ -0,0 +1,109 @@
|
|||||||
|
|
||||||
|
# Add 'area/project' label to changes in basic project documentation and .github folder, excluding .github/workflows
|
||||||
|
area/project:
|
||||||
|
- all:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file:
|
||||||
|
- .github/**
|
||||||
|
- LICENSE
|
||||||
|
- AUTHORS
|
||||||
|
- MAINTAINERS
|
||||||
|
- PROJECT.md
|
||||||
|
- README.md
|
||||||
|
- .gitignore
|
||||||
|
- codecov.yml
|
||||||
|
- all-globs-to-all-files: '!.github/workflows/*'
|
||||||
|
|
||||||
|
# Add 'area/github-actions' label to changes in the .github/workflows folder
|
||||||
|
area/ci:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: '.github/workflows/**'
|
||||||
|
|
||||||
|
# Add 'area/bake' label to changes in the bake
|
||||||
|
area/bake:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'bake/**'
|
||||||
|
|
||||||
|
# Add 'area/bake/compose' label to changes in the bake+compose
|
||||||
|
area/bake/compose:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file:
|
||||||
|
- bake/compose.go
|
||||||
|
- bake/compose_test.go
|
||||||
|
|
||||||
|
# Add 'area/build' label to changes in build files
|
||||||
|
area/build:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'build/**'
|
||||||
|
|
||||||
|
# Add 'area/builder' label to changes in builder files
|
||||||
|
area/builder:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'builder/**'
|
||||||
|
|
||||||
|
# Add 'area/cli' label to changes in the CLI
|
||||||
|
area/cli:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file:
|
||||||
|
- cmd/**
|
||||||
|
- commands/**
|
||||||
|
|
||||||
|
# Add 'area/controller' label to changes in the controller
|
||||||
|
area/controller:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'controller/**'
|
||||||
|
|
||||||
|
# Add 'area/docs' label to markdown files in the docs folder
|
||||||
|
area/docs:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'docs/**/*.md'
|
||||||
|
|
||||||
|
# Add 'area/dependencies' label to changes in go dependency files
|
||||||
|
area/dependencies:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file:
|
||||||
|
- go.mod
|
||||||
|
- go.sum
|
||||||
|
- vendor/**
|
||||||
|
|
||||||
|
# Add 'area/driver' label to changes in the driver folder
|
||||||
|
area/driver:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'driver/**'
|
||||||
|
|
||||||
|
# Add 'area/driver/docker' label to changes in the docker driver
|
||||||
|
area/driver/docker:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'driver/docker/**'
|
||||||
|
|
||||||
|
# Add 'area/driver/docker-container' label to changes in the docker-container driver
|
||||||
|
area/driver/docker-container:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'driver/docker-container/**'
|
||||||
|
|
||||||
|
# Add 'area/driver/kubernetes' label to changes in the kubernetes driver
|
||||||
|
area/driver/kubernetes:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'driver/kubernetes/**'
|
||||||
|
|
||||||
|
# Add 'area/driver/remote' label to changes in the remote driver
|
||||||
|
area/driver/remote:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'driver/remote/**'
|
||||||
|
|
||||||
|
# Add 'area/hack' label to changes in the hack folder
|
||||||
|
area/hack:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'hack/**'
|
||||||
|
|
||||||
|
# Add 'area/history' label to changes in history command
|
||||||
|
area/history:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'commands/history/**'
|
||||||
|
|
||||||
|
# Add 'area/tests' label to changes in test files
|
||||||
|
area/tests:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file:
|
||||||
|
- tests/**
|
||||||
|
- '**/*_test.go'
|
||||||
735
.github/releases.json
vendored
Normal file
735
.github/releases.json
vendored
Normal file
@@ -0,0 +1,735 @@
|
|||||||
|
{
|
||||||
|
"latest": {
|
||||||
|
"id": 90741208,
|
||||||
|
"tag_name": "v0.10.2",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.10.2",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.darwin-amd64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.darwin-amd64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.darwin-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.darwin-arm64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.darwin-arm64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-amd64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-amd64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm-v6.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm-v6.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm-v7.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm-v7.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-ppc64le.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-ppc64le.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-riscv64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-riscv64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-riscv64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-s390x.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-s390x.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.windows-amd64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.windows-amd64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.windows-amd64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.windows-arm64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.windows-arm64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.windows-arm64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/checksums.txt"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.10.2": {
|
||||||
|
"id": 90741208,
|
||||||
|
"tag_name": "v0.10.2",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.10.2",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.darwin-amd64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.darwin-amd64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.darwin-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.darwin-arm64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.darwin-arm64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-amd64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-amd64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm-v6.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm-v6.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm-v7.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm-v7.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-ppc64le.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-ppc64le.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-riscv64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-riscv64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-riscv64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-s390x.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-s390x.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.windows-amd64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.windows-amd64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.windows-amd64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.windows-arm64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.windows-arm64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.windows-arm64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.2/checksums.txt"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.10.1": {
|
||||||
|
"id": 90346950,
|
||||||
|
"tag_name": "v0.10.1",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.10.1",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.darwin-amd64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.darwin-amd64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.darwin-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.darwin-arm64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.darwin-arm64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-amd64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-amd64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-arm-v6.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-arm-v6.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-arm-v7.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-arm-v7.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-arm64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-arm64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-ppc64le.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-ppc64le.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-riscv64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-riscv64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-riscv64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-s390x.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-s390x.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.windows-amd64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.windows-amd64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.windows-amd64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.windows-arm64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.windows-arm64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.windows-arm64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.1/checksums.txt"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.10.0": {
|
||||||
|
"id": 88388110,
|
||||||
|
"tag_name": "v0.10.0",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.10.0",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.darwin-amd64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.darwin-amd64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.darwin-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.darwin-arm64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.darwin-arm64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-amd64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-amd64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-arm-v6.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-arm-v6.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-arm-v7.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-arm-v7.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-arm64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-arm64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-ppc64le.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-ppc64le.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-riscv64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-riscv64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-riscv64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-s390x.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-s390x.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.windows-amd64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.windows-amd64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.windows-amd64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.windows-arm64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.windows-arm64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.windows-arm64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0/checksums.txt"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.10.0-rc3": {
|
||||||
|
"id": 88191592,
|
||||||
|
"tag_name": "v0.10.0-rc3",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.10.0-rc3",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.darwin-amd64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.darwin-amd64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.darwin-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.darwin-arm64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.darwin-arm64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-amd64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-amd64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-arm-v6.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-arm-v6.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-arm-v7.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-arm-v7.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-arm64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-arm64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-ppc64le.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-ppc64le.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-riscv64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-riscv64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-riscv64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-s390x.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-s390x.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.windows-amd64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.windows-amd64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.windows-amd64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.windows-arm64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.windows-arm64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.windows-arm64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc3/checksums.txt"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.10.0-rc2": {
|
||||||
|
"id": 86248476,
|
||||||
|
"tag_name": "v0.10.0-rc2",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.10.0-rc2",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.darwin-amd64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.darwin-amd64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.darwin-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.darwin-arm64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.darwin-arm64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-amd64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-amd64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-arm-v6.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-arm-v6.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-arm-v7.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-arm-v7.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-arm64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-arm64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-ppc64le.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-ppc64le.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-riscv64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-riscv64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-riscv64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-s390x.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-s390x.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.windows-amd64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.windows-amd64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.windows-amd64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.windows-arm64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.windows-arm64.provenance.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.windows-arm64.sbom.json",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc2/checksums.txt"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.10.0-rc1": {
|
||||||
|
"id": 85963900,
|
||||||
|
"tag_name": "v0.10.0-rc1",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.10.0-rc1",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc1/buildx-v0.10.0-rc1.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc1/buildx-v0.10.0-rc1.darwin-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc1/buildx-v0.10.0-rc1.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc1/buildx-v0.10.0-rc1.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc1/buildx-v0.10.0-rc1.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc1/buildx-v0.10.0-rc1.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc1/buildx-v0.10.0-rc1.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc1/buildx-v0.10.0-rc1.linux-riscv64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc1/buildx-v0.10.0-rc1.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc1/buildx-v0.10.0-rc1.windows-amd64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc1/buildx-v0.10.0-rc1.windows-arm64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.10.0-rc1/checksums.txt"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.9.1": {
|
||||||
|
"id": 74760068,
|
||||||
|
"tag_name": "v0.9.1",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.9.1",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.1/buildx-v0.9.1.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.1/buildx-v0.9.1.darwin-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.1/buildx-v0.9.1.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.1/buildx-v0.9.1.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.1/buildx-v0.9.1.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.1/buildx-v0.9.1.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.1/buildx-v0.9.1.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.1/buildx-v0.9.1.linux-riscv64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.1/buildx-v0.9.1.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.1/buildx-v0.9.1.windows-amd64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.1/buildx-v0.9.1.windows-arm64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.1/checksums.txt"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.9.0": {
|
||||||
|
"id": 74546589,
|
||||||
|
"tag_name": "v0.9.0",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.9.0",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0/buildx-v0.9.0.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0/buildx-v0.9.0.darwin-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0/buildx-v0.9.0.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0/buildx-v0.9.0.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0/buildx-v0.9.0.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0/buildx-v0.9.0.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0/buildx-v0.9.0.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0/buildx-v0.9.0.linux-riscv64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0/buildx-v0.9.0.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0/buildx-v0.9.0.windows-amd64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0/buildx-v0.9.0.windows-arm64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0/checksums.txt"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.9.0-rc2": {
|
||||||
|
"id": 74052235,
|
||||||
|
"tag_name": "v0.9.0-rc2",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.9.0-rc2",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0-rc2/buildx-v0.9.0-rc2.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0-rc2/buildx-v0.9.0-rc2.darwin-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0-rc2/buildx-v0.9.0-rc2.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0-rc2/buildx-v0.9.0-rc2.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0-rc2/buildx-v0.9.0-rc2.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0-rc2/buildx-v0.9.0-rc2.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0-rc2/buildx-v0.9.0-rc2.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0-rc2/buildx-v0.9.0-rc2.linux-riscv64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0-rc2/buildx-v0.9.0-rc2.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0-rc2/buildx-v0.9.0-rc2.windows-amd64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0-rc2/buildx-v0.9.0-rc2.windows-arm64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0-rc2/checksums.txt"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.9.0-rc1": {
|
||||||
|
"id": 73389692,
|
||||||
|
"tag_name": "v0.9.0-rc1",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.9.0-rc1",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0-rc1/buildx-v0.9.0-rc1.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0-rc1/buildx-v0.9.0-rc1.darwin-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0-rc1/buildx-v0.9.0-rc1.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0-rc1/buildx-v0.9.0-rc1.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0-rc1/buildx-v0.9.0-rc1.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0-rc1/buildx-v0.9.0-rc1.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0-rc1/buildx-v0.9.0-rc1.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0-rc1/buildx-v0.9.0-rc1.linux-riscv64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0-rc1/buildx-v0.9.0-rc1.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0-rc1/buildx-v0.9.0-rc1.windows-amd64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0-rc1/buildx-v0.9.0-rc1.windows-arm64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.9.0-rc1/checksums.txt"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.8.2": {
|
||||||
|
"id": 63479740,
|
||||||
|
"tag_name": "v0.8.2",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.8.2",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.2/buildx-v0.8.2.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.2/buildx-v0.8.2.darwin-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.2/buildx-v0.8.2.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.2/buildx-v0.8.2.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.2/buildx-v0.8.2.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.2/buildx-v0.8.2.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.2/buildx-v0.8.2.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.2/buildx-v0.8.2.linux-riscv64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.2/buildx-v0.8.2.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.2/buildx-v0.8.2.windows-amd64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.2/buildx-v0.8.2.windows-arm64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.2/checksums.txt"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.8.1": {
|
||||||
|
"id": 62289050,
|
||||||
|
"tag_name": "v0.8.1",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.8.1",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.1/buildx-v0.8.1.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.1/buildx-v0.8.1.darwin-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.1/buildx-v0.8.1.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.1/buildx-v0.8.1.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.1/buildx-v0.8.1.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.1/buildx-v0.8.1.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.1/buildx-v0.8.1.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.1/buildx-v0.8.1.linux-riscv64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.1/buildx-v0.8.1.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.1/buildx-v0.8.1.windows-amd64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.1/buildx-v0.8.1.windows-arm64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.1/checksums.txt"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.8.0": {
|
||||||
|
"id": 61423774,
|
||||||
|
"tag_name": "v0.8.0",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.8.0",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.0/buildx-v0.8.0.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.0/buildx-v0.8.0.darwin-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.0/buildx-v0.8.0.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.0/buildx-v0.8.0.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.0/buildx-v0.8.0.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.0/buildx-v0.8.0.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.0/buildx-v0.8.0.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.0/buildx-v0.8.0.linux-riscv64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.0/buildx-v0.8.0.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.0/buildx-v0.8.0.windows-amd64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.0/buildx-v0.8.0.windows-arm64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.0/checksums.txt"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.8.0-rc1": {
|
||||||
|
"id": 60513568,
|
||||||
|
"tag_name": "v0.8.0-rc1",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.8.0-rc1",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.0-rc1/buildx-v0.8.0-rc1.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.0-rc1/buildx-v0.8.0-rc1.darwin-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.0-rc1/buildx-v0.8.0-rc1.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.0-rc1/buildx-v0.8.0-rc1.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.0-rc1/buildx-v0.8.0-rc1.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.0-rc1/buildx-v0.8.0-rc1.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.0-rc1/buildx-v0.8.0-rc1.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.0-rc1/buildx-v0.8.0-rc1.linux-riscv64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.0-rc1/buildx-v0.8.0-rc1.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.0-rc1/buildx-v0.8.0-rc1.windows-amd64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.0-rc1/buildx-v0.8.0-rc1.windows-arm64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.8.0-rc1/checksums.txt"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.7.1": {
|
||||||
|
"id": 54098347,
|
||||||
|
"tag_name": "v0.7.1",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.7.1",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.1/buildx-v0.7.1.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.1/buildx-v0.7.1.darwin-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.1/buildx-v0.7.1.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.1/buildx-v0.7.1.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.1/buildx-v0.7.1.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.1/buildx-v0.7.1.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.1/buildx-v0.7.1.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.1/buildx-v0.7.1.linux-riscv64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.1/buildx-v0.7.1.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.1/buildx-v0.7.1.windows-amd64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.1/buildx-v0.7.1.windows-arm64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.1/checksums.txt"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.7.0": {
|
||||||
|
"id": 53109422,
|
||||||
|
"tag_name": "v0.7.0",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.7.0",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.0/buildx-v0.7.0.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.0/buildx-v0.7.0.darwin-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.0/buildx-v0.7.0.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.0/buildx-v0.7.0.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.0/buildx-v0.7.0.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.0/buildx-v0.7.0.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.0/buildx-v0.7.0.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.0/buildx-v0.7.0.linux-riscv64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.0/buildx-v0.7.0.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.0/buildx-v0.7.0.windows-amd64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.0/buildx-v0.7.0.windows-arm64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.0/checksums.txt"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.7.0-rc1": {
|
||||||
|
"id": 52726324,
|
||||||
|
"tag_name": "v0.7.0-rc1",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.7.0-rc1",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.0-rc1/buildx-v0.7.0-rc1.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.0-rc1/buildx-v0.7.0-rc1.darwin-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.0-rc1/buildx-v0.7.0-rc1.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.0-rc1/buildx-v0.7.0-rc1.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.0-rc1/buildx-v0.7.0-rc1.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.0-rc1/buildx-v0.7.0-rc1.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.0-rc1/buildx-v0.7.0-rc1.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.0-rc1/buildx-v0.7.0-rc1.linux-riscv64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.0-rc1/buildx-v0.7.0-rc1.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.0-rc1/buildx-v0.7.0-rc1.windows-amd64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.0-rc1/buildx-v0.7.0-rc1.windows-arm64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.7.0-rc1/checksums.txt"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.6.3": {
|
||||||
|
"id": 48691641,
|
||||||
|
"tag_name": "v0.6.3",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.6.3",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.3/buildx-v0.6.3.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.3/buildx-v0.6.3.darwin-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.3/buildx-v0.6.3.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.3/buildx-v0.6.3.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.3/buildx-v0.6.3.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.3/buildx-v0.6.3.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.3/buildx-v0.6.3.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.3/buildx-v0.6.3.linux-riscv64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.3/buildx-v0.6.3.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.3/buildx-v0.6.3.windows-amd64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.3/buildx-v0.6.3.windows-arm64.exe"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.6.2": {
|
||||||
|
"id": 48207405,
|
||||||
|
"tag_name": "v0.6.2",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.6.2",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.2/buildx-v0.6.2.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.2/buildx-v0.6.2.darwin-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.2/buildx-v0.6.2.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.2/buildx-v0.6.2.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.2/buildx-v0.6.2.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.2/buildx-v0.6.2.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.2/buildx-v0.6.2.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.2/buildx-v0.6.2.linux-riscv64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.2/buildx-v0.6.2.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.2/buildx-v0.6.2.windows-amd64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.2/buildx-v0.6.2.windows-arm64.exe"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.6.1": {
|
||||||
|
"id": 47064772,
|
||||||
|
"tag_name": "v0.6.1",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.6.1",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.1/buildx-v0.6.1.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.1/buildx-v0.6.1.darwin-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.1/buildx-v0.6.1.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.1/buildx-v0.6.1.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.1/buildx-v0.6.1.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.1/buildx-v0.6.1.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.1/buildx-v0.6.1.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.1/buildx-v0.6.1.linux-riscv64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.1/buildx-v0.6.1.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.1/buildx-v0.6.1.windows-amd64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.1/buildx-v0.6.1.windows-arm64.exe"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.6.0": {
|
||||||
|
"id": 46343260,
|
||||||
|
"tag_name": "v0.6.0",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.6.0",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.0/buildx-v0.6.0.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.0/buildx-v0.6.0.darwin-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.0/buildx-v0.6.0.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.0/buildx-v0.6.0.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.0/buildx-v0.6.0.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.0/buildx-v0.6.0.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.0/buildx-v0.6.0.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.0/buildx-v0.6.0.linux-riscv64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.0/buildx-v0.6.0.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.0/buildx-v0.6.0.windows-amd64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.0/buildx-v0.6.0.windows-arm64.exe"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.6.0-rc1": {
|
||||||
|
"id": 46230351,
|
||||||
|
"tag_name": "v0.6.0-rc1",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.6.0-rc1",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.0-rc1/buildx-v0.6.0-rc1.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.0-rc1/buildx-v0.6.0-rc1.darwin-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.0-rc1/buildx-v0.6.0-rc1.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.0-rc1/buildx-v0.6.0-rc1.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.0-rc1/buildx-v0.6.0-rc1.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.0-rc1/buildx-v0.6.0-rc1.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.0-rc1/buildx-v0.6.0-rc1.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.0-rc1/buildx-v0.6.0-rc1.linux-riscv64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.0-rc1/buildx-v0.6.0-rc1.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.0-rc1/buildx-v0.6.0-rc1.windows-amd64.exe",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.6.0-rc1/buildx-v0.6.0-rc1.windows-arm64.exe"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.5.1": {
|
||||||
|
"id": 35276550,
|
||||||
|
"tag_name": "v0.5.1",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.5.1",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.5.1/buildx-v0.5.1.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.5.1/buildx-v0.5.1.darwin-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.5.1/buildx-v0.5.1.darwin-universal",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.5.1/buildx-v0.5.1.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.5.1/buildx-v0.5.1.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.5.1/buildx-v0.5.1.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.5.1/buildx-v0.5.1.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.5.1/buildx-v0.5.1.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.5.1/buildx-v0.5.1.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.5.1/buildx-v0.5.1.windows-amd64.exe"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.5.0": {
|
||||||
|
"id": 35268960,
|
||||||
|
"tag_name": "v0.5.0",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.5.0",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.5.0/buildx-v0.5.0.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.5.0/buildx-v0.5.0.darwin-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.5.0/buildx-v0.5.0.darwin-universal",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.5.0/buildx-v0.5.0.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.5.0/buildx-v0.5.0.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.5.0/buildx-v0.5.0.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.5.0/buildx-v0.5.0.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.5.0/buildx-v0.5.0.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.5.0/buildx-v0.5.0.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.5.0/buildx-v0.5.0.windows-amd64.exe"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.5.0-rc1": {
|
||||||
|
"id": 35015334,
|
||||||
|
"tag_name": "v0.5.0-rc1",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.5.0-rc1",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.5.0-rc1/buildx-v0.5.0-rc1.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.5.0-rc1/buildx-v0.5.0-rc1.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.5.0-rc1/buildx-v0.5.0-rc1.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.5.0-rc1/buildx-v0.5.0-rc1.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.5.0-rc1/buildx-v0.5.0-rc1.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.5.0-rc1/buildx-v0.5.0-rc1.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.5.0-rc1/buildx-v0.5.0-rc1.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.5.0-rc1/buildx-v0.5.0-rc1.windows-amd64.exe"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.4.2": {
|
||||||
|
"id": 30007794,
|
||||||
|
"tag_name": "v0.4.2",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.4.2",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.4.2/buildx-v0.4.2.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.4.2/buildx-v0.4.2.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.4.2/buildx-v0.4.2.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.4.2/buildx-v0.4.2.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.4.2/buildx-v0.4.2.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.4.2/buildx-v0.4.2.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.4.2/buildx-v0.4.2.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.4.2/buildx-v0.4.2.windows-amd64.exe"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.4.1": {
|
||||||
|
"id": 26067509,
|
||||||
|
"tag_name": "v0.4.1",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.4.1",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.4.1/buildx-v0.4.1.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.4.1/buildx-v0.4.1.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.4.1/buildx-v0.4.1.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.4.1/buildx-v0.4.1.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.4.1/buildx-v0.4.1.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.4.1/buildx-v0.4.1.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.4.1/buildx-v0.4.1.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.4.1/buildx-v0.4.1.windows-amd64.exe"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.4.0": {
|
||||||
|
"id": 26028174,
|
||||||
|
"tag_name": "v0.4.0",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.4.0",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.4.0/buildx-v0.4.0.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.4.0/buildx-v0.4.0.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.4.0/buildx-v0.4.0.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.4.0/buildx-v0.4.0.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.4.0/buildx-v0.4.0.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.4.0/buildx-v0.4.0.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.4.0/buildx-v0.4.0.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.4.0/buildx-v0.4.0.windows-amd64.exe"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.3.1": {
|
||||||
|
"id": 20316235,
|
||||||
|
"tag_name": "v0.3.1",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.3.1",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.3.1/buildx-v0.3.1.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.3.1/buildx-v0.3.1.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.3.1/buildx-v0.3.1.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.3.1/buildx-v0.3.1.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.3.1/buildx-v0.3.1.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.3.1/buildx-v0.3.1.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.3.1/buildx-v0.3.1.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.3.1/buildx-v0.3.1.windows-amd64.exe"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.3.0": {
|
||||||
|
"id": 19029664,
|
||||||
|
"tag_name": "v0.3.0",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.3.0",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.3.0/buildx-v0.3.0.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.3.0/buildx-v0.3.0.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.3.0/buildx-v0.3.0.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.3.0/buildx-v0.3.0.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.3.0/buildx-v0.3.0.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.3.0/buildx-v0.3.0.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.3.0/buildx-v0.3.0.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.3.0/buildx-v0.3.0.windows-amd64.exe"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.2.2": {
|
||||||
|
"id": 17671545,
|
||||||
|
"tag_name": "v0.2.2",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.2.2",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.2.2/buildx-v0.2.2.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.2.2/buildx-v0.2.2.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.2.2/buildx-v0.2.2.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.2.2/buildx-v0.2.2.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.2.2/buildx-v0.2.2.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.2.2/buildx-v0.2.2.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.2.2/buildx-v0.2.2.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.2.2/buildx-v0.2.2.windows-amd64.exe"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.2.1": {
|
||||||
|
"id": 17582885,
|
||||||
|
"tag_name": "v0.2.1",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.2.1",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.2.1/buildx-v0.2.1.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.2.1/buildx-v0.2.1.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.2.1/buildx-v0.2.1.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.2.1/buildx-v0.2.1.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.2.1/buildx-v0.2.1.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.2.1/buildx-v0.2.1.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.2.1/buildx-v0.2.1.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.2.1/buildx-v0.2.1.windows-amd64.exe"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"v0.2.0": {
|
||||||
|
"id": 16965310,
|
||||||
|
"tag_name": "v0.2.0",
|
||||||
|
"html_url": "https://github.com/docker/buildx/releases/tag/v0.2.0",
|
||||||
|
"assets": [
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.2.0/buildx-v0.2.0.darwin-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.2.0/buildx-v0.2.0.linux-amd64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.2.0/buildx-v0.2.0.linux-arm-v6",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.2.0/buildx-v0.2.0.linux-arm-v7",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.2.0/buildx-v0.2.0.linux-arm64",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.2.0/buildx-v0.2.0.linux-ppc64le",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.2.0/buildx-v0.2.0.linux-s390x",
|
||||||
|
"https://github.com/docker/buildx/releases/download/v0.2.0/buildx-v0.2.0.windows-amd64.exe"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
490 .github/workflows/build.yml vendored
@@ -1,5 +1,14 @@
|
|||||||
name: build
|
name: build
|
||||||
|
|
||||||
|
# Default to 'contents: read', which grants actions to read commits.
|
||||||
|
#
|
||||||
|
# If any permission is set, any permission not included in the list is
|
||||||
|
# implicitly set to "none".
|
||||||
|
#
|
||||||
|
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
|
||||||
concurrency:
|
concurrency:
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}
|
group: ${{ github.workflow }}-${{ github.ref }}
|
||||||
cancel-in-progress: true
|
cancel-in-progress: true
|
||||||
@@ -13,66 +22,353 @@ on:
|
|||||||
tags:
|
tags:
|
||||||
- 'v*'
|
- 'v*'
|
||||||
pull_request:
|
pull_request:
|
||||||
branches:
|
paths-ignore:
|
||||||
- 'master'
|
- '.github/releases.json'
|
||||||
- 'v[0-9]*'
|
- 'README.md'
|
||||||
|
- 'docs/**'
|
||||||
|
|
||||||
env:
|
env:
|
||||||
|
SETUP_BUILDX_VERSION: "edge"
|
||||||
|
SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
|
||||||
|
SCOUT_VERSION: "1.11.0"
|
||||||
REPO_SLUG: "docker/buildx-bin"
|
REPO_SLUG: "docker/buildx-bin"
|
||||||
RELEASE_OUT: "./release-out"
|
DESTDIR: "./bin"
|
||||||
|
TEST_CACHE_SCOPE: "test"
|
||||||
|
TESTFLAGS: "-v --parallel=6 --timeout=30m"
|
||||||
|
GOTESTSUM_FORMAT: "standard-verbose"
|
||||||
|
GO_VERSION: "1.23"
|
||||||
|
GOTESTSUM_VERSION: "v1.9.0" # same as one in Dockerfile
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
test:
|
test-integration:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-24.04
|
||||||
|
env:
|
||||||
|
TESTFLAGS_DOCKER: "-v --parallel=1 --timeout=30m"
|
||||||
|
TEST_IMAGE_BUILD: "0"
|
||||||
|
TEST_IMAGE_ID: "buildx-tests"
|
||||||
|
TEST_COVERAGE: "1"
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
buildkit:
|
||||||
|
- master
|
||||||
|
- latest
|
||||||
|
- buildx-stable-1
|
||||||
|
- v0.20.2
|
||||||
|
- v0.19.0
|
||||||
|
- v0.18.2
|
||||||
|
worker:
|
||||||
|
- docker-container
|
||||||
|
- remote
|
||||||
|
pkg:
|
||||||
|
- ./tests
|
||||||
|
mode:
|
||||||
|
- ""
|
||||||
|
- experimental
|
||||||
|
include:
|
||||||
|
- worker: docker
|
||||||
|
pkg: ./tests
|
||||||
|
- worker: docker+containerd # same as docker, but with containerd snapshotter
|
||||||
|
pkg: ./tests
|
||||||
|
- worker: docker
|
||||||
|
pkg: ./tests
|
||||||
|
mode: experimental
|
||||||
|
- worker: docker+containerd # same as docker, but with containerd snapshotter
|
||||||
|
pkg: ./tests
|
||||||
|
mode: experimental
|
||||||
|
- worker: "docker@27.5"
|
||||||
|
pkg: ./tests
|
||||||
|
- worker: "docker+containerd@27.5" # same as docker, but with containerd snapshotter
|
||||||
|
pkg: ./tests
|
||||||
|
- worker: "docker@27.5"
|
||||||
|
pkg: ./tests
|
||||||
|
mode: experimental
|
||||||
|
- worker: "docker+containerd@27.5" # same as docker, but with containerd snapshotter
|
||||||
|
pkg: ./tests
|
||||||
|
mode: experimental
|
||||||
|
- worker: "docker@26.1"
|
||||||
|
pkg: ./tests
|
||||||
|
- worker: "docker+containerd@26.1" # same as docker, but with containerd snapshotter
|
||||||
|
pkg: ./tests
|
||||||
|
- worker: "docker@26.1"
|
||||||
|
pkg: ./tests
|
||||||
|
mode: experimental
|
||||||
|
- worker: "docker+containerd@26.1" # same as docker, but with containerd snapshotter
|
||||||
|
pkg: ./tests
|
||||||
|
mode: experimental
|
||||||
steps:
|
steps:
|
||||||
|
-
|
||||||
|
name: Prepare
|
||||||
|
run: |
|
||||||
|
echo "TESTREPORTS_NAME=${{ github.job }}-$(echo "${{ matrix.pkg }}-${{ matrix.buildkit }}-${{ matrix.worker }}-${{ matrix.mode }}" | tr -dc '[:alnum:]-\n\r' | tr '[:upper:]' '[:lower:]')" >> $GITHUB_ENV
|
||||||
|
if [ -n "${{ matrix.buildkit }}" ]; then
|
||||||
|
echo "TEST_BUILDKIT_TAG=${{ matrix.buildkit }}" >> $GITHUB_ENV
|
||||||
|
fi
|
||||||
|
testFlags="--run=//worker=$(echo "${{ matrix.worker }}" | sed 's/\+/\\+/g')$"
|
||||||
|
case "${{ matrix.worker }}" in
|
||||||
|
docker | docker+containerd | docker@* | docker+containerd@*)
|
||||||
|
echo "TESTFLAGS=${{ env.TESTFLAGS_DOCKER }} $testFlags" >> $GITHUB_ENV
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
echo "TESTFLAGS=${{ env.TESTFLAGS }} $testFlags" >> $GITHUB_ENV
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
if [[ "${{ matrix.worker }}" == "docker"* ]]; then
|
||||||
|
echo "TEST_DOCKERD=1" >> $GITHUB_ENV
|
||||||
|
fi
|
||||||
|
if [ "${{ matrix.mode }}" = "experimental" ]; then
|
||||||
|
echo "TEST_BUILDX_EXPERIMENTAL=1" >> $GITHUB_ENV
|
||||||
|
fi
|
||||||
-
|
-
|
||||||
name: Checkout
|
name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
-
|
||||||
|
name: Set up QEMU
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
-
|
-
|
||||||
name: Set up Docker Buildx
|
name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v2
|
uses: docker/setup-buildx-action@v3
|
||||||
with:
|
with:
|
||||||
version: latest
|
version: ${{ env.SETUP_BUILDX_VERSION }}
|
||||||
|
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
|
||||||
|
buildkitd-flags: --debug
|
||||||
|
-
|
||||||
|
name: Build test image
|
||||||
|
uses: docker/bake-action@v6
|
||||||
|
with:
|
||||||
|
source: .
|
||||||
|
targets: integration-test
|
||||||
|
set: |
|
||||||
|
*.output=type=docker,name=${{ env.TEST_IMAGE_ID }}
|
||||||
-
|
-
|
||||||
name: Test
|
name: Test
|
||||||
uses: docker/bake-action@v2
|
run: |
|
||||||
|
./hack/test
|
||||||
|
env:
|
||||||
|
TEST_REPORT_SUFFIX: "-${{ env.TESTREPORTS_NAME }}"
|
||||||
|
TESTPKGS: "${{ matrix.pkg }}"
|
||||||
|
-
|
||||||
|
name: Send to Codecov
|
||||||
|
if: always()
|
||||||
|
uses: codecov/codecov-action@v5
|
||||||
with:
|
with:
|
||||||
targets: test
|
directory: ./bin/testreports
|
||||||
set: |
|
flags: integration
|
||||||
*.cache-from=type=gha,scope=test
|
token: ${{ secrets.CODECOV_TOKEN }}
|
||||||
*.cache-to=type=gha,scope=test
|
disable_file_fixes: true
|
||||||
|
-
|
||||||
|
name: Generate annotations
|
||||||
|
if: always()
|
||||||
|
uses: crazy-max/.github/.github/actions/gotest-annotations@fa6141aedf23596fb8bdcceab9cce8dadaa31bd9
|
||||||
|
with:
|
||||||
|
directory: ./bin/testreports
|
||||||
|
-
|
||||||
|
name: Upload test reports
|
||||||
|
if: always()
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: test-reports-${{ env.TESTREPORTS_NAME }}
|
||||||
|
path: ./bin/testreports
|
||||||
|
|
||||||
|
test-unit:
|
||||||
|
runs-on: ${{ matrix.os }}
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
os:
|
||||||
|
- ubuntu-24.04
|
||||||
|
- macos-14
|
||||||
|
- windows-2022
|
||||||
|
env:
|
||||||
|
SKIP_INTEGRATION_TESTS: 1
|
||||||
|
steps:
|
||||||
|
-
|
||||||
|
name: Setup Git config
|
||||||
|
run: |
|
||||||
|
git config --global core.autocrlf false
|
||||||
|
git config --global core.eol lf
|
||||||
|
-
|
||||||
|
name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
-
|
||||||
|
name: Set up Go
|
||||||
|
uses: actions/setup-go@v5
|
||||||
|
with:
|
||||||
|
go-version: "${{ env.GO_VERSION }}"
|
||||||
|
-
|
||||||
|
name: Prepare
|
||||||
|
run: |
|
||||||
|
testreportsName=${{ github.job }}--${{ matrix.os }}
|
||||||
|
testreportsBaseDir=./bin/testreports
|
||||||
|
testreportsDir=$testreportsBaseDir/$testreportsName
|
||||||
|
echo "TESTREPORTS_NAME=$testreportsName" >> $GITHUB_ENV
|
||||||
|
echo "TESTREPORTS_BASEDIR=$testreportsBaseDir" >> $GITHUB_ENV
|
||||||
|
echo "TESTREPORTS_DIR=$testreportsDir" >> $GITHUB_ENV
|
||||||
|
mkdir -p $testreportsDir
|
||||||
|
shell: bash
|
||||||
|
-
|
||||||
|
name: Install gotestsum
|
||||||
|
run: |
|
||||||
|
go install gotest.tools/gotestsum@${{ env.GOTESTSUM_VERSION }}
|
||||||
|
-
|
||||||
|
name: Test
|
||||||
|
env:
|
||||||
|
TMPDIR: ${{ runner.temp }}
|
||||||
|
run: |
|
||||||
|
gotestsum \
|
||||||
|
--jsonfile="${{ env.TESTREPORTS_DIR }}/go-test-report.json" \
|
||||||
|
--junitfile="${{ env.TESTREPORTS_DIR }}/junit-report.xml" \
|
||||||
|
--packages="./..." \
|
||||||
|
-- \
|
||||||
|
"-mod=vendor" \
|
||||||
|
"-coverprofile" "${{ env.TESTREPORTS_DIR }}/coverage.txt" \
|
||||||
|
"-covermode" "atomic" ${{ env.TESTFLAGS }}
|
||||||
|
shell: bash
|
||||||
|
-
|
||||||
|
name: Send to Codecov
|
||||||
|
if: always()
|
||||||
|
uses: codecov/codecov-action@v5
|
||||||
|
with:
|
||||||
|
directory: ${{ env.TESTREPORTS_DIR }}
|
||||||
|
env_vars: RUNNER_OS
|
||||||
|
flags: unit
|
||||||
|
token: ${{ secrets.CODECOV_TOKEN }}
|
||||||
|
disable_file_fixes: true
|
||||||
|
-
|
||||||
|
name: Generate annotations
|
||||||
|
if: always()
|
||||||
|
uses: crazy-max/.github/.github/actions/gotest-annotations@fa6141aedf23596fb8bdcceab9cce8dadaa31bd9
|
||||||
|
with:
|
||||||
|
directory: ${{ env.TESTREPORTS_DIR }}
|
||||||
|
-
|
||||||
|
name: Upload test reports
|
||||||
|
if: always()
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: test-reports-${{ env.TESTREPORTS_NAME }}
|
||||||
|
path: ${{ env.TESTREPORTS_BASEDIR }}
|
||||||
|
|
||||||
|
test-bsd-unit:
|
||||||
|
runs-on: ubuntu-22.04
|
||||||
|
continue-on-error: true
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
os:
|
||||||
|
- freebsd
|
||||||
|
- netbsd
|
||||||
|
- openbsd
|
||||||
|
steps:
|
||||||
|
-
|
||||||
|
name: Prepare
|
||||||
|
run: |
|
||||||
|
echo "VAGRANT_FILE=hack/Vagrantfile.${{ matrix.os }}" >> $GITHUB_ENV
|
||||||
|
|
||||||
|
# Sets semver Go version to be able to download tarball during vagrant setup
|
||||||
|
goVersion=$(curl --silent "https://go.dev/dl/?mode=json&include=all" | jq -r '.[].files[].version' | uniq | sed -e 's/go//' | sort -V | grep $GO_VERSION | tail -1)
|
||||||
|
echo "GO_VERSION=$goVersion" >> $GITHUB_ENV
|
||||||
|
-
|
||||||
|
name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
-
|
||||||
|
name: Cache Vagrant boxes
|
||||||
|
uses: actions/cache@v4
|
||||||
|
with:
|
||||||
|
path: ~/.vagrant.d/boxes
|
||||||
|
key: ${{ runner.os }}-vagrant-${{ matrix.os }}-${{ hashFiles(env.VAGRANT_FILE) }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ runner.os }}-vagrant-${{ matrix.os }}-
|
||||||
|
-
|
||||||
|
name: Install vagrant
|
||||||
|
run: |
|
||||||
|
set -x
|
||||||
|
wget -O - https://apt.releases.hashicorp.com/gpg | sudo gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg
|
||||||
|
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list
|
||||||
|
sudo apt-get update
|
||||||
|
sudo apt-get install -y libvirt-dev libvirt-daemon libvirt-daemon-system vagrant vagrant-libvirt ruby-libvirt
|
||||||
|
sudo systemctl enable --now libvirtd
|
||||||
|
sudo chmod a+rw /var/run/libvirt/libvirt-sock
|
||||||
|
vagrant plugin install vagrant-libvirt
|
||||||
|
vagrant --version
|
||||||
|
-
|
||||||
|
name: Set up vagrant
|
||||||
|
run: |
|
||||||
|
ln -sf ${{ env.VAGRANT_FILE }} Vagrantfile
|
||||||
|
vagrant up --no-tty
|
||||||
|
-
|
||||||
|
name: Test
|
||||||
|
run: |
|
||||||
|
vagrant ssh -- "cd /vagrant; SKIP_INTEGRATION_TESTS=1 go test -mod=vendor -coverprofile=coverage.txt -covermode=atomic ${{ env.TESTFLAGS }} ./..."
|
||||||
|
vagrant ssh -c "sudo cat /vagrant/coverage.txt" > coverage.txt
|
||||||
-
|
-
|
||||||
name: Upload coverage
|
name: Upload coverage
|
||||||
uses: codecov/codecov-action@v3
|
if: always()
|
||||||
|
uses: codecov/codecov-action@v5
|
||||||
with:
|
with:
|
||||||
file: ./coverage/coverage.txt
|
files: ./coverage.txt
|
||||||
|
env_vars: RUNNER_OS
|
||||||
|
flags: unit,${{ matrix.os }}
|
||||||
|
token: ${{ secrets.CODECOV_TOKEN }}
|
||||||
|
env:
|
||||||
|
RUNNER_OS: ${{ matrix.os }}
|
||||||
|
|
||||||
prepare:
|
govulncheck:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-24.04
|
||||||
|
permissions:
|
||||||
|
# same as global permission
|
||||||
|
contents: read
|
||||||
|
# required to write sarif report
|
||||||
|
security-events: write
|
||||||
|
steps:
|
||||||
|
-
|
||||||
|
name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
with:
|
||||||
|
version: ${{ env.SETUP_BUILDX_VERSION }}
|
||||||
|
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
|
||||||
|
buildkitd-flags: --debug
|
||||||
|
-
|
||||||
|
name: Run
|
||||||
|
uses: docker/bake-action@v6
|
||||||
|
with:
|
||||||
|
targets: govulncheck
|
||||||
|
env:
|
||||||
|
GOVULNCHECK_FORMAT: sarif
|
||||||
|
-
|
||||||
|
name: Upload SARIF report
|
||||||
|
if: ${{ github.ref == 'refs/heads/master' && github.repository == 'docker/buildx' }}
|
||||||
|
uses: github/codeql-action/upload-sarif@v3
|
||||||
|
with:
|
||||||
|
sarif_file: ${{ env.DESTDIR }}/govulncheck.out
|
||||||
|
|
||||||
|
prepare-binaries:
|
||||||
|
runs-on: ubuntu-24.04
|
||||||
outputs:
|
outputs:
|
||||||
matrix: ${{ steps.platforms.outputs.matrix }}
|
matrix: ${{ steps.platforms.outputs.matrix }}
|
||||||
steps:
|
steps:
|
||||||
-
|
-
|
||||||
name: Checkout
|
name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
-
|
-
|
||||||
name: Create matrix
|
name: Create matrix
|
||||||
id: platforms
|
id: platforms
|
||||||
run: |
|
run: |
|
||||||
echo ::set-output name=matrix::$(docker buildx bake binaries-cross --print | jq -cr '.target."binaries-cross".platforms')
|
echo "matrix=$(docker buildx bake binaries-cross --print | jq -cr '.target."binaries-cross".platforms')" >>${GITHUB_OUTPUT}
|
||||||
-
|
-
|
||||||
name: Show matrix
|
name: Show matrix
|
||||||
run: |
|
run: |
|
||||||
echo ${{ steps.platforms.outputs.matrix }}
|
echo ${{ steps.platforms.outputs.matrix }}
|
||||||
|
|
||||||
binaries:
|
binaries:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-24.04
|
||||||
needs:
|
needs:
|
||||||
- prepare
|
- prepare-binaries
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
platform: ${{ fromJson(needs.prepare.outputs.matrix) }}
|
platform: ${{ fromJson(needs.prepare-binaries.outputs.matrix) }}
|
||||||
steps:
|
steps:
|
||||||
-
|
-
|
||||||
name: Prepare
|
name: Prepare
|
||||||
@@ -81,51 +377,63 @@ jobs:
|
|||||||
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
|
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
|
||||||
-
|
-
|
||||||
name: Checkout
|
name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
-
|
-
|
||||||
name: Set up QEMU
|
name: Set up QEMU
|
||||||
uses: docker/setup-qemu-action@v2
|
uses: docker/setup-qemu-action@v3
|
||||||
-
|
-
|
||||||
name: Set up Docker Buildx
|
name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v2
|
uses: docker/setup-buildx-action@v3
|
||||||
with:
|
with:
|
||||||
version: latest
|
version: ${{ env.SETUP_BUILDX_VERSION }}
|
||||||
|
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
|
||||||
|
buildkitd-flags: --debug
|
||||||
-
|
-
|
||||||
name: Build
|
name: Build
|
||||||
uses: docker/bake-action@v2
|
run: |
|
||||||
with:
|
make release
|
||||||
targets: release
|
env:
|
||||||
set: |
|
PLATFORMS: ${{ matrix.platform }}
|
||||||
*.platform=${{ matrix.platform }}
|
CACHE_FROM: type=gha,scope=binaries-${{ env.PLATFORM_PAIR }}
|
||||||
*.cache-from=type=gha,scope=binaries-${{ env.PLATFORM_PAIR }}
|
CACHE_TO: type=gha,scope=binaries-${{ env.PLATFORM_PAIR }},mode=max
|
||||||
*.cache-to=type=gha,scope=binaries-${{ env.PLATFORM_PAIR }},mode=max
|
|
||||||
-
|
-
|
||||||
name: Upload artifacts
|
name: Upload artifacts
|
||||||
uses: actions/upload-artifact@v3
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: buildx
|
name: buildx-${{ env.PLATFORM_PAIR }}
|
||||||
path: ${{ env.RELEASE_OUT }}/*
|
path: ${{ env.DESTDIR }}/*
|
||||||
if-no-files-found: error
|
if-no-files-found: error
|
||||||
|
|
||||||
bin-image:
|
bin-image:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-24.04
|
||||||
if: ${{ github.event_name != 'pull_request' }}
|
needs:
|
||||||
|
- test-integration
|
||||||
|
- test-unit
|
||||||
|
if: ${{ github.event_name != 'pull_request' && github.repository == 'docker/buildx' }}
|
||||||
steps:
|
steps:
|
||||||
-
|
-
|
||||||
name: Checkout
|
name: Free disk space
|
||||||
uses: actions/checkout@v3
|
uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be # v1.3.1
|
||||||
|
with:
|
||||||
|
android: true
|
||||||
|
dotnet: true
|
||||||
|
haskell: true
|
||||||
|
large-packages: true
|
||||||
|
swap-storage: true
|
||||||
-
|
-
|
||||||
name: Set up QEMU
|
name: Set up QEMU
|
||||||
uses: docker/setup-qemu-action@v2
|
uses: docker/setup-qemu-action@v3
|
||||||
-
|
-
|
||||||
name: Set up Docker Buildx
|
name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v2
|
uses: docker/setup-buildx-action@v3
|
||||||
with:
|
with:
|
||||||
version: latest
|
version: ${{ env.SETUP_BUILDX_VERSION }}
|
||||||
|
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
|
||||||
|
buildkitd-flags: --debug
|
||||||
-
|
-
|
||||||
name: Docker meta
|
name: Docker meta
|
||||||
id: meta
|
id: meta
|
||||||
uses: docker/metadata-action@v4
|
uses: docker/metadata-action@v5
|
||||||
with:
|
with:
|
||||||
images: |
|
images: |
|
||||||
${{ env.REPO_SLUG }}
|
${{ env.REPO_SLUG }}
|
||||||
@@ -137,78 +445,92 @@ jobs:
|
|||||||
-
|
-
|
||||||
name: Login to DockerHub
|
name: Login to DockerHub
|
||||||
if: github.event_name != 'pull_request'
|
if: github.event_name != 'pull_request'
|
||||||
uses: docker/login-action@v2
|
uses: docker/login-action@v3
|
||||||
with:
|
with:
|
||||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
username: ${{ vars.DOCKERPUBLICBOT_USERNAME }}
|
||||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
password: ${{ secrets.DOCKERPUBLICBOT_WRITE_PAT }}
|
||||||
-
|
-
|
||||||
name: Build and push image
|
name: Build and push image
|
||||||
uses: docker/bake-action@v2
|
uses: docker/bake-action@v6
|
||||||
with:
|
with:
|
||||||
files: |
|
files: |
|
||||||
./docker-bake.hcl
|
./docker-bake.hcl
|
||||||
${{ steps.meta.outputs.bake-file }}
|
cwd://${{ steps.meta.outputs.bake-file }}
|
||||||
targets: image-cross
|
targets: image-cross
|
||||||
push: ${{ github.event_name != 'pull_request' }}
|
push: ${{ github.event_name != 'pull_request' }}
|
||||||
|
sbom: true
|
||||||
set: |
|
set: |
|
||||||
*.cache-from=type=gha,scope=bin-image
|
*.cache-from=type=gha,scope=bin-image
|
||||||
*.cache-to=type=gha,scope=bin-image,mode=max
|
*.cache-to=type=gha,scope=bin-image,mode=max
|
||||||
|
|
||||||
release:
|
scout:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-24.04
|
||||||
|
if: ${{ github.ref == 'refs/heads/master' && github.repository == 'docker/buildx' }}
|
||||||
|
permissions:
|
||||||
|
# same as global permission
|
||||||
|
contents: read
|
||||||
|
# required to write sarif report
|
||||||
|
security-events: write
|
||||||
needs:
|
needs:
|
||||||
|
- bin-image
|
||||||
|
steps:
|
||||||
|
-
|
||||||
|
name: Login to DockerHub
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
username: ${{ vars.DOCKERPUBLICBOT_USERNAME }}
|
||||||
|
password: ${{ secrets.DOCKERPUBLICBOT_WRITE_PAT }}
|
||||||
|
-
|
||||||
|
name: Scout
|
||||||
|
id: scout
|
||||||
|
uses: crazy-max/.github/.github/actions/docker-scout@ccae1c98f1237b5c19e4ef77ace44fa68b3bc7e4
|
||||||
|
with:
|
||||||
|
version: ${{ env.SCOUT_VERSION }}
|
||||||
|
format: sarif
|
||||||
|
image: registry://${{ env.REPO_SLUG }}:master
|
||||||
|
-
|
||||||
|
name: Upload SARIF report
|
||||||
|
uses: github/codeql-action/upload-sarif@v3
|
||||||
|
with:
|
||||||
|
sarif_file: ${{ steps.scout.outputs.result-file }}
|
||||||
|
|
||||||
|
release:
|
||||||
|
runs-on: ubuntu-24.04
|
||||||
|
permissions:
|
||||||
|
# required to create GitHub release
|
||||||
|
contents: write
|
||||||
|
needs:
|
||||||
|
- test-integration
|
||||||
|
- test-unit
|
||||||
- binaries
|
- binaries
|
||||||
steps:
|
steps:
|
||||||
-
|
-
|
||||||
name: Checkout
|
name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
-
|
-
|
||||||
name: Download binaries
|
name: Download binaries
|
||||||
uses: actions/download-artifact@v3
|
uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: buildx
|
path: ${{ env.DESTDIR }}
|
||||||
path: ${{ env.RELEASE_OUT }}
|
pattern: buildx-*
|
||||||
|
merge-multiple: true
|
||||||
-
|
-
|
||||||
name: Create checksums
|
name: Create checksums
|
||||||
run: ./hack/hash-files
|
run: ./hack/hash-files
|
||||||
-
|
-
|
||||||
name: List artifacts
|
name: List artifacts
|
||||||
run: |
|
run: |
|
||||||
tree -nh ${{ env.RELEASE_OUT }}
|
tree -nh ${{ env.DESTDIR }}
|
||||||
-
|
-
|
||||||
name: Check artifacts
|
name: Check artifacts
|
||||||
run: |
|
run: |
|
||||||
find ${{ env.RELEASE_OUT }} -type f -exec file -e ascii -- {} +
|
find ${{ env.DESTDIR }} -type f -exec file -e ascii -- {} +
|
||||||
-
|
-
|
||||||
name: GitHub Release
|
name: GitHub Release
|
||||||
if: startsWith(github.ref, 'refs/tags/v')
|
if: startsWith(github.ref, 'refs/tags/v')
|
||||||
uses: softprops/action-gh-release@1e07f4398721186383de40550babbdf2b84acfc5
|
uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # v2.2.1
|
||||||
env:
|
env:
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
with:
|
with:
|
||||||
draft: true
|
draft: true
|
||||||
files: ${{ env.RELEASE_OUT }}/*
|
files: ${{ env.DESTDIR }}/*
|
||||||
|
|
||||||
buildkit-edge:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
continue-on-error: true
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
-
|
|
||||||
name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v2
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v2
|
|
||||||
with:
|
|
||||||
version: latest
|
|
||||||
driver-opts: image=moby/buildkit:master
|
|
||||||
buildkitd-flags: --debug
|
|
||||||
-
|
|
||||||
# Just run a bake target to check eveything runs fine
|
|
||||||
name: Build
|
|
||||||
uses: docker/bake-action@v2
|
|
||||||
with:
|
|
||||||
targets: binaries
|
|
||||||
|
|||||||
50 .github/workflows/codeql.yml vendored Normal file
@@ -0,0 +1,50 @@
+name: codeql
+
+# Default to 'contents: read', which grants actions to read commits.
+#
+# If any permission is set, any permission not included in the list is
+# implicitly set to "none".
+#
+# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
+permissions:
+  contents: read
+
+on:
+  push:
+    branches:
+      - 'master'
+      - 'v[0-9]*'
+  pull_request:
+
+env:
+  GO_VERSION: "1.23"
+
+jobs:
+  codeql:
+    runs-on: ubuntu-24.04
+    permissions:
+      contents: read
+      actions: read
+      security-events: write
+    steps:
+      -
+        name: Checkout
+        uses: actions/checkout@v4
+      -
+        name: Set up Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: ${{ env.GO_VERSION }}
+      -
+        name: Initialize CodeQL
+        uses: github/codeql-action/init@v3
+        with:
+          languages: go
+      -
+        name: Autobuild
+        uses: github/codeql-action/autobuild@v3
+      -
+        name: Perform CodeQL Analysis
+        uses: github/codeql-action/analyze@v3
+        with:
+          category: "/language:go"
75 .github/workflows/docs-release.yml vendored
@@ -1,56 +1,91 @@
 name: docs-release
 
+# Default to 'contents: read', which grants actions to read commits.
+#
+# If any permission is set, any permission not included in the list is
+# implicitly set to "none".
+#
+# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
+permissions:
+  contents: read
+
 on:
+  workflow_dispatch:
+    inputs:
+      tag:
+        description: 'Git tag'
+        required: true
   release:
-    types: [ released ]
+    types:
+      - released
 
+env:
+  SETUP_BUILDX_VERSION: "edge"
+  SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
+
 jobs:
   open-pr:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-24.04
+    if: ${{ (github.event.release.prerelease != true || github.event.inputs.tag != '') && github.repository == 'docker/buildx' }}
+    permissions:
+      contents: write
+      pull-requests: write
     steps:
       -
         name: Checkout docs repo
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           token: ${{ secrets.GHPAT_DOCS_DISPATCH }}
-          repository: docker/docker.github.io
-          ref: master
+          repository: docker/docs
+          ref: main
       -
         name: Prepare
         run: |
-          rm -rf ./_data/buildx/*
+          rm -rf ./data/buildx/*
+          if [ -n "${{ github.event.inputs.tag }}" ]; then
+            echo "RELEASE_NAME=${{ github.event.inputs.tag }}" >> $GITHUB_ENV
+          else
+            echo "RELEASE_NAME=${{ github.event.release.name }}" >> $GITHUB_ENV
+          fi
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
-      -
-        name: Build docs
-        uses: docker/bake-action@v2
+        uses: docker/setup-buildx-action@v3
         with:
-          source: ${{ github.server_url }}/${{ github.repository }}.git#${{ github.event.release.name }}
+          version: ${{ env.SETUP_BUILDX_VERSION }}
+          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
+          buildkitd-flags: --debug
+      -
+        name: Generate yaml
+        uses: docker/bake-action@v6
+        with:
+          source: ${{ github.server_url }}/${{ github.repository }}.git#${{ env.RELEASE_NAME }}
           targets: update-docs
+          provenance: false
           set: |
             *.output=/tmp/buildx-docs
         env:
           DOCS_FORMATS: yaml
       -
-        name: Copy files
+        name: Copy yaml
         run: |
-          cp /tmp/buildx-docs/out/reference/*.yaml ./_data/buildx/
+          cp /tmp/buildx-docs/out/reference/*.yaml ./data/buildx/
       -
-        name: Commit changes
+        name: Update vendor
         run: |
-          git add -A .
+          make vendor
+        env:
+          VENDOR_MODULE: github.com/docker/buildx@${{ env.RELEASE_NAME }}
       -
         name: Create PR on docs repo
-        uses: peter-evans/create-pull-request@923ad837f191474af6b1721408744feb989a4c27 # v4.0.4
+        uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
         with:
           token: ${{ secrets.GHPAT_DOCS_DISPATCH }}
           push-to-fork: docker-tools-robot/docker.github.io
-          commit-message: "build: update buildx reference to ${{ github.event.release.name }}"
+          commit-message: "vendor: github.com/docker/buildx ${{ env.RELEASE_NAME }}"
           signoff: true
-          branch: dispatch/buildx-ref-${{ github.event.release.name }}
+          branch: dispatch/buildx-ref-${{ env.RELEASE_NAME }}
           delete-branch: true
-          title: Update buildx reference to ${{ github.event.release.name }}
+          title: Update buildx reference to ${{ env.RELEASE_NAME }}
           body: |
-            Update the buildx reference documentation to keep in sync with the latest release `${{ github.event.release.name }}`
+            Update the buildx reference documentation to keep in sync with the latest release `${{ env.RELEASE_NAME }}`
           draft: false
95 .github/workflows/docs-upstream.yml vendored
@@ -3,6 +3,15 @@
 # https://github.com/docker/docker.github.io/blob/98c7c9535063ae4cd2cd0a31478a21d16d2f07a3/docker-bake.hcl#L34-L36
 name: docs-upstream
 
+# Default to 'contents: read', which grants actions to read commits.
+#
+# If any permission is set, any permission not included in the list is
+# implicitly set to "none".
+#
+# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
+permissions:
+  contents: read
+
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true
@@ -20,23 +29,27 @@ on:
       - '.github/workflows/docs-upstream.yml'
       - 'docs/**'
 
+env:
+  SETUP_BUILDX_VERSION: "edge"
+  SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
+
 jobs:
   docs-yaml:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-24.04
     steps:
-      -
-        name: Checkout
-        uses: actions/checkout@v3
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
         with:
-          version: latest
+          version: ${{ env.SETUP_BUILDX_VERSION }}
+          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
+          buildkitd-flags: --debug
      -
        name: Build reference YAML docs
-        uses: docker/bake-action@v2
+        uses: docker/bake-action@v6
        with:
          targets: update-docs
+          provenance: false
          set: |
            *.output=/tmp/buildx-docs
            *.cache-from=type=gha,scope=docs-yaml
@@ -45,74 +58,18 @@ jobs:
           DOCS_FORMATS: yaml
       -
         name: Upload reference YAML docs
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: docs-yaml
           path: /tmp/buildx-docs/out/reference
           retention-days: 1
 
   validate:
-    runs-on: ubuntu-latest
+    uses: docker/docs/.github/workflows/validate-upstream.yml@main
     needs:
       - docs-yaml
-    steps:
-      -
-        name: Checkout
-        uses: actions/checkout@v3
-        with:
-          repository: docker/docker.github.io
-      -
-        name: Install js-yaml
-        run: npm install js-yaml
-      -
-        # use the actual buildx ref that triggers this workflow, so we make
-        # sure pages fetched by docs repo are still valid
-        # https://github.com/docker/docker.github.io/blob/98c7c9535063ae4cd2cd0a31478a21d16d2f07a3/_config.yml#L164-L173
-        name: Set correct ref to fetch remote resources
-        uses: actions/github-script@v6
-        with:
-          script: |
-            const fs = require('fs');
-            const yaml = require('js-yaml');
-
-            const configFile = '_config.yml'
-            const config = yaml.load(fs.readFileSync(configFile, 'utf8'));
-            for (const remote of config['fetch-remote']) {
-              if (remote['repo'] != 'https://github.com/docker/buildx') {
-                continue;
-              }
-              remote['ref'] = "${{ github.ref }}";
-            }
-
-            try {
-              fs.writeFileSync(configFile, yaml.dump(config), 'utf8')
-            } catch (err) {
-              console.error(err.message)
-              process.exit(1)
-            }
-      -
-        name: Prepare
-        run: |
-          # print docs jekyll config updated in previous step
-          yq _config.yml
-          # cleanup reference yaml docs and js-yaml module
-          rm -rf ./_data/buildx/* ./node_modules
-      -
-        name: Download built reference YAML docs
-        uses: actions/download-artifact@v3
-        with:
-          name: docs-yaml
-          path: ./_data/buildx/
-      -
-        name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
-        with:
-          version: latest
-      -
-        name: Validate
-        uses: docker/bake-action@v2
-        with:
-          targets: validate
-          set: |
-            *.cache-from=type=gha,scope=docs-upstream
-            *.cache-to=type=gha,scope=docs-upstream,mode=max
+    with:
+      module-name: docker/buildx
+      data-files-id: docs-yaml
+      data-files-folder: buildx
+      create-placeholder-stubs: true
150 .github/workflows/e2e.yml vendored
@@ -1,5 +1,14 @@
 name: e2e
 
+# Default to 'contents: read', which grants actions to read commits.
+#
+# If any permission is set, any permission not included in the list is
+# implicitly set to "none".
+#
+# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
+permissions:
+  contents: read
+
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true
@@ -11,26 +20,31 @@ on:
       - 'master'
       - 'v[0-9]*'
   pull_request:
-    branches:
-      - 'master'
-      - 'v[0-9]*'
+    paths-ignore:
+      - '.github/releases.json'
+      - 'README.md'
+      - 'docs/**'
 
+env:
+  SETUP_BUILDX_VERSION: "edge"
+  SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
+  DESTDIR: "./bin"
+  K3S_VERSION: "v1.32.2+k3s1"
+
 jobs:
   build:
-    runs-on: ubuntu-20.04
-    env:
-      BIN_OUT: ./bin
+    runs-on: ubuntu-24.04
     steps:
-      - name: Checkout
-        uses: actions/checkout@v3
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
         with:
-          version: latest
+          version: ${{ env.SETUP_BUILDX_VERSION }}
+          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
+          buildkitd-flags: --debug
       -
         name: Build
-        uses: docker/bake-action@v2
+        uses: docker/bake-action@v6
         with:
           targets: binaries
           set: |
@@ -40,18 +54,18 @@ jobs:
       -
         name: Rename binary
         run: |
-          mv ${{ env.BIN_OUT }}/buildx ${{ env.BIN_OUT }}/docker-buildx
+          mv ${{ env.DESTDIR }}/build/buildx ${{ env.DESTDIR }}/build/docker-buildx
       -
         name: Upload artifacts
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: binary
-          path: ${{ env.BIN_OUT }}
+          path: ${{ env.DESTDIR }}/build
           if-no-files-found: error
           retention-days: 7
 
   driver:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-24.04
     needs:
       - build
     strategy:
@@ -79,6 +93,10 @@ jobs:
             driver-opt: qemu.install=true
           - driver: remote
             endpoint: tcp://localhost:1234
+          - driver: docker-container
+            metadata-provenance: max
+          - driver: docker-container
+            metadata-warnings: true
         exclude:
           - driver: docker
             multi-node: mnode-true
@@ -93,14 +111,14 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
      -
        name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+        uses: docker/setup-qemu-action@v3
        if: matrix.driver == 'docker' || matrix.driver == 'docker-container'
      -
        name: Install buildx
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
        with:
          name: binary
          path: /home/runner/.docker/cli-plugins
@@ -126,23 +144,18 @@ jobs:
           else
             echo "MULTI_NODE=0" >> $GITHUB_ENV
           fi
+          if [ -n "${{ matrix.metadata-provenance }}" ]; then
+            echo "BUILDX_METADATA_PROVENANCE=${{ matrix.metadata-provenance }}" >> $GITHUB_ENV
+          fi
+          if [ -n "${{ matrix.metadata-warnings }}" ]; then
+            echo "BUILDX_METADATA_WARNINGS=${{ matrix.metadata-warnings }}" >> $GITHUB_ENV
+          fi
       -
         name: Install k3s
         if: matrix.driver == 'kubernetes'
-        uses: debianmaster/actions-k3s@b9cf3f599fd118699a3c8a0d18a2f2bda6cf4ce4
-        id: k3s
+        uses: crazy-max/.github/.github/actions/install-k3s@7730d1434364d4b9aded32735b078a7ace5ea79a
         with:
-          version: v1.21.2-k3s1
-      -
-        name: Config k3s
-        if: matrix.driver == 'kubernetes'
-        run: |
-          (set -x ; cat ${{ steps.k3s.outputs.kubeconfig }})
-      -
-        name: Check k3s nodes
-        if: matrix.driver == 'kubernetes'
-        run: |
-          kubectl get nodes
+          version: ${{ env.K3S_VERSION }}
       -
         name: Launch remote buildkitd
         if: matrix.driver == 'remote'
@@ -164,3 +177,78 @@ jobs:
           DRIVER_OPT: ${{ matrix.driver-opt }}
           ENDPOINT: ${{ matrix.endpoint }}
           PLATFORMS: ${{ matrix.platforms }}
+
+  bake:
+    runs-on: ubuntu-24.04
+    needs:
+      - build
+    env:
+      DOCKER_BUILD_CHECKS_ANNOTATIONS: false
+      DOCKER_BUILD_SUMMARY: false
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          -
+            # https://github.com/docker/bake-action/blob/v5.11.0/.github/workflows/ci.yml#L227-L237
+            source: "https://github.com/docker/bake-action.git#v5.11.0:test/go"
+            overrides: |
+              *.output=/tmp/bake-build
+          -
+            # https://github.com/tonistiigi/xx/blob/2fc85604e7280bfb3f626569bd4c5413c43eb4af/.github/workflows/ld.yml#L90-L98
+            source: "https://github.com/tonistiigi/xx.git#2fc85604e7280bfb3f626569bd4c5413c43eb4af"
+            targets: |
+              ld64-static-tgz
+            overrides: |
+              ld64-static-tgz.output=type=local,dest=./dist
+              ld64-static-tgz.platform=linux/amd64
+              ld64-static-tgz.cache-from=type=gha,scope=xx-ld64-static-tgz
+              ld64-static-tgz.cache-to=type=gha,scope=xx-ld64-static-tgz
+          -
+            # https://github.com/moby/buildkit-bench/blob/54c194011c4fc99a94aa75d4b3d4f3ffd4c4ce27/docker-bake.hcl#L154-L160
+            source: "https://github.com/moby/buildkit-bench.git#54c194011c4fc99a94aa75d4b3d4f3ffd4c4ce27"
+            targets: |
+              tests-buildkit
+            envs: |
+              BUILDKIT_REFS=v0.18.2
+    steps:
+      -
+        name: Checkout
+        uses: actions/checkout@v4
+      -
+        name: Expose GitHub Runtime
+        uses: crazy-max/ghaction-github-runtime@v3
+      -
+        name: Environment variables
+        if: matrix.envs != ''
+        run: |
+          for l in "${{ matrix.envs }}"; do
+            echo "${l?}" >> $GITHUB_ENV
+          done
+      -
+        name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
+      -
+        name: Install buildx
+        uses: actions/download-artifact@v4
+        with:
+          name: binary
+          path: /home/runner/.docker/cli-plugins
+      -
+        name: Fix perms and check
+        run: |
+          chmod +x /home/runner/.docker/cli-plugins/docker-buildx
+          docker buildx version
+      -
+        name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+        with:
+          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
+          buildkitd-flags: --debug
+      -
+        name: Build
+        uses: docker/bake-action@v6
+        with:
+          source: ${{ matrix.source }}
+          targets: ${{ matrix.targets }}
+          set: ${{ matrix.overrides }}
32 .github/workflows/labeler.yml vendored Normal file
@@ -0,0 +1,32 @@
+name: labeler
+
+# Default to 'contents: read', which grants actions to read commits.
+#
+# If any permission is set, any permission not included in the list is
+# implicitly set to "none".
+#
+# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
+permissions:
+  contents: read
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
+on:
+  pull_request_target:
+
+jobs:
+  labeler:
+    runs-on: ubuntu-latest
+    permissions:
+      # same as global permission
+      contents: read
+      # required for writing labels
+      pull-requests: write
+    steps:
+      -
+        name: Run
+        uses: actions/labeler@v5
+        with:
+          sync-labels: true
104 .github/workflows/validate.yml vendored
@@ -1,5 +1,14 @@
 name: validate
 
+# Default to 'contents: read', which grants actions to read commits.
+#
+# If any permission is set, any permission not included in the list is
+# implicitly set to "none".
+#
+# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
+permissions:
+  contents: read
+
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true
@@ -13,30 +22,89 @@ on:
     tags:
       - 'v*'
   pull_request:
-    branches:
-      - 'master'
-      - 'v[0-9]*'
+    paths-ignore:
+      - '.github/releases.json'
 
+env:
+  SETUP_BUILDX_VERSION: "edge"
+  SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
+
 jobs:
-  validate:
-    runs-on: ubuntu-latest
-    strategy:
-      fail-fast: false
-      matrix:
-        target:
-          - lint
-          - validate-vendor
-          - validate-docs
+  prepare:
+    runs-on: ubuntu-24.04
+    outputs:
+      includes: ${{ steps.matrix.outputs.includes }}
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
+      -
+        name: Matrix
+        id: matrix
+        uses: actions/github-script@v7
+        with:
+          script: |
+            let def = {};
+            await core.group(`Parsing definition`, async () => {
+              const printEnv = Object.assign({}, process.env, {
+                GOLANGCI_LINT_MULTIPLATFORM: process.env.GITHUB_REPOSITORY === 'docker/buildx' ? '1' : ''
+              });
+              const resPrint = await exec.getExecOutput('docker', ['buildx', 'bake', 'validate', '--print'], {
+                ignoreReturnCode: true,
+                env: printEnv
+              });
+              if (resPrint.stderr.length > 0 && resPrint.exitCode != 0) {
+                throw new Error(res.stderr);
+              }
+              def = JSON.parse(resPrint.stdout.trim());
+            });
+            await core.group(`Generating matrix`, async () => {
+              const includes = [];
+              for (const targetName of Object.keys(def.target)) {
+                const target = def.target[targetName];
+                if (target.platforms && target.platforms.length > 0) {
+                  target.platforms.forEach(platform => {
+                    includes.push({
+                      target: targetName,
+                      platform: platform
+                    });
+                  });
+                } else {
+                  includes.push({
+                    target: targetName
+                  });
+                }
+              }
+              core.info(JSON.stringify(includes, null, 2));
+              core.setOutput('includes', JSON.stringify(includes));
+            });
+
+  validate:
+    runs-on: ubuntu-24.04
+    needs:
+      - prepare
+    strategy:
+      fail-fast: false
+      matrix:
+        include: ${{ fromJson(needs.prepare.outputs.includes) }}
+    steps:
+      -
+        name: Prepare
+        run: |
+          if [ "$GITHUB_REPOSITORY" = "docker/buildx" ]; then
+            echo "GOLANGCI_LINT_MULTIPLATFORM=1" >> $GITHUB_ENV
+          fi
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
         with:
-          version: latest
+          version: ${{ env.SETUP_BUILDX_VERSION }}
+          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
+          buildkitd-flags: --debug
       -
-        name: Run
-        run: |
-          make ${{ matrix.target }}
+        name: Validate
+        uses: docker/bake-action@v6
+        with:
+          targets: ${{ matrix.target }}
+          set: |
+            *.platform=${{ matrix.platform }}
5 .gitignore vendored
@@ -1,4 +1 @@
-bin
-coverage
-cross-out
-release-out
+/bin
115 .golangci.yml
@@ -1,40 +1,119 @@
 run:
-  timeout: 10m
-  skip-files:
-    - ".*\\.pb\\.go$"
+  timeout: 30m
 
   modules-download-mode: vendor
 
-  build-tags:
-
 linters:
   enable:
-    - gofmt
-    - govet
-    - deadcode
+    - bodyclose
     - depguard
+    - forbidigo
+    - gocritic
+    - gofmt
     - goimports
+    - gosec
+    - gosimple
+    - govet
     - ineffassign
+    - makezero
     - misspell
-    - unused
-    - varcheck
+    - noctx
+    - nolintlint
     - revive
     - staticcheck
+    - testifylint
     - typecheck
-    - structcheck
+    - unused
+    - whitespace
   disable-all: true
 
 linters-settings:
+  gocritic:
+    disabled-checks:
+      - "ifElseChain"
+      - "assignOp"
+      - "appendAssign"
+      - "singleCaseSwitch"
+      - "exitAfterDefer" # FIXME
+  importas:
+    alias:
+      # Enforce alias to prevent it accidentally being used instead of
+      # buildkit errdefs package (or vice-versa).
+      - pkg: "github.com/containerd/errdefs"
+        alias: "cerrdefs"
+      # Use a consistent alias to prevent confusion with "github.com/moby/buildkit/client"
+      - pkg: "github.com/docker/docker/client"
+        alias: "dockerclient"
+      - pkg: "github.com/opencontainers/image-spec/specs-go/v1"
+        alias: "ocispecs"
+      - pkg: "github.com/opencontainers/go-digest"
+        alias: "digest"
+  govet:
+    enable:
+      - nilness
+      - unusedwrite
+    # enable-all: true
+    # disable:
+    #   - fieldalignment
+    #   - shadow
   depguard:
-    list-type: blacklist
-    include-go-root: true
-    packages:
-      # The io/ioutil package has been deprecated.
-      # https://go.dev/doc/go1.16#ioutil
-      - io/ioutil
+    rules:
+      main:
+        deny:
+          - pkg: "github.com/containerd/containerd/errdefs"
+            desc: The containerd errdefs package was migrated to a separate module. Use github.com/containerd/errdefs instead.
+          - pkg: "github.com/containerd/containerd/log"
+            desc: The containerd log package was migrated to a separate module. Use github.com/containerd/log instead.
+          - pkg: "github.com/containerd/containerd/platforms"
+            desc: The containerd platforms package was migrated to a separate module. Use github.com/containerd/platforms instead.
+          - pkg: "io/ioutil"
+            desc: The io/ioutil package has been deprecated.
+  forbidigo:
+    forbid:
+      - '^context\.WithCancel(# use context\.WithCancelCause instead)?$'
+      - '^context\.WithDeadline(# use context\.WithDeadline instead)?$'
+      - '^context\.WithTimeout(# use context\.WithTimeoutCause instead)?$'
+      - '^ctx\.Err(# use context\.Cause instead)?$'
+      - '^fmt\.Errorf(# use errors\.Errorf instead)?$'
+      - '^platforms\.DefaultString(# use platforms\.Format(platforms\.DefaultSpec()) instead\.)?$'
+  gosec:
+    excludes:
+      - G204 # Audit use of command execution
+      - G402 # TLS MinVersion too low
+      - G115 # integer overflow conversion (TODO: verify these)
+    config:
+      G306: "0644"
+  testifylint:
+    disable:
+      # disable rules that reduce the test condition
+      - "empty"
+      - "bool-compare"
+      - "len"
+      - "negative-positive"
 
 issues:
+  exclude-files:
+    - ".*\\.pb\\.go$"
   exclude-rules:
     - linters:
         - revive
       text: "stutters"
+    - linters:
+        - revive
+      text: "empty-block"
+    - linters:
+        - revive
+      text: "superfluous-else"
+    - linters:
+        - revive
+      text: "unused-parameter"
+    - linters:
+        - revive
+      text: "redefines-builtin-id"
|
||||||
|
- linters:
|
||||||
|
- revive
|
||||||
|
text: "if-return"
|
||||||
|
|
||||||
|
# show all
|
||||||
|
max-issues-per-linter: 0
|
||||||
|
max-same-issues: 0
|
||||||
|
|||||||
14
.mailmap
14
.mailmap
@@ -1,11 +1,25 @@
|
|||||||
# This file lists all individuals having contributed content to the repository.
|
# This file lists all individuals having contributed content to the repository.
|
||||||
# For how it is generated, see hack/dockerfiles/authors.Dockerfile.
|
# For how it is generated, see hack/dockerfiles/authors.Dockerfile.
|
||||||
|
|
||||||
|
Batuhan Apaydın <batuhan.apaydin@trendyol.com>
|
||||||
|
Batuhan Apaydın <batuhan.apaydin@trendyol.com> <developerguy2@gmail.com>
|
||||||
CrazyMax <github@crazymax.dev>
|
CrazyMax <github@crazymax.dev>
|
||||||
CrazyMax <github@crazymax.dev> <1951866+crazy-max@users.noreply.github.com>
|
CrazyMax <github@crazymax.dev> <1951866+crazy-max@users.noreply.github.com>
|
||||||
CrazyMax <github@crazymax.dev> <crazy-max@users.noreply.github.com>
|
CrazyMax <github@crazymax.dev> <crazy-max@users.noreply.github.com>
|
||||||
|
David Karlsson <david.karlsson@docker.com>
|
||||||
|
David Karlsson <david.karlsson@docker.com> <35727626+dvdksn@users.noreply.github.com>
|
||||||
|
jaihwan104 <jaihwan104@woowahan.com>
|
||||||
|
jaihwan104 <jaihwan104@woowahan.com> <42341126+jaihwan104@users.noreply.github.com>
|
||||||
|
Kenyon Ralph <kenyon@kenyonralph.com>
|
||||||
|
Kenyon Ralph <kenyon@kenyonralph.com> <quic_kralph@quicinc.com>
|
||||||
Sebastiaan van Stijn <github@gone.nl>
|
Sebastiaan van Stijn <github@gone.nl>
|
||||||
Sebastiaan van Stijn <github@gone.nl> <thaJeztah@users.noreply.github.com>
|
Sebastiaan van Stijn <github@gone.nl> <thaJeztah@users.noreply.github.com>
|
||||||
|
Shaun Thompson <shaun.thompson@docker.com>
|
||||||
|
Shaun Thompson <shaun.thompson@docker.com> <shaun.b.thompson@gmail.com>
|
||||||
|
Silvin Lubecki <silvin.lubecki@docker.com>
|
||||||
|
Silvin Lubecki <silvin.lubecki@docker.com> <31478878+silvin-lubecki@users.noreply.github.com>
|
||||||
|
Talon Bowler <talon.bowler@docker.com>
|
||||||
|
Talon Bowler <talon.bowler@docker.com> <nolat301@gmail.com>
|
||||||
Tibor Vass <tibor@docker.com>
|
Tibor Vass <tibor@docker.com>
|
||||||
Tibor Vass <tibor@docker.com> <tiborvass@users.noreply.github.com>
|
Tibor Vass <tibor@docker.com> <tiborvass@users.noreply.github.com>
|
||||||
Tõnis Tiigi <tonistiigi@gmail.com>
|
Tõnis Tiigi <tonistiigi@gmail.com>
|
||||||
|
|||||||
69
AUTHORS
69
AUTHORS
@@ -1,45 +1,112 @@
|
|||||||
# This file lists all individuals having contributed content to the repository.
|
# This file lists all individuals having contributed content to the repository.
|
||||||
# For how it is generated, see hack/dockerfiles/authors.Dockerfile.
|
# For how it is generated, see hack/dockerfiles/authors.Dockerfile.
|
||||||
|
|
||||||
|
accetto <34798830+accetto@users.noreply.github.com>
|
||||||
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
|
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
|
||||||
|
Aleksa Sarai <cyphar@cyphar.com>
|
||||||
Alex Couture-Beil <alex@earthly.dev>
|
Alex Couture-Beil <alex@earthly.dev>
|
||||||
Andrew Haines <andrew.haines@zencargo.com>
|
Andrew Haines <andrew.haines@zencargo.com>
|
||||||
|
Andy Caldwell <andrew.caldwell@metaswitch.com>
|
||||||
Andy MacKinlay <admackin@users.noreply.github.com>
|
Andy MacKinlay <admackin@users.noreply.github.com>
|
||||||
Anthony Poschen <zanven42@gmail.com>
|
Anthony Poschen <zanven42@gmail.com>
|
||||||
|
Arnold Sobanski <arnold@l4g.dev>
|
||||||
Artur Klauser <Artur.Klauser@computer.org>
|
Artur Klauser <Artur.Klauser@computer.org>
|
||||||
Batuhan Apaydın <developerguy2@gmail.com>
|
Avi Deitcher <avi@deitcher.net>
|
||||||
|
Batuhan Apaydın <batuhan.apaydin@trendyol.com>
|
||||||
|
Ben Peachey <potherca@gmail.com>
|
||||||
|
Bertrand Paquet <bertrand.paquet@gmail.com>
|
||||||
Bin Du <bindu@microsoft.com>
|
Bin Du <bindu@microsoft.com>
|
||||||
Brandon Philips <brandon@ifup.org>
|
Brandon Philips <brandon@ifup.org>
|
||||||
Brian Goff <cpuguy83@gmail.com>
|
Brian Goff <cpuguy83@gmail.com>
|
||||||
|
Bryce Lampe <bryce@pulumi.com>
|
||||||
|
Cameron Adams <pnzreba@gmail.com>
|
||||||
|
Christian Dupuis <cd@atomist.com>
|
||||||
|
Cory Snider <csnider@mirantis.com>
|
||||||
CrazyMax <github@crazymax.dev>
|
CrazyMax <github@crazymax.dev>
|
||||||
|
David Gageot <david.gageot@docker.com>
|
||||||
|
David Karlsson <david.karlsson@docker.com>
|
||||||
|
David Scott <dave@recoil.org>
|
||||||
dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
|
dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
|
||||||
Devin Bayer <dev@doubly.so>
|
Devin Bayer <dev@doubly.so>
|
||||||
Djordje Lukic <djordje.lukic@docker.com>
|
Djordje Lukic <djordje.lukic@docker.com>
|
||||||
|
Dmitry Makovey <dmakovey@gitlab.com>
|
||||||
Dmytro Makovey <dmytro.makovey@docker.com>
|
Dmytro Makovey <dmytro.makovey@docker.com>
|
||||||
Donghui Wang <977675308@qq.com>
|
Donghui Wang <977675308@qq.com>
|
||||||
|
Doug Borg <dougborg@apple.com>
|
||||||
|
Edgar Lee <edgarl@netflix.com>
|
||||||
|
Eli Treuherz <et@arenko.group>
|
||||||
|
Eliott Wiener <eliottwiener@gmail.com>
|
||||||
|
Elran Shefer <elran.shefer@velocity.tech>
|
||||||
faust <faustin@fala.red>
|
faust <faustin@fala.red>
|
||||||
Felipe Santos <felipecassiors@gmail.com>
|
Felipe Santos <felipecassiors@gmail.com>
|
||||||
|
Felix de Souza <fdesouza@palantir.com>
|
||||||
Fernando Miguel <github@FernandoMiguel.net>
|
Fernando Miguel <github@FernandoMiguel.net>
|
||||||
gfrancesco <gfrancesco@users.noreply.github.com>
|
gfrancesco <gfrancesco@users.noreply.github.com>
|
||||||
gracenoah <gracenoahgh@gmail.com>
|
gracenoah <gracenoahgh@gmail.com>
|
||||||
|
Guillaume Lours <705411+glours@users.noreply.github.com>
|
||||||
|
guoguangwu <guoguangwu@magic-shield.com>
|
||||||
Hollow Man <hollowman@hollowman.ml>
|
Hollow Man <hollowman@hollowman.ml>
|
||||||
|
Ian King'ori <kingorim.ian@gmail.com>
|
||||||
|
idnandre <andre@idntimes.com>
|
||||||
Ilya Dmitrichenko <errordeveloper@gmail.com>
|
Ilya Dmitrichenko <errordeveloper@gmail.com>
|
||||||
|
Isaac Gaskin <isaac.gaskin@circle.com>
|
||||||
Jack Laxson <jackjrabbit@gmail.com>
|
Jack Laxson <jackjrabbit@gmail.com>
|
||||||
|
jaihwan104 <jaihwan104@woowahan.com>
|
||||||
Jean-Yves Gastaud <jygastaud@gmail.com>
|
Jean-Yves Gastaud <jygastaud@gmail.com>
|
||||||
|
Jhan S. Álvarez <51450231+yastanotheruser@users.noreply.github.com>
|
||||||
|
Jonathan A. Sternberg <jonathan.sternberg@docker.com>
|
||||||
|
Jonathan Piché <jpiche@coveo.com>
|
||||||
|
Justin Chadwell <me@jedevc.com>
|
||||||
|
Kenyon Ralph <kenyon@kenyonralph.com>
|
||||||
khs1994 <khs1994@khs1994.com>
|
khs1994 <khs1994@khs1994.com>
|
||||||
|
Kijima Daigo <norimaking777@gmail.com>
|
||||||
|
Kohei Tokunaga <ktokunaga.mail@gmail.com>
|
||||||
Kotaro Adachi <k33asby@gmail.com>
|
Kotaro Adachi <k33asby@gmail.com>
|
||||||
|
Kushagra Mansingh <12158241+kushmansingh@users.noreply.github.com>
|
||||||
l00397676 <lujingxiao@huawei.com>
|
l00397676 <lujingxiao@huawei.com>
|
||||||
|
Laura Brehm <laurabrehm@hey.com>
|
||||||
|
Laurent Goderre <laurent.goderre@docker.com>
|
||||||
|
Mark Hildreth <113933455+markhildreth-gravity@users.noreply.github.com>
|
||||||
|
Mayeul Blanzat <mayeul.blanzat@datadoghq.com>
|
||||||
Michal Augustyn <michal.augustyn@mail.com>
|
Michal Augustyn <michal.augustyn@mail.com>
|
||||||
|
Milas Bowman <milas.bowman@docker.com>
|
||||||
|
Mitsuru Kariya <mitsuru.kariya@nttdata.com>
|
||||||
|
Moleus <fafufuburr@gmail.com>
|
||||||
|
Nick Santos <nick.santos@docker.com>
|
||||||
|
Nick Sieger <nick@nicksieger.com>
|
||||||
|
Nicolas De Loof <nicolas.deloof@gmail.com>
|
||||||
|
Niklas Gehlen <niklas@namespacelabs.com>
|
||||||
Patrick Van Stee <patrick@vanstee.me>
|
Patrick Van Stee <patrick@vanstee.me>
|
||||||
|
Paweł Gronowski <pawel.gronowski@docker.com>
|
||||||
|
Phong Tran <tran.pho@northeastern.edu>
|
||||||
|
Qasim Sarfraz <qasimsarfraz@microsoft.com>
|
||||||
|
Rob Murray <rob.murray@docker.com>
|
||||||
|
robertlestak <robert.lestak@umusic.com>
|
||||||
Saul Shanabrook <s.shanabrook@gmail.com>
|
Saul Shanabrook <s.shanabrook@gmail.com>
|
||||||
|
Sean P. Kane <spkane00@gmail.com>
|
||||||
Sebastiaan van Stijn <github@gone.nl>
|
Sebastiaan van Stijn <github@gone.nl>
|
||||||
|
Shaun Thompson <shaun.thompson@docker.com>
|
||||||
SHIMA Tatsuya <ts1s1andn@gmail.com>
|
SHIMA Tatsuya <ts1s1andn@gmail.com>
|
||||||
Silvin Lubecki <silvin.lubecki@docker.com>
|
Silvin Lubecki <silvin.lubecki@docker.com>
|
||||||
|
Simon A. Eugster <simon.eu@gmail.com>
|
||||||
Solomon Hykes <sh.github.6811@hykes.org>
|
Solomon Hykes <sh.github.6811@hykes.org>
|
||||||
|
Sumner Warren <sumner.warren@gmail.com>
|
||||||
Sune Keller <absukl@almbrand.dk>
|
Sune Keller <absukl@almbrand.dk>
|
||||||
|
Talon Bowler <talon.bowler@docker.com>
|
||||||
|
Tianon Gravi <admwiggin@gmail.com>
|
||||||
Tibor Vass <tibor@docker.com>
|
Tibor Vass <tibor@docker.com>
|
||||||
|
Tim Smith <tismith@rvohealth.com>
|
||||||
|
Timofey Kirillov <timofey.kirillov@flant.com>
|
||||||
|
Tyler Smith <tylerlwsmith@gmail.com>
|
||||||
Tõnis Tiigi <tonistiigi@gmail.com>
|
Tõnis Tiigi <tonistiigi@gmail.com>
|
||||||
Ulysses Souza <ulyssessouza@gmail.com>
|
Ulysses Souza <ulyssessouza@gmail.com>
|
||||||
|
Usual Coder <34403413+Usual-Coder@users.noreply.github.com>
|
||||||
Wang Jinglei <morlay.null@gmail.com>
|
Wang Jinglei <morlay.null@gmail.com>
|
||||||
|
Wei <daviseago@gmail.com>
|
||||||
|
Wojciech M <wmiedzybrodzki@outlook.com>
|
||||||
Xiang Dai <764524258@qq.com>
|
Xiang Dai <764524258@qq.com>
|
||||||
|
Zachary Povey <zachary.povey@autotrader.co.uk>
|
||||||
zelahi <elahi.zuhayr@gmail.com>
|
zelahi <elahi.zuhayr@gmail.com>
|
||||||
|
Zero <tobewhatwewant@gmail.com>
|
||||||
|
zhyon404 <zhyong4@gmail.com>
|
||||||
|
Zsolt <zsolt.szeberenyi@figured.com>
|
||||||
|
|||||||
138
Dockerfile
138
Dockerfile
@@ -1,15 +1,30 @@
|
|||||||
# syntax=docker/dockerfile:1.4
|
# syntax=docker/dockerfile:1
|
||||||
|
|
||||||
ARG GO_VERSION=1.18
|
ARG GO_VERSION=1.23
|
||||||
ARG XX_VERSION=1.1.2
|
ARG ALPINE_VERSION=3.21
|
||||||
ARG DOCKERD_VERSION=20.10.14
|
ARG XX_VERSION=1.6.1
|
||||||
|
|
||||||
FROM docker:$DOCKERD_VERSION AS dockerd-release
|
# for testing
|
||||||
|
ARG DOCKER_VERSION=28.0.0
|
||||||
|
ARG DOCKER_VERSION_ALT_27=27.5.1
|
||||||
|
ARG DOCKER_VERSION_ALT_26=26.1.3
|
||||||
|
ARG DOCKER_CLI_VERSION=${DOCKER_VERSION}
|
||||||
|
ARG GOTESTSUM_VERSION=v1.12.0
|
||||||
|
ARG REGISTRY_VERSION=2.8.3
|
||||||
|
ARG BUILDKIT_VERSION=v0.20.2
|
||||||
|
ARG UNDOCK_VERSION=0.9.0
|
||||||
|
|
||||||
# xx is a helper for cross-compilation
|
|
||||||
FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
|
FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
|
||||||
|
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS golatest
|
||||||
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS golatest
|
FROM moby/moby-bin:$DOCKER_VERSION AS docker-engine
|
||||||
|
FROM dockereng/cli-bin:$DOCKER_CLI_VERSION AS docker-cli
|
||||||
|
FROM moby/moby-bin:$DOCKER_VERSION_ALT_27 AS docker-engine-alt27
|
||||||
|
FROM moby/moby-bin:$DOCKER_VERSION_ALT_26 AS docker-engine-alt26
|
||||||
|
FROM dockereng/cli-bin:$DOCKER_VERSION_ALT_27 AS docker-cli-alt27
|
||||||
|
FROM dockereng/cli-bin:$DOCKER_VERSION_ALT_26 AS docker-cli-alt26
|
||||||
|
FROM registry:$REGISTRY_VERSION AS registry
|
||||||
|
FROM moby/buildkit:$BUILDKIT_VERSION AS buildkit
|
||||||
|
FROM crazymax/undock:$UNDOCK_VERSION AS undock
|
||||||
|
|
||||||
FROM golatest AS gobase
|
FROM golatest AS gobase
|
||||||
COPY --from=xx / /
|
COPY --from=xx / /
|
||||||
@@ -18,23 +33,63 @@ ENV GOFLAGS=-mod=vendor
|
|||||||
ENV CGO_ENABLED=0
|
ENV CGO_ENABLED=0
|
||||||
WORKDIR /src
|
WORKDIR /src
|
||||||
|
|
||||||
|
FROM gobase AS gotestsum
|
||||||
|
ARG GOTESTSUM_VERSION
|
||||||
|
ENV GOFLAGS=""
|
||||||
|
RUN --mount=target=/root/.cache,type=cache <<EOT
|
||||||
|
set -ex
|
||||||
|
go install "gotest.tools/gotestsum@${GOTESTSUM_VERSION}"
|
||||||
|
go install "github.com/wadey/gocovmerge@latest"
|
||||||
|
mkdir /out
|
||||||
|
/go/bin/gotestsum --version
|
||||||
|
mv /go/bin/gotestsum /out
|
||||||
|
mv /go/bin/gocovmerge /out
|
||||||
|
EOT
|
||||||
|
COPY --chmod=755 <<"EOF" /out/gotestsumandcover
|
||||||
|
#!/bin/sh
|
||||||
|
set -x
|
||||||
|
if [ -z "$GO_TEST_COVERPROFILE" ]; then
|
||||||
|
exec gotestsum "$@"
|
||||||
|
fi
|
||||||
|
coverdir="$(dirname "$GO_TEST_COVERPROFILE")"
|
||||||
|
mkdir -p "$coverdir/helpers"
|
||||||
|
gotestsum "$@" "-coverprofile=$GO_TEST_COVERPROFILE"
|
||||||
|
ecode=$?
|
||||||
|
go tool covdata textfmt -i=$coverdir/helpers -o=$coverdir/helpers-report.txt
|
||||||
|
gocovmerge "$coverdir/helpers-report.txt" "$GO_TEST_COVERPROFILE" > "$coverdir/merged-report.txt"
|
||||||
|
mv "$coverdir/merged-report.txt" "$GO_TEST_COVERPROFILE"
|
||||||
|
rm "$coverdir/helpers-report.txt"
|
||||||
|
for f in "$coverdir/helpers"/*; do
|
||||||
|
rm "$f"
|
||||||
|
done
|
||||||
|
rmdir "$coverdir/helpers"
|
||||||
|
exit $ecode
|
||||||
|
EOF
|
||||||
|
|
||||||
FROM gobase AS buildx-version
|
FROM gobase AS buildx-version
|
||||||
RUN --mount=target=. \
|
RUN --mount=type=bind,target=. <<EOT
|
||||||
PKG=github.com/docker/buildx VERSION=$(git describe --match 'v[0-9]*' --dirty='.m' --always --tags) REVISION=$(git rev-parse HEAD)$(if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi); \
|
set -e
|
||||||
echo "-X ${PKG}/version.Version=${VERSION} -X ${PKG}/version.Revision=${REVISION} -X ${PKG}/version.Package=${PKG}" | tee /tmp/.ldflags; \
|
mkdir /buildx-version
|
||||||
echo -n "${VERSION}" | tee /tmp/.version;
|
echo -n "$(./hack/git-meta version)" | tee /buildx-version/version
|
||||||
|
echo -n "$(./hack/git-meta revision)" | tee /buildx-version/revision
|
||||||
|
EOT
|
||||||
|
|
||||||
FROM gobase AS buildx-build
|
FROM gobase AS buildx-build
|
||||||
ARG LDFLAGS="-w -s"
|
|
||||||
ARG TARGETPLATFORM
|
ARG TARGETPLATFORM
|
||||||
|
ARG GO_EXTRA_FLAGS
|
||||||
RUN --mount=type=bind,target=. \
|
RUN --mount=type=bind,target=. \
|
||||||
--mount=type=cache,target=/root/.cache \
|
--mount=type=cache,target=/root/.cache \
|
||||||
--mount=type=cache,target=/go/pkg/mod \
|
--mount=type=cache,target=/go/pkg/mod \
|
||||||
--mount=type=bind,source=/tmp/.ldflags,target=/tmp/.ldflags,from=buildx-version \
|
--mount=type=bind,from=buildx-version,source=/buildx-version,target=/buildx-version <<EOT
|
||||||
set -x; xx-go build -ldflags "$(cat /tmp/.ldflags) ${LDFLAGS}" -o /usr/bin/buildx ./cmd/buildx && \
|
set -e
|
||||||
xx-verify --static /usr/bin/buildx
|
xx-go --wrap
|
||||||
|
DESTDIR=/usr/bin VERSION=$(cat /buildx-version/version) REVISION=$(cat /buildx-version/revision) GO_EXTRA_LDFLAGS="-s -w" ./hack/build
|
||||||
|
file /usr/bin/docker-buildx
|
||||||
|
xx-verify --static /usr/bin/docker-buildx
|
||||||
|
EOT
|
||||||
|
|
||||||
FROM gobase AS test
|
FROM gobase AS test
|
||||||
|
ENV SKIP_INTEGRATION_TESTS=1
|
||||||
RUN --mount=type=bind,target=. \
|
RUN --mount=type=bind,target=. \
|
||||||
--mount=type=cache,target=/root/.cache \
|
--mount=type=cache,target=/root/.cache \
|
||||||
--mount=type=cache,target=/go/pkg/mod \
|
--mount=type=cache,target=/go/pkg/mod \
|
||||||
@@ -45,30 +100,67 @@ FROM scratch AS test-coverage
|
|||||||
COPY --from=test /tmp/coverage.txt /coverage.txt
|
COPY --from=test /tmp/coverage.txt /coverage.txt
|
||||||
|
|
||||||
FROM scratch AS binaries-unix
|
FROM scratch AS binaries-unix
|
||||||
COPY --link --from=buildx-build /usr/bin/buildx /
|
COPY --link --from=buildx-build /usr/bin/docker-buildx /buildx
|
||||||
|
|
||||||
FROM binaries-unix AS binaries-darwin
|
FROM binaries-unix AS binaries-darwin
|
||||||
|
FROM binaries-unix AS binaries-freebsd
|
||||||
FROM binaries-unix AS binaries-linux
|
FROM binaries-unix AS binaries-linux
|
||||||
|
FROM binaries-unix AS binaries-netbsd
|
||||||
|
FROM binaries-unix AS binaries-openbsd
|
||||||
|
|
||||||
FROM scratch AS binaries-windows
|
FROM scratch AS binaries-windows
|
||||||
COPY --link --from=buildx-build /usr/bin/buildx /buildx.exe
|
COPY --link --from=buildx-build /usr/bin/docker-buildx /buildx.exe
|
||||||
|
|
||||||
FROM binaries-$TARGETOS AS binaries
|
FROM binaries-$TARGETOS AS binaries
|
||||||
|
# enable scanning for this stage
|
||||||
|
ARG BUILDKIT_SBOM_SCAN_STAGE=true
|
||||||
|
|
||||||
|
FROM gobase AS integration-test-base
|
||||||
|
# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies
|
||||||
|
RUN apk add --no-cache \
|
||||||
|
btrfs-progs \
|
||||||
|
e2fsprogs \
|
||||||
|
e2fsprogs-extra \
|
||||||
|
ip6tables \
|
||||||
|
iptables \
|
||||||
|
openssl \
|
||||||
|
shadow-uidmap \
|
||||||
|
xfsprogs \
|
||||||
|
xz
|
||||||
|
COPY --link --from=gotestsum /out /usr/bin/
|
||||||
|
COPY --link --from=registry /bin/registry /usr/bin/
|
||||||
|
COPY --link --from=docker-engine / /usr/bin/
|
||||||
|
COPY --link --from=docker-cli / /usr/bin/
|
||||||
|
COPY --link --from=docker-engine-alt27 / /opt/docker-alt-27/
|
||||||
|
COPY --link --from=docker-engine-alt26 / /opt/docker-alt-26/
|
||||||
|
COPY --link --from=docker-cli-alt27 / /opt/docker-alt-27/
|
||||||
|
COPY --link --from=docker-cli-alt26 / /opt/docker-alt-26/
|
||||||
|
COPY --link --from=buildkit /usr/bin/buildkitd /usr/bin/
|
||||||
|
COPY --link --from=buildkit /usr/bin/buildctl /usr/bin/
|
||||||
|
COPY --link --from=undock /usr/local/bin/undock /usr/bin/
|
||||||
|
COPY --link --from=binaries /buildx /usr/bin/
|
||||||
|
ENV TEST_DOCKER_EXTRA="docker@27.5=/opt/docker-alt-27,docker@26.1=/opt/docker-alt-26"
|
||||||
|
|
||||||
|
FROM integration-test-base AS integration-test
|
||||||
|
COPY . .
|
||||||
|
|
||||||
# Release
|
# Release
|
||||||
FROM --platform=$BUILDPLATFORM alpine AS releaser
|
FROM --platform=$BUILDPLATFORM alpine:${ALPINE_VERSION} AS releaser
|
||||||
WORKDIR /work
|
WORKDIR /work
|
||||||
ARG TARGETPLATFORM
|
ARG TARGETPLATFORM
|
||||||
RUN --mount=from=binaries \
|
RUN --mount=from=binaries \
|
||||||
--mount=type=bind,source=/tmp/.version,target=/tmp/.version,from=buildx-version \
|
--mount=type=bind,from=buildx-version,source=/buildx-version,target=/buildx-version <<EOT
|
||||||
mkdir -p /out && cp buildx* "/out/buildx-$(cat /tmp/.version).$(echo $TARGETPLATFORM | sed 's/\//-/g')$(ls buildx* | sed -e 's/^buildx//')"
|
set -e
|
||||||
|
mkdir -p /out
|
||||||
|
cp buildx* "/out/buildx-$(cat /buildx-version/version).$(echo $TARGETPLATFORM | sed 's/\//-/g')$(ls buildx* | sed -e 's/^buildx//')"
|
||||||
|
EOT
|
||||||
|
|
||||||
FROM scratch AS release
|
FROM scratch AS release
|
||||||
COPY --from=releaser /out/ /
|
COPY --from=releaser /out/ /
|
||||||
|
|
||||||
# Shell
|
# Shell
|
||||||
FROM docker:$DOCKERD_VERSION AS dockerd-release
|
FROM docker:$DOCKER_VERSION AS dockerd-release
|
||||||
FROM alpine AS shell
|
FROM alpine:${ALPINE_VERSION} AS shell
|
||||||
RUN apk add --no-cache iptables tmux git vim less openssh
|
RUN apk add --no-cache iptables tmux git vim less openssh
|
||||||
RUN mkdir -p /usr/local/lib/docker/cli-plugins && ln -s /usr/local/bin/buildx /usr/local/lib/docker/cli-plugins/docker-buildx
|
RUN mkdir -p /usr/local/lib/docker/cli-plugins && ln -s /usr/local/bin/buildx /usr/local/lib/docker/cli-plugins/docker-buildx
|
||||||
COPY ./hack/demo-env/entrypoint.sh /usr/local/bin
|
COPY ./hack/demo-env/entrypoint.sh /usr/local/bin
|
||||||
|
|||||||
@@ -153,6 +153,7 @@ made through a pull request.
|
|||||||
"akihirosuda",
|
"akihirosuda",
|
||||||
"crazy-max",
|
"crazy-max",
|
||||||
"jedevc",
|
"jedevc",
|
||||||
|
"jsternberg",
|
||||||
"tiborvass",
|
"tiborvass",
|
||||||
"tonistiigi",
|
"tonistiigi",
|
||||||
]
|
]
|
||||||
@@ -194,6 +195,11 @@ made through a pull request.
|
|||||||
Email = "me@jedevc.com"
|
Email = "me@jedevc.com"
|
||||||
GitHub = "jedevc"
|
GitHub = "jedevc"
|
||||||
|
|
||||||
|
[people.jsternberg]
|
||||||
|
Name = "Jonathan Sternberg"
|
||||||
|
Email = "jonathan.sternberg@docker.com"
|
||||||
|
GitHub = "jsternberg"
|
||||||
|
|
||||||
[people.thajeztah]
|
[people.thajeztah]
|
||||||
Name = "Sebastiaan van Stijn"
|
Name = "Sebastiaan van Stijn"
|
||||||
Email = "github@gone.nl"
|
Email = "github@gone.nl"
|
||||||
|
|||||||
58
Makefile
58
Makefile
@@ -4,59 +4,71 @@ else ifneq (, $(shell docker buildx version))
|
|||||||
export BUILDX_CMD = docker buildx
|
export BUILDX_CMD = docker buildx
|
||||||
else ifneq (, $(shell which buildx))
|
else ifneq (, $(shell which buildx))
|
||||||
export BUILDX_CMD = $(which buildx)
|
export BUILDX_CMD = $(which buildx)
|
||||||
else
|
|
||||||
$(error "Buildx is required: https://github.com/docker/buildx#installing")
|
|
||||||
endif
|
endif
|
||||||
|
|
||||||
export BIN_OUT = ./bin
|
export BUILDX_CMD ?= docker buildx
|
||||||
export RELEASE_OUT = ./release-out
|
|
||||||
|
|
||||||
|
BAKE_TARGETS := binaries binaries-cross lint lint-gopls validate-vendor validate-docs validate-authors validate-generated-files
|
||||||
|
|
||||||
|
.PHONY: all
|
||||||
|
all: binaries
|
||||||
|
|
||||||
|
.PHONY: build
|
||||||
|
build:
|
||||||
|
./hack/build
|
||||||
|
|
||||||
|
.PHONY: shell
|
||||||
shell:
|
shell:
|
||||||
./hack/shell
|
./hack/shell
|
||||||
|
|
||||||
binaries:
|
.PHONY: $(BAKE_TARGETS)
|
||||||
$(BUILDX_CMD) bake binaries
|
$(BAKE_TARGETS):
|
||||||
|
$(BUILDX_CMD) bake $@
|
||||||
binaries-cross:
|
|
||||||
$(BUILDX_CMD) bake binaries-cross
|
|
||||||
|
|
||||||
|
.PHONY: install
|
||||||
install: binaries
|
install: binaries
|
||||||
mkdir -p ~/.docker/cli-plugins
|
mkdir -p ~/.docker/cli-plugins
|
||||||
install bin/buildx ~/.docker/cli-plugins/docker-buildx
|
install bin/build/buildx ~/.docker/cli-plugins/docker-buildx
|
||||||
|
|
||||||
|
.PHONY: release
|
||||||
release:
|
release:
|
||||||
./hack/release
|
./hack/release
|
||||||
|
|
||||||
validate-all: lint test validate-vendor validate-docs
|
.PHONY: validate-all
|
||||||
|
validate-all: lint test validate-vendor validate-docs validate-generated-files
|
||||||
lint:
|
|
||||||
$(BUILDX_CMD) bake lint
|
|
||||||
|
|
||||||
|
.PHONY: test
|
||||||
test:
|
test:
|
||||||
$(BUILDX_CMD) bake test
|
./hack/test
|
||||||
|
|
||||||
validate-vendor:
|
.PHONY: test-unit
|
||||||
$(BUILDX_CMD) bake validate-vendor
|
test-unit:
|
||||||
|
TESTPKGS=./... SKIP_INTEGRATION_TESTS=1 ./hack/test
|
||||||
|
|
||||||
validate-docs:
|
.PHONY: test
|
||||||
$(BUILDX_CMD) bake validate-docs
|
test-integration:
|
||||||
|
TESTPKGS=./tests ./hack/test
|
||||||
validate-authors:
|
|
||||||
$(BUILDX_CMD) bake validate-authors
|
|
||||||
|
|
||||||
|
.PHONY: test-driver
|
||||||
test-driver:
|
test-driver:
|
||||||
./hack/test-driver
|
./hack/test-driver
|
||||||
|
|
||||||
|
.PHONY: vendor
|
||||||
vendor:
|
vendor:
|
||||||
./hack/update-vendor
|
./hack/update-vendor
|
||||||
|
|
||||||
|
.PHONY: docs
|
||||||
docs:
|
docs:
|
||||||
./hack/update-docs
|
./hack/update-docs
|
||||||
|
|
||||||
|
.PHONY: authors
|
||||||
authors:
|
authors:
|
||||||
$(BUILDX_CMD) bake update-authors
|
$(BUILDX_CMD) bake update-authors
|
||||||
|
|
||||||
|
.PHONY: mod-outdated
|
||||||
mod-outdated:
|
mod-outdated:
|
||||||
$(BUILDX_CMD) bake mod-outdated
|
$(BUILDX_CMD) bake mod-outdated
|
||||||
|
|
||||||
.PHONY: shell binaries binaries-cross install release validate-all lint validate-vendor validate-docs validate-authors vendor docs authors
|
.PHONY: generated-files
|
||||||
|
generated-files:
|
||||||
|
$(BUILDX_CMD) bake update-generated-files
|
||||||
|
|||||||
453
PROJECT.md
Normal file
453
PROJECT.md
Normal file
@@ -0,0 +1,453 @@
|
|||||||
|
# Project processing guide <!-- omit from toc -->
|
||||||
|
|
||||||
|
- [Project scope](#project-scope)
|
||||||
|
- [Labels](#labels)
|
||||||
|
- [Global](#global)
|
||||||
|
- [`area/`](#area)
|
||||||
|
- [`exp/`](#exp)
|
||||||
|
- [`impact/`](#impact)
|
||||||
|
- [`kind/`](#kind)
|
||||||
|
- [`needs/`](#needs)
|
||||||
|
- [`priority/`](#priority)
|
||||||
|
- [`status/`](#status)
|
||||||
|
- [Types of releases](#types-of-releases)
|
||||||
|
- [Feature releases](#feature-releases)
|
||||||
|
- [Release Candidates](#release-candidates)
|
||||||
|
- [Support Policy](#support-policy)
|
||||||
|
- [Contributing to Releases](#contributing-to-releases)
|
||||||
|
- [Patch releases](#patch-releases)
|
||||||
|
- [Milestones](#milestones)
|
||||||
|
- [Triage process](#triage-process)
|
||||||
|
- [Verify essential information](#verify-essential-information)
|
||||||
|
- [Classify the issue](#classify-the-issue)
|
||||||
|
- [Prioritization guidelines for `kind/bug`](#prioritization-guidelines-for-kindbug)
|
||||||
|
- [Issue lifecycle](#issue-lifecycle)
|
||||||
|
- [Examples](#examples)
|
||||||
|
- [Submitting a bug](#submitting-a-bug)
|
||||||
|
- [Pull request review process](#pull-request-review-process)
|
||||||
|
- [Handling stalled issues and pull requests](#handling-stalled-issues-and-pull-requests)
|
||||||
|
- [Moving to a discussion](#moving-to-a-discussion)
|
||||||
|
- [Workflow automation](#workflow-automation)
|
||||||
|
- [Exempting an issue/PR from stale bot processing](#exempting-an-issuepr-from-stale-bot-processing)
|
||||||
|
- [Updating dependencies](#updating-dependencies)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Project scope
|
||||||
|
|
||||||
|
**Docker Buildx** is a Docker CLI plugin designed to extend build capabilities using BuildKit. It provides advanced features for building container images, supporting multiple builder instances, multi-node builds, and high-level build constructs. Buildx enhances the Docker build process, making it more efficient and flexible, and is compatible with both Docker and Kubernetes environments. Key features include:
|
||||||
|
|
||||||
|
- **Familiar user experience:** Buildx offers a user experience similar to legacy docker build, ensuring a smooth transition from legacy commands
|
||||||
|
- **Full BuildKit capabilities:** Leverage the full feature set of [`moby/buildkit`](https://github.com/moby/buildkit) when using the container driver
|
||||||
|
- **Multiple builder instances:** Supports the use of multiple builder instances, allowing concurrent builds and effective management and monitoring of these builders.
|
||||||
|
- **Multi-node builds:** Use multiple nodes to build cross-platform images
|
||||||
|
- **Compose integration:** Build complex, multi-services files as defined in compose
|
||||||
|
- **High-level build constructs via `bake`:** Introduces high-level build constructs for more complex build workflows
|
||||||
|
- **In-container driver support:** Support in-container drivers for both Docker and Kubernetes environments to support isolation/security.
|
||||||
|
|
||||||
|
## Labels
|
||||||
|
|
||||||
|
Below are common groups, labels, and their intended usage to support issues, pull requests, and discussion processing.
|
||||||
|
|
||||||
|
### Global
|
||||||
|
|
||||||
|
General attributes that can apply to nearly any issue or pull request.
|
||||||
|
|
||||||
|
| Label | Applies to | Description |
|
||||||
|
| ------------------- | ----------- | ------------------------------------------------------------------------- |
|
||||||
|
| `bot` | Issues, PRs | Created by a bot |
|
||||||
|
| `good first issue ` | Issues | Suitable for first-time contributors |
|
||||||
|
| `help wanted` | Issues, PRs | Assistance requested |
|
||||||
|
| `lgtm` | PRs | “Looks good to me” approval |
|
||||||
|
| `stale` | Issues, PRs | The issue/PR has not had activity for a while |
|
||||||
|
| `rotten` | Issues, PRs | The issue/PR has not had activity since being marked stale and was closed |
|
||||||
|
| `frozen` | Issues, PRs | The issue/PR should be skipped by the stale-bot |
|
||||||
|
| `dco/no` | PRs | The PR is missing a developer certificate of origin sign-off |
|
||||||
|
|
||||||
|
### `area/`
|
||||||
|
|
||||||
|
Area or component of the project affected. Please note that the table below may not be inclusive of all current options.
|
||||||
|
|
||||||
|
| Label | Applies to | Description |
|
||||||
|
| ------------------------------ | ---------- | -------------------------- |
|
||||||
|
| `area/bake` | Any | `bake` |
|
||||||
|
| `area/bake/compose` | Any | `bake/compose` |
|
||||||
|
| `area/build` | Any | `build` |
|
||||||
|
| `area/builder` | Any | `builder` |
|
||||||
|
| `area/buildkit` | Any | Relates to `moby/buildkit` |
|
||||||
|
| `area/cache` | Any | `cache` |
|
||||||
|
| `area/checks` | Any | `checks` |
|
||||||
|
| `area/ci` | Any | Project CI |
|
||||||
|
| `area/cli` | Any | `cli` |
|
||||||
|
| `area/controller` | Any | `controller` |
|
||||||
|
| `area/debug` | Any | `debug` |
|
||||||
|
| `area/dependencies` | Any | Project dependencies |
|
||||||
|
| `area/dockerfile` | Any | `dockerfile` |
|
||||||
|
| `area/docs` | Any | `docs` |
|
||||||
|
| `area/driver` | Any | `driver` |
|
||||||
|
| `area/driver/docker` | Any | `driver/docker` |
|
||||||
|
| `area/driver/docker-container` | Any | `driver/docker-container` |
|
||||||
|
| `area/driver/kubernetes` | Any | `driver/kubernetes` |
|
||||||
|
| `area/driver/remote` | Any | `driver/remote` |
|
||||||
|
| `area/feature-parity` | Any | `feature-parity` |
|
||||||
|
| `area/github-actions` | Any | `github-actions` |
|
||||||
|
| `area/hack` | Any | Project hack/support |
|
||||||
|
| `area/imagetools` | Any | `imagetools` |
|
||||||
|
| `area/metrics` | Any | `metrics` |
|
||||||
|
| `area/moby` | Any | Relates to `moby/moby` |
|
||||||
|
| `area/project` | Any | Project support |
|
||||||
|
| `area/qemu` | Any | `qemu` |
|
||||||
|
| `area/tests` | Any | Project testing |
|
||||||
|
| `area/windows` | Any | `windows` |
|
||||||
|
|
||||||
|
### `exp/`
|
||||||
|
|
||||||
|
Estimated experience level to complete the item
|
||||||
|
|
||||||
|
| Label | Applies to | Description |
|
||||||
|
| ------------------ | ---------- | ------------------------------------------------------------------------------- |
|
||||||
|
| `exp/beginner` | Issue | Suitable for contributors new to the project or technology stack |
|
||||||
|
| `exp/intermediate` | Issue | Requires some familiarity with the project and technology |
|
||||||
|
| `exp/expert` | Issue | Requires deep understanding and advanced skills with the project and technology |
|
||||||
|
|
||||||
|
### `impact/`
|
||||||
|
|
||||||
|
Potential impact areas of the issue or pull request.
|
||||||
|
|
||||||
|
| Label | Applies to | Description |
|
||||||
|
| -------------------- | ---------- | -------------------------------------------------- |
|
||||||
|
| `impact/breaking` | PR | Change is API-breaking |
|
||||||
|
| `impact/changelog` | PR | When complete, the item should be in the changelog |
|
||||||
|
| `impact/deprecation` | PR | Change is a deprecation of a feature |
|
||||||
|
|
||||||
|
|
||||||
|
### `kind/`
|
||||||
|
|
||||||
|
The type of issue, pull request or discussion
|
||||||
|
|
||||||
|
| Label | Applies to | Description |
|
||||||
|
| ------------------ | ----------------- | ------------------------------------------------------- |
|
||||||
|
| `kind/bug` | Issue, PR | Confirmed bug |
|
||||||
|
| `kind/chore` | Issue, PR | Project support tasks |
|
||||||
|
| `kind/docs` | Issue, PR | Additions or modifications to the documentation |
|
||||||
|
| `kind/duplicate` | Any | Duplicate of another item |
|
||||||
|
| `kind/enhancement` | Any | Enhancement of an existing feature |
|
||||||
|
| `kind/feature` | Any | A brand new feature |
|
||||||
|
| `kind/maybe-bug` | Issue, PR | Unconfirmed bug, turns into kind/bug when confirmed |
|
||||||
|
| `kind/proposal` | Issue, Discussion | A proposed major change |
|
||||||
|
| `kind/refactor` | Issue, PR | Refactor of existing code |
|
||||||
|
| `kind/support` | Any | A question, discussion, or other user support item |
|
||||||
|
| `kind/tests` | Issue, PR | Additions or modifications to the project testing suite |
|
||||||
|
|
||||||
|
### `needs/`
|
||||||
|
|
||||||
|
Actions or missing requirements needed by the issue or pull request.
|
||||||
|
|
||||||
|
| Label | Applies to | Description |
|
||||||
|
| --------------------------- | ---------- | ----------------------------------------------------- |
|
||||||
|
| `needs/assignee` | Issue, PR | Needs an assignee |
|
||||||
|
| `needs/code-review` | PR | Needs review of code |
|
||||||
|
| `needs/design-review` | Issue, PR | Needs review of design |
|
||||||
|
| `needs/docs-review` | Issue, PR | Needs review by the documentation team |
|
||||||
|
| `needs/docs-update` | Issue, PR | Needs an update to the docs |
|
||||||
|
| `needs/follow-on-work` | Issue, PR | Needs follow-on work/PR |
|
||||||
|
| `needs/issue` | PR | Needs an issue |
|
||||||
|
| `needs/maintainer-decision` | Issue, PR | Needs maintainer discussion/decision before advancing |
|
||||||
|
| `needs/milestone` | Issue, PR | Needs milestone assignment |
|
||||||
|
| `needs/more-info` | Any | Needs more information from the author |
|
||||||
|
| `needs/more-investigation` | Issue, PR | Needs further investigation |
|
||||||
|
| `needs/priority` | Issue, PR | Needs priority assignment |
|
||||||
|
| `needs/pull-request` | Issue | Needs a pull request |
|
||||||
|
| `needs/rebase` | PR | Needs rebase to target branch |
|
||||||
|
| `needs/reproduction` | Issue, PR | Needs reproduction steps |
|
||||||
|
|
||||||
|
### `priority/`
|
||||||
|
|
||||||
|
Level of urgency of a `kind/bug` issue or pull request.
|
||||||
|
|
||||||
|
| Label | Applies to | Description |
|
||||||
|
| ------------- | ---------- | ----------------------------------------------------------------------- |
|
||||||
|
| `priority/P0` | Issue, PR | Urgent: Security, critical bugs, blocking issues. |
|
||||||
|
| `priority/P1` | Issue, PR | Important: This is a top priority and a must-have for the next release. |
|
||||||
|
| `priority/P2` | Issue, PR | Normal: Default priority |
|
||||||
|
|
||||||
|
### `status/`
|
||||||
|
|
||||||
|
Current lifecycle state of the issue or pull request.
|
||||||
|
|
||||||
|
| Label | Applies to | Description |
|
||||||
|
| --------------------- | ---------- | ---------------------------------------------------------------------- |
|
||||||
|
| `status/accepted` | Issue, PR | The issue has been reviewed and accepted for implementation |
|
||||||
|
| `status/active` | PR | The PR is actively being worked on by a maintainer or community member |
|
||||||
|
| `status/blocked` | Issue, PR | The issue/PR is blocked from advancing to another status |
|
||||||
|
| `status/do-not-merge` | PR | Should not be merged pending further review or changes |
|
||||||
|
| `status/transfer` | Any | Transferred to another project |
|
||||||
|
| `status/triage` | Any | The item needs to be sorted by maintainers |
|
||||||
|
| `status/wontfix` | Issue, PR | The issue/PR will not be fixed or addressed as described |
|
||||||
|
|
||||||
|
## Types of releases
|
||||||
|
|
||||||
|
This project has feature releases, patch releases, and security releases.
|
||||||
|
|
||||||
|
### Feature releases
|
||||||
|
|
||||||
|
Feature releases are made from the development branch, followed by cutting a release branch for future patch releases, which may also occur during the code freeze period.
|
||||||
|
|
||||||
|
#### Release Candidates
|
||||||
|
|
||||||
|
Users can expect 2-3 release candidate (RC) test releases prior to a feature release. The first RC is typically released about one to two weeks before the final release.
|
||||||
|
|
||||||
|
#### Support Policy
|
||||||
|
|
||||||
|
Once a new feature release is cut, support for the previous feature release is discontinued. An exception may be made for urgent security releases that occur shortly after a new feature release. Buildx does not offer LTS (Long-Term Support) releases.
|
||||||
|
|
||||||
|
#### Contributing to Releases
|
||||||
|
|
||||||
|
Anyone can request that an issue or PR be included in the next feature or patch release milestone, provided it meets the necessary requirements.
|
||||||
|
|
||||||
|
### Patch releases
|
||||||
|
|
||||||
|
Patch releases should only include the most critical patches. Stability is vital, so everyone should always use the latest patch release.
|
||||||
|
|
||||||
|
If a fix is needed but does not qualify for a patch release because of its code size or other criteria that make it too unpredictable, we will prioritize cutting a new feature release sooner rather than making an exception for backporting.
|
||||||
|
|
||||||
|
Following PRs are included in patch releases
|
||||||
|
|
||||||
|
- `priority/P0` fixes
|
||||||
|
- `priority/P1` fixes, assuming maintainers don’t object because of the patch size
|
||||||
|
- `priority/P2` fixes, only if (both required)
|
||||||
|
- proposed by maintainer
|
||||||
|
- the patch is trivial and self-contained
|
||||||
|
- Documentation-only patches
|
||||||
|
- Vendored dependency updates, only if:
|
||||||
|
- Fixing (qualifying) bug or security issue in Buildx
|
||||||
|
- The patch is small, else a forked version of the dependency with only the patches required
|
||||||
|
|
||||||
|
New features do not qualify for patch release.
|
||||||
|
|
||||||
|
## Milestones
|
||||||
|
|
||||||
|
Milestones are used to help identify what releases a contribution will be in.
|
||||||
|
|
||||||
|
- The `v0.next` milestone collects unblocked items planned for the next 2-3 feature releases but not yet assigned to a specific version milestone.
|
||||||
|
- The `v0.backlog` milestone gathers all triaged items considered for the long-term (beyond the next 3 feature releases) or currently unfit for a future release due to certain conditions. These items may be blocked and need to be unblocked before progressing.
|
||||||
|
|
||||||
|
## Triage process
|
||||||
|
|
||||||
|
Triage provides an important way to contribute to an open-source project. When submitted without an issue this process applies to Pull Requests as well. Triage helps ensure work items are resolved quickly by:
|
||||||
|
|
||||||
|
- Ensuring the issue's intent and purpose are described precisely. This is necessary because it can be difficult for an issue to explain how an end user experiences a problem and what actions they took to arrive at the problem.
|
||||||
|
- Giving a contributor the information they need before they commit to resolving an issue.
|
||||||
|
- Lowering the issue count by preventing duplicate issues.
|
||||||
|
- Streamlining the development process by preventing duplicate discussions.
|
||||||
|
|
||||||
|
If you don't have time to code, consider helping with triage. The community will thank you for saving them time by spending some of yours. The same basic process should be applied upon receipt of a new issue.
|
||||||
|
|
||||||
|
1. Verify essential information
|
||||||
|
2. Classify the issue
|
||||||
|
3. Prioritizing the issue
|
||||||
|
|
||||||
|
### Verify essential information
|
||||||
|
|
||||||
|
Before advancing the triage process, ensure the issue contains all necessary information to be properly understood and assessed. The required information may vary by issue type, but typically includes the system environment, version numbers, reproduction steps, expected outcomes, and actual results.
|
||||||
|
|
||||||
|
- **Exercising Judgment**: Use your best judgment to assess the issue description’s completeness.
|
||||||
|
- **Communicating Needs**: If the information provided is insufficient, kindly request additional details from the author. Explain that this information is crucial for clarity and resolution of the issue, and apply the `needs/more-information` label to indicate a response from the author is required.
|
||||||
|
|
||||||
|
### Classify the issue
|
||||||
|
|
||||||
|
An issue will typically have multiple labels. These are used to help communicate key information about context, requirements, and status. At a minimum, a properly classified issue should have:
|
||||||
|
|
||||||
|
- (Required) One or more [`area/*`](#area) labels
|
||||||
|
- (Required) One [`kind/*`](#kind) label to indicate the type of issue
|
||||||
|
- (Required if `kind/bug`) A [`priority/*`](#priority) label
|
||||||
|
|
||||||
|
When assigning a decision the following labels should be present:
|
||||||
|
|
||||||
|
- (Required) One [`status/*`](#status) label to indicate lifecycle status
|
||||||
|
|
||||||
|
Additional labels can provide more clarity:
|
||||||
|
|
||||||
|
- Zero or more [`needs/*`](#needs) labels to indicate missing items
|
||||||
|
- Zero or more [`impact/*`](#impact) labels
|
||||||
|
- One [`exp/*`](#exp) label
|
||||||
|
|
||||||
|
## Prioritization guidelines for `kind/bug`
|
||||||
|
|
||||||
|
When an issue or pull request of `kind/bug` is correctly categorized and attached to a milestone, the labels indicate the urgency with which it should be completed.
|
||||||
|
|
||||||
|
**priority/P0**
|
||||||
|
|
||||||
|
Fixing this item is the highest priority. A patch release will follow as soon as a patch is available and verified. This level is used exclusively for bugs.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
|
||||||
|
- Regression in a critical code path
|
||||||
|
- Panic in a critical code path
|
||||||
|
- Corruption in critical code path or rest of the system
|
||||||
|
- Leaked zero-day critical security
|
||||||
|
|
||||||
|
**priority/P1**
|
||||||
|
|
||||||
|
Items with this label should be fixed with high priority and almost always included in a patch release. Unless waiting for another issue, patch releases should happen within a week. This level is not used for features or enhancements.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
|
||||||
|
- Any regression, panic
|
||||||
|
- Measurable performance regression
|
||||||
|
- A major bug in a new feature in the latest release
|
||||||
|
- Incompatibility with upgraded external dependency
|
||||||
|
|
||||||
|
**priority/P2**
|
||||||
|
|
||||||
|
This is the default priority and is implied in the absence of a `priority/` label. Bugs with this priority should be included in the next feature release but may land in a patch release if they are ready and unlikely to impact other functionality adversely. Non-bug issues with this priority should also be included in the next feature release if they are available and ready.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
|
||||||
|
- Confirmed bugs
|
||||||
|
- Bugs in non-default configurations
|
||||||
|
- Most enhancements
|
||||||
|
|
||||||
|
## Issue lifecycle
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
flowchart LR
|
||||||
|
create([New issue]) --> triage
|
||||||
|
subgraph triage[Triage Loop]
|
||||||
|
review[Review]
|
||||||
|
end
|
||||||
|
subgraph decision[Decision]
|
||||||
|
accept[Accept]
|
||||||
|
close[Close]
|
||||||
|
end
|
||||||
|
triage -- if accepted --> accept[Assign status, milestone]
|
||||||
|
triage -- if rejected --> close[Assign status, close issue]
|
||||||
|
```
|
||||||
|
|
||||||
|
### Examples
|
||||||
|
|
||||||
|
#### Submitting a bug
|
||||||
|
|
||||||
|
To help illustrate the issue life cycle let’s walk through submitting an issue as a potential bug in CI that enters a feedback loop and is eventually accepted as P2 priority and placed on the backlog.
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
flowchart LR
|
||||||
|
|
||||||
|
new([New issue])
|
||||||
|
|
||||||
|
subgraph triage[Triage]
|
||||||
|
direction LR
|
||||||
|
|
||||||
|
create["Action: Submit issue via Bug form\nLabels: kind/maybe-bug, status/triage"]
|
||||||
|
style create text-align:left
|
||||||
|
|
||||||
|
subgraph review[Review]
|
||||||
|
direction TB
|
||||||
|
classify["Action: Maintainer reviews issue, requests more info\nLabels: kind/maybe-bug, status/triage, needs/more-info, area/*"]
|
||||||
|
style classify text-align:left
|
||||||
|
|
||||||
|
update["Action: Author updates issue\nLabels: kind/maybe-bug, status/triage, needs/more-info, area/*"]
|
||||||
|
style update text-align:left
|
||||||
|
|
||||||
|
classify --> update
|
||||||
|
update --> classify
|
||||||
|
end
|
||||||
|
|
||||||
|
create --> review
|
||||||
|
end
|
||||||
|
|
||||||
|
subgraph decision[Decision]
|
||||||
|
accept["Action: Maintainer reviews updates, accepts, assigns milestone\nLabels: kind/bug, priority/P2, status/accepted, area/*, impact/*"]
|
||||||
|
style accept text-align: left
|
||||||
|
end
|
||||||
|
|
||||||
|
new --> triage
|
||||||
|
triage --> decision
|
||||||
|
```
|
||||||
|
|
||||||
|
## Pull request review process
|
||||||
|
|
||||||
|
A thorough and timely review process for pull requests (PRs) is crucial for maintaining the integrity and quality of the project while fostering a collaborative environment.
|
||||||
|
|
||||||
|
- **Labeling**: Most labels should be inherited from a linked issue. If no issue is linked an extended review process may be required.
|
||||||
|
- **Continuous Integration**: With few exceptions, it is crucial that all Continuous Integration (CI) workflows pass successfully.
|
||||||
|
- **Draft Status**: Incomplete or long-running PRs should be placed in "Draft" status. They may revert to "Draft" status upon initial review if significant rework is required.
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
flowchart LR
|
||||||
|
triage([Triage])
|
||||||
|
draft[Draft PR]
|
||||||
|
review[PR Review]
|
||||||
|
closed{{Close PR}}
|
||||||
|
merge{{Merge PR}}
|
||||||
|
|
||||||
|
subgraph feedback1[Feedback Loop]
|
||||||
|
draft
|
||||||
|
end
|
||||||
|
subgraph feedback2[Feedback Loop]
|
||||||
|
review
|
||||||
|
end
|
||||||
|
|
||||||
|
triage --> draft
|
||||||
|
draft --> review
|
||||||
|
review --> closed
|
||||||
|
review --> draft
|
||||||
|
review --> merge
|
||||||
|
```
|
||||||
|
|
||||||
|
## Handling stalled issues and pull requests
|
||||||
|
|
||||||
|
Unfortunately, some issues or pull requests can remain inactive for extended periods. To mitigate this, automation is employed to prompt both the author and maintainers, ensuring that all contributions receive appropriate attention.
|
||||||
|
|
||||||
|
**For Authors:**
|
||||||
|
|
||||||
|
- **Closure of Inactive Items**: If your issue or PR becomes irrelevant or is no longer needed, please close it to help keep the project clean.
|
||||||
|
- **Prompt Responses**: If additional information is requested, please respond promptly to facilitate progress.
|
||||||
|
|
||||||
|
**For Maintainers:**
|
||||||
|
|
||||||
|
- **Timely Responses**: Endeavor to address issues and PRs within a reasonable timeframe to keep the community actively engaged.
|
||||||
|
- **Engagement with Stale Issues**: If an issue becomes stale due to maintainer inaction, re-engage with the author to reassess and revitalize the discussion.
|
||||||
|
|
||||||
|
**Stale and Rotten Policy:**
|
||||||
|
|
||||||
|
- An issue or PR will be labeled as **`stale`** after 14 calendar days of inactivity. If it remains inactive for another 30 days, it will be labeled as **`rotten`** and closed.
|
||||||
|
- Authors whose issues or PRs have been closed are welcome to re-open them or create new ones and link to the original.
|
||||||
|
|
||||||
|
**Skipping Stale Processing:**
|
||||||
|
|
||||||
|
- To prevent an issue or PR from being marked as stale, label it as **`frozen`**.
|
||||||
|
|
||||||
|
**Exceptions to Stale Processing:**
|
||||||
|
|
||||||
|
- Issues or PRs marked as **`frozen`**.
|
||||||
|
- Issues or PRs assigned to a milestone.
|
||||||
|
|
||||||
|
## Moving to a discussion
|
||||||
|
|
||||||
|
Sometimes, an issue or pull request may not be the appropriate medium for what is essentially a discussion. In such cases, the issue or PR will either be converted to a discussion or a new discussion will be created. The original item will then be labeled appropriately (**`kind/discussion`** or **`kind/question`**) and closed.
|
||||||
|
|
||||||
|
If you believe this conversion was made in error, please express your concerns in the new discussion thread. If necessary, a reversal to the original issue or PR format can be facilitated.
|
||||||
|
|
||||||
|
## Workflow automation
|
||||||
|
|
||||||
|
To help expedite common operations, avoid errors and reduce toil some workflow automation is used by the project. This can include:
|
||||||
|
|
||||||
|
- Stale issue or pull request processing
|
||||||
|
- Auto-labeling actions
|
||||||
|
- Auto-response actions
|
||||||
|
- Label carry over from issue to pull request
|
||||||
|
|
||||||
|
### Exempting an issue/PR from stale bot processing
|
||||||
|
|
||||||
|
The stale item handling is configured in the [repository](link-to-config-file). To exempt an issue or PR from stale processing you can:
|
||||||
|
|
||||||
|
- Add the item to a milestone
|
||||||
|
- Add the `frozen` label to the item
|
||||||
|
|
||||||
|
## Updating dependencies
|
||||||
|
|
||||||
|
- **Runtime Dependencies**: Use the latest stable release available when the first Release Candidate (RC) of a new feature release is cut. For patch releases, update to the latest corresponding patch release of the dependency.
|
||||||
|
- **Other Dependencies**: Always permitted to update to the latest patch release in the development branch. Updates to a new feature release require justification, unless the dependency is outdated. Prefer tagged versions of dependencies unless a specific untagged commit is needed. Go modules should specify the lowest compatible version; there is no requirement to update all dependencies to their latest versions before cutting a new Buildx feature release.
|
||||||
|
- **Patch Releases**: Vendored dependency updates are considered for patch releases, except in the rare cases specified previously.
|
||||||
|
- **Security Considerations**: A security scanner report indicating a non-exploitable issue via Buildx does not justify backports.
|
||||||
55
README.md
55
README.md
@@ -2,7 +2,7 @@
 [](https://github.com/docker/buildx/releases/latest)
 [](https://pkg.go.dev/github.com/docker/buildx)
 [](https://github.com/docker/buildx/actions?query=workflow%3Abuild)
 [](https://goreportcard.com/report/github.com/docker/buildx)
 [](https://codecov.io/gh/docker/buildx)

@@ -32,16 +32,6 @@ Key features:
 - [Building with buildx](#building-with-buildx)
 - [Working with builder instances](#working-with-builder-instances)
 - [Building multi-platform images](#building-multi-platform-images)
-- [Guides](docs/guides)
-- [High-level build options with Bake](docs/guides/bake/index.md)
-- [CI/CD](docs/guides/cicd.md)
-- [CNI networking](docs/guides/cni-networking.md)
-- [Using a custom network](docs/guides/custom-network.md)
-- [Using a custom registry configuration](docs/guides/custom-registry-config.md)
-- [OpenTelemetry support](docs/guides/opentelemetry.md)
-- [Registry mirror](docs/guides/registry-mirror.md)
-- [Drivers](docs/guides/drivers/index.md)
-- [Resource limiting](docs/guides/resource-limiting.md)
 - [Reference](docs/reference/buildx.md)
 - [`buildx bake`](docs/reference/buildx_bake.md)
 - [`buildx build`](docs/reference/buildx_build.md)
@@ -51,21 +41,25 @@ Key features:
 - [`buildx imagetools create`](docs/reference/buildx_imagetools_create.md)
 - [`buildx imagetools inspect`](docs/reference/buildx_imagetools_inspect.md)
 - [`buildx inspect`](docs/reference/buildx_inspect.md)
-- [`buildx install`](docs/reference/buildx_install.md)
 - [`buildx ls`](docs/reference/buildx_ls.md)
 - [`buildx prune`](docs/reference/buildx_prune.md)
 - [`buildx rm`](docs/reference/buildx_rm.md)
 - [`buildx stop`](docs/reference/buildx_stop.md)
-- [`buildx uninstall`](docs/reference/buildx_uninstall.md)
 - [`buildx use`](docs/reference/buildx_use.md)
 - [`buildx version`](docs/reference/buildx_version.md)
 - [Contributing](#contributing)

+For more information on how to use Buildx, see
+[Docker Build docs](https://docs.docker.com/build/).
+
 # Installing

-Using `buildx` as a docker CLI plugin requires using Docker 19.03 or newer.
-A limited set of functionality works with older versions of Docker when
-invoking the binary directly.
+Using `buildx` with Docker requires Docker engine 19.03 or newer.
+
+> [!WARNING]
+> Using an incompatible version of Docker may result in unexpected behavior,
+> and will likely cause issues, especially when using Buildx builders with more
+> recent versions of BuildKit.

 ## Windows and macOS

@@ -74,13 +68,13 @@ for Windows and macOS.

 ## Linux packages

-Docker Linux packages also include Docker Buildx when installed using the
-[DEB or RPM packages](https://docs.docker.com/engine/install/).
+Docker Engine package repositories contain Docker Buildx packages when installed according to the
+[Docker Engine install documentation](https://docs.docker.com/engine/install/). Install the
+`docker-buildx-plugin` package to install the Buildx plugin.

 ## Manual download

-> **Important**
->
+> [!IMPORTANT]
 > This section is for unattended installation of the buildx component. These
 > instructions are mostly suitable for testing purposes. We do not recommend
 > installing buildx using manual download in production environments as they
@@ -111,8 +105,7 @@ On Windows:
 * `C:\ProgramData\Docker\cli-plugins`
 * `C:\Program Files\Docker\cli-plugins`

-> **Note**
->
+> [!NOTE]
 > On Unix environments, it may also be necessary to make it executable with `chmod +x`:
 > ```shell
 > $ chmod +x ~/.docker/cli-plugins/docker-buildx
@@ -123,7 +116,8 @@ On Windows:
 Here is how to install and use Buildx inside a Dockerfile through the
 [`docker/buildx-bin`](https://hub.docker.com/r/docker/buildx-bin) image:

-```Dockerfile
+```dockerfile
+# syntax=docker/dockerfile:1
 FROM docker
 COPY --from=docker/buildx-bin /buildx /usr/libexec/docker/cli-plugins/docker-buildx
 RUN docker buildx version
@@ -143,7 +137,7 @@ To remove this alias, run [`docker buildx uninstall`](docs/reference/buildx_unin
 # Buildx 0.6+
 $ docker buildx bake "https://github.com/docker/buildx.git"
 $ mkdir -p ~/.docker/cli-plugins
-$ mv ./bin/buildx ~/.docker/cli-plugins/docker-buildx
+$ mv ./bin/build/buildx ~/.docker/cli-plugins/docker-buildx

 # Docker 19.03+
 $ DOCKER_BUILDKIT=1 docker build --platform=local -o . "https://github.com/docker/buildx.git"
@@ -190,12 +184,12 @@ through various "drivers". Each driver defines how and where a build should
 run, and have different feature sets.

 We currently support the following drivers:
-- The `docker` driver ([guide](docs/guides/drivers/docker.md), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
-- The `docker-container` driver ([guide](docs/guides/drivers/docker-container.md), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
-- The `kubernetes` driver ([guide](docs/guides/drivers/kubernetes.md), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
-- The `remote` driver ([guide](docs/guides/drivers/remote.md))
+- The `docker` driver ([guide](https://docs.docker.com/build/drivers/docker/), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
+- The `docker-container` driver ([guide](https://docs.docker.com/build/drivers/docker-container/), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
+- The `kubernetes` driver ([guide](https://docs.docker.com/build/drivers/kubernetes/), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
+- The `remote` driver ([guide](https://docs.docker.com/build/drivers/remote/))

-For more information on drivers, see the [drivers guide](docs/guides/drivers/index.md).
+For more information on drivers, see the [drivers guide](https://docs.docker.com/build/drivers/).

 ## Working with builder instances

@@ -298,6 +292,7 @@ inside your Dockerfile and can be leveraged by the processes running as part
 of your build.

 ```dockerfile
+# syntax=docker/dockerfile:1
 FROM --platform=$BUILDPLATFORM golang:alpine AS build
 ARG TARGETPLATFORM
 ARG BUILDPLATFORM
@@ -311,7 +306,7 @@ cross-compilation helpers for more advanced use-cases.

 ## High-level build options

-See [`docs/guides/bake/index.md`](docs/guides/bake/index.md) for more details.
+See [High-level builds with Bake](https://docs.docker.com/build/bake/) for more details.

 # Contributing

1194 bake/bake.go (file diff suppressed because it is too large)
1478 bake/bake_test.go (file diff suppressed because it is too large)
290 bake/compose.go
@@ -1,37 +1,56 @@
|
|||||||
package bake
|
package bake
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"slices"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/compose-spec/compose-go/dotenv"
|
"github.com/compose-spec/compose-go/v2/consts"
|
||||||
"github.com/compose-spec/compose-go/loader"
|
"github.com/compose-spec/compose-go/v2/dotenv"
|
||||||
compose "github.com/compose-spec/compose-go/types"
|
"github.com/compose-spec/compose-go/v2/loader"
|
||||||
|
composetypes "github.com/compose-spec/compose-go/v2/types"
|
||||||
|
"github.com/docker/buildx/util/buildflags"
|
||||||
|
dockeropts "github.com/docker/cli/opts"
|
||||||
|
"github.com/docker/go-units"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"gopkg.in/yaml.v3"
|
"gopkg.in/yaml.v3"
|
||||||
)
|
)
|
||||||
|
|
||||||
// errComposeInvalid is returned when a compose file is invalid
|
func ParseComposeFiles(fs []File) (*Config, error) {
|
||||||
var errComposeInvalid = errors.New("invalid compose file")
|
envs, err := composeEnv()
|
||||||
|
|
||||||
func ParseCompose(dt []byte, envs map[string]string) (*Config, error) {
|
|
||||||
cfg, err := loader.Load(compose.ConfigDetails{
|
|
||||||
ConfigFiles: []compose.ConfigFile{
|
|
||||||
{
|
|
||||||
Content: dt,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Environment: envs,
|
|
||||||
}, func(options *loader.Options) {
|
|
||||||
options.SkipNormalization = true
|
|
||||||
options.SkipConsistencyCheck = true
|
|
||||||
})
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if err = composeValidate(cfg); err != nil {
|
var cfgs []composetypes.ConfigFile
|
||||||
|
for _, f := range fs {
|
||||||
|
cfgs = append(cfgs, composetypes.ConfigFile{
|
||||||
|
Filename: f.Name,
|
||||||
|
Content: f.Data,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return ParseCompose(cfgs, envs)
|
||||||
|
}
|
||||||
|
|
||||||
|
func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Config, error) {
|
||||||
|
if envs == nil {
|
||||||
|
envs = make(map[string]string)
|
||||||
|
}
|
||||||
|
cfg, err := loader.LoadWithContext(context.Background(), composetypes.ConfigDetails{
|
||||||
|
ConfigFiles: cfgs,
|
||||||
|
Environment: envs,
|
||||||
|
}, func(options *loader.Options) {
|
||||||
|
projectName := "bake"
|
||||||
|
if v, ok := envs[consts.ComposeProjectName]; ok && v != "" {
|
||||||
|
projectName = v
|
||||||
|
}
|
||||||
|
options.SetProjectName(projectName, false)
|
||||||
|
options.SkipNormalization = true
|
||||||
|
options.Profiles = []string{"*"}
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -43,8 +62,9 @@ func ParseCompose(dt []byte, envs map[string]string) (*Config, error) {
|
|||||||
g := &Group{Name: "default"}
|
g := &Group{Name: "default"}
|
||||||
|
|
||||||
for _, s := range cfg.Services {
|
for _, s := range cfg.Services {
|
||||||
|
s := s
|
||||||
if s.Build == nil {
|
if s.Build == nil {
|
||||||
s.Build = &compose.BuildConfig{}
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
targetName := sanitizeTargetName(s.Name)
|
targetName := sanitizeTargetName(s.Name)
|
||||||
@@ -62,8 +82,57 @@ func ParseCompose(dt []byte, envs map[string]string) (*Config, error) {
|
|||||||
dockerfilePath := s.Build.Dockerfile
|
dockerfilePath := s.Build.Dockerfile
|
||||||
dockerfilePathP = &dockerfilePath
|
dockerfilePathP = &dockerfilePath
|
||||||
}
|
}
|
||||||
|
var dockerfileInlineP *string
|
||||||
|
if s.Build.DockerfileInline != "" {
|
||||||
|
dockerfileInline := s.Build.DockerfileInline
|
||||||
|
dockerfileInlineP = &dockerfileInline
|
||||||
|
}
|
||||||
|
|
||||||
var secrets []string
|
var additionalContexts map[string]string
|
||||||
|
if s.Build.AdditionalContexts != nil {
|
||||||
|
additionalContexts = map[string]string{}
|
||||||
|
for k, v := range s.Build.AdditionalContexts {
|
||||||
|
if strings.HasPrefix(v, "service:") {
|
||||||
|
v = strings.Replace(v, "service:", "target:", 1)
|
||||||
|
}
|
||||||
|
additionalContexts[k] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var shmSize *string
|
||||||
|
if s.Build.ShmSize > 0 {
|
||||||
|
shmSizeBytes := dockeropts.MemBytes(s.Build.ShmSize)
|
||||||
|
shmSizeStr := shmSizeBytes.String()
|
||||||
|
shmSize = &shmSizeStr
|
||||||
|
}
|
||||||
|
|
||||||
|
var networkModeP *string
|
||||||
|
if s.Build.Network != "" {
|
||||||
|
networkMode := s.Build.Network
|
||||||
|
networkModeP = &networkMode
|
||||||
|
}
|
||||||
|
|
||||||
|
var ulimits []string
|
||||||
|
if s.Build.Ulimits != nil {
|
||||||
|
for n, u := range s.Build.Ulimits {
|
||||||
|
ulimit, err := units.ParseUlimit(fmt.Sprintf("%s=%d:%d", n, u.Soft, u.Hard))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
ulimits = append(ulimits, ulimit.String())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var ssh []*buildflags.SSH
|
||||||
|
for _, bkey := range s.Build.SSH {
|
||||||
|
sshkey := composeToBuildkitSSH(bkey)
|
||||||
|
ssh = append(ssh, sshkey)
|
||||||
|
}
|
||||||
|
slices.SortFunc(ssh, func(a, b *buildflags.SSH) int {
|
||||||
|
return a.Less(b)
|
||||||
|
})
|
||||||
|
|
||||||
|
var secrets []*buildflags.Secret
|
||||||
for _, bs := range s.Build.Secrets {
|
for _, bs := range s.Build.Secrets {
|
||||||
secret, err := composeToBuildkitSecret(bs, cfg.Secrets[bs.Source])
|
secret, err := composeToBuildkitSecret(bs, cfg.Secrets[bs.Source])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -72,13 +141,32 @@ func ParseCompose(dt []byte, envs map[string]string) (*Config, error) {
|
|||||||
secrets = append(secrets, secret)
|
secrets = append(secrets, secret)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// compose does not support nil values for labels
|
||||||
|
labels := map[string]*string{}
|
||||||
|
for k, v := range s.Build.Labels {
|
||||||
|
v := v
|
||||||
|
labels[k] = &v
|
||||||
|
}
|
||||||
|
|
||||||
|
cacheFrom, err := buildflags.ParseCacheEntry(s.Build.CacheFrom)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
cacheTo, err := buildflags.ParseCacheEntry(s.Build.CacheTo)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
g.Targets = append(g.Targets, targetName)
|
g.Targets = append(g.Targets, targetName)
|
||||||
t := &Target{
|
t := &Target{
|
||||||
Name: targetName,
|
Name: targetName,
|
||||||
Context: contextPathP,
|
Context: contextPathP,
|
||||||
|
Contexts: additionalContexts,
|
||||||
Dockerfile: dockerfilePathP,
|
Dockerfile: dockerfilePathP,
|
||||||
|
DockerfileInline: dockerfileInlineP,
|
||||||
Tags: s.Build.Tags,
|
Tags: s.Build.Tags,
|
||||||
Labels: s.Build.Labels,
|
Labels: labels,
|
||||||
Args: flatten(s.Build.Args.Resolve(func(val string) (string, bool) {
|
Args: flatten(s.Build.Args.Resolve(func(val string) (string, bool) {
|
||||||
if val, ok := s.Environment[val]; ok && val != nil {
|
if val, ok := s.Environment[val]; ok && val != nil {
|
||||||
return *val, true
|
return *val, true
|
||||||
@@ -86,10 +174,14 @@ func ParseCompose(dt []byte, envs map[string]string) (*Config, error) {
|
|||||||
val, ok := cfg.Environment[val]
|
val, ok := cfg.Environment[val]
|
||||||
return val, ok
|
return val, ok
|
||||||
})),
|
})),
|
||||||
CacheFrom: s.Build.CacheFrom,
|
CacheFrom: cacheFrom,
|
||||||
CacheTo: s.Build.CacheTo,
|
CacheTo: cacheTo,
|
||||||
NetworkMode: &s.Build.Network,
|
NetworkMode: networkModeP,
|
||||||
|
Platforms: s.Build.Platforms,
|
||||||
|
SSH: ssh,
|
||||||
Secrets: secrets,
|
Secrets: secrets,
|
||||||
|
ShmSize: shmSize,
|
||||||
|
Ulimits: ulimits,
|
||||||
}
|
}
|
||||||
if err = t.composeExtTarget(s.Build.Extensions); err != nil {
|
if err = t.composeExtTarget(s.Build.Extensions); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -104,12 +196,56 @@ func ParseCompose(dt []byte, envs map[string]string) (*Config, error) {
|
|||||||
c.Targets = append(c.Targets, t)
|
c.Targets = append(c.Targets, t)
|
||||||
}
|
}
|
||||||
c.Groups = append(c.Groups, g)
|
c.Groups = append(c.Groups, g)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return &c, nil
|
return &c, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func validateComposeFile(dt []byte, fn string) (bool, error) {
|
||||||
|
envs, err := composeEnv()
|
||||||
|
if err != nil {
|
||||||
|
return true, err
|
||||||
|
}
|
||||||
|
fnl := strings.ToLower(fn)
|
||||||
|
if strings.HasSuffix(fnl, ".yml") || strings.HasSuffix(fnl, ".yaml") {
|
||||||
|
return true, validateCompose(dt, envs)
|
||||||
|
}
|
||||||
|
if strings.HasSuffix(fnl, ".json") || strings.HasSuffix(fnl, ".hcl") {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
err = validateCompose(dt, envs)
|
||||||
|
return err == nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateCompose(dt []byte, envs map[string]string) error {
|
||||||
|
_, err := loader.LoadWithContext(context.Background(), composetypes.ConfigDetails{
|
||||||
|
ConfigFiles: []composetypes.ConfigFile{
|
||||||
|
{
|
||||||
|
Content: dt,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Environment: envs,
|
||||||
|
}, func(options *loader.Options) {
|
||||||
|
options.SetProjectName("bake", false)
|
||||||
|
options.SkipNormalization = true
|
||||||
|
// consistency is checked later in ParseCompose to ensure multiple
|
||||||
|
// compose files can be merged together
|
||||||
|
options.SkipConsistencyCheck = true
|
||||||
|
})
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func composeEnv() (map[string]string, error) {
|
||||||
|
envs := sliceToMap(os.Environ())
|
||||||
|
if wd, err := os.Getwd(); err == nil {
|
||||||
|
envs, err = loadDotEnv(envs, wd)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return envs, nil
|
||||||
|
}
|
||||||
|
|
||||||
func loadDotEnv(curenv map[string]string, workingDir string) (map[string]string, error) {
|
func loadDotEnv(curenv map[string]string, workingDir string) (map[string]string, error) {
|
||||||
if curenv == nil {
|
if curenv == nil {
|
||||||
curenv = make(map[string]string)
|
curenv = make(map[string]string)
|
||||||
@@ -131,7 +267,7 @@ func loadDotEnv(curenv map[string]string, workingDir string) (map[string]string,
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
envs, err := dotenv.UnmarshalBytes(dt)
|
envs, err := dotenv.UnmarshalBytesWithLookup(dt, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -146,16 +282,16 @@ func loadDotEnv(curenv map[string]string, workingDir string) (map[string]string,
|
|||||||
return curenv, nil
|
return curenv, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func flatten(in compose.MappingWithEquals) compose.Mapping {
|
func flatten(in composetypes.MappingWithEquals) map[string]*string {
|
||||||
if len(in) == 0 {
|
if len(in) == 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
out := compose.Mapping{}
|
out := map[string]*string{}
|
||||||
for k, v := range in {
|
for k, v := range in {
|
||||||
if v == nil {
|
if v == nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
out[k] = *v
|
out[k] = v
|
||||||
}
|
}
|
||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
@@ -175,13 +311,15 @@ type xbake struct {
|
|||||||
NoCacheFilter stringArray `yaml:"no-cache-filter,omitempty"`
|
NoCacheFilter stringArray `yaml:"no-cache-filter,omitempty"`
|
||||||
Contexts stringMap `yaml:"contexts,omitempty"`
|
Contexts stringMap `yaml:"contexts,omitempty"`
|
||||||
// don't forget to update documentation if you add a new field:
|
// don't forget to update documentation if you add a new field:
|
||||||
// docs/guides/bake/compose-file.md#extension-field-with-x-bake
|
// https://github.com/docker/docs/blob/main/content/build/bake/compose-file.md#extension-field-with-x-bake
|
||||||
}
|
}
|
||||||
|
|
||||||
type stringMap map[string]string
|
type (
|
||||||
type stringArray []string
|
stringMap map[string]string
|
||||||
|
stringArray []string
|
||||||
|
)
|
||||||
|
|
||||||
func (sa *stringArray) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (sa *stringArray) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
var multi []string
|
var multi []string
|
||||||
err := unmarshal(&multi)
|
err := unmarshal(&multi)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -198,7 +336,7 @@ func (sa *stringArray) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
|||||||
|
|
||||||
// composeExtTarget converts Compose build extension x-bake to bake Target
|
// composeExtTarget converts Compose build extension x-bake to bake Target
|
||||||
// https://github.com/compose-spec/compose-spec/blob/master/spec.md#extension
|
// https://github.com/compose-spec/compose-spec/blob/master/spec.md#extension
|
||||||
func (t *Target) composeExtTarget(exts map[string]interface{}) error {
|
func (t *Target) composeExtTarget(exts map[string]any) error {
|
||||||
var xb xbake
|
var xb xbake
|
||||||
|
|
||||||
ext, ok := exts["x-bake"]
|
ext, ok := exts["x-bake"]
|
||||||
@@ -215,22 +353,45 @@ func (t *Target) composeExtTarget(exts map[string]interface{}) error {
|
|||||||
t.Tags = dedupSlice(append(t.Tags, xb.Tags...))
|
t.Tags = dedupSlice(append(t.Tags, xb.Tags...))
|
||||||
}
|
}
|
||||||
if len(xb.CacheFrom) > 0 {
|
if len(xb.CacheFrom) > 0 {
|
||||||
t.CacheFrom = dedupSlice(append(t.CacheFrom, xb.CacheFrom...))
|
cacheFrom, err := buildflags.ParseCacheEntry(xb.CacheFrom)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
t.CacheFrom = t.CacheFrom.Merge(cacheFrom)
|
||||||
}
|
}
|
||||||
if len(xb.CacheTo) > 0 {
|
if len(xb.CacheTo) > 0 {
|
||||||
t.CacheTo = dedupSlice(append(t.CacheTo, xb.CacheTo...))
|
cacheTo, err := buildflags.ParseCacheEntry(xb.CacheTo)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
t.CacheTo = t.CacheTo.Merge(cacheTo)
|
||||||
}
|
}
|
||||||
if len(xb.Secrets) > 0 {
|
if len(xb.Secrets) > 0 {
|
||||||
t.Secrets = dedupSlice(append(t.Secrets, xb.Secrets...))
|
secrets, err := parseArrValue[buildflags.Secret](xb.Secrets)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
t.Secrets = t.Secrets.Merge(secrets)
|
||||||
}
|
}
|
||||||
if len(xb.SSH) > 0 {
|
if len(xb.SSH) > 0 {
|
||||||
t.SSH = dedupSlice(append(t.SSH, xb.SSH...))
|
ssh, err := parseArrValue[buildflags.SSH](xb.SSH)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
t.SSH = t.SSH.Merge(ssh)
|
||||||
|
slices.SortFunc(t.SSH, func(a, b *buildflags.SSH) int {
|
||||||
|
return a.Less(b)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
if len(xb.Platforms) > 0 {
|
if len(xb.Platforms) > 0 {
|
||||||
t.Platforms = dedupSlice(append(t.Platforms, xb.Platforms...))
|
t.Platforms = dedupSlice(append(t.Platforms, xb.Platforms...))
|
||||||
}
|
}
|
||||||
if len(xb.Outputs) > 0 {
|
if len(xb.Outputs) > 0 {
|
||||||
t.Outputs = dedupSlice(append(t.Outputs, xb.Outputs...))
|
outputs, err := parseArrValue[buildflags.ExportEntry](xb.Outputs)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
t.Outputs = t.Outputs.Merge(outputs)
|
||||||
}
|
}
|
||||||
if xb.Pull != nil {
|
if xb.Pull != nil {
|
||||||
t.Pull = xb.Pull
|
t.Pull = xb.Pull
|
||||||
@@ -248,45 +409,32 @@ func (t *Target) composeExtTarget(exts map[string]interface{}) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// composeValidate validates a compose file
|
|
||||||
func composeValidate(project *compose.Project) error {
|
|
||||||
for _, s := range project.Services {
|
|
||||||
if s.Build != nil {
|
|
||||||
for _, secret := range s.Build.Secrets {
|
|
||||||
if _, ok := project.Secrets[secret.Source]; !ok {
|
|
||||||
return errors.Wrap(errComposeInvalid, fmt.Sprintf("service %q refers to undefined build secret %s", sanitizeTargetName(s.Name), secret.Source))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for name, secret := range project.Secrets {
|
|
||||||
if secret.External.External {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if secret.File == "" && secret.Environment == "" {
|
|
||||||
return errors.Wrap(errComposeInvalid, fmt.Sprintf("secret %q must declare either `file` or `environment`", name))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// composeToBuildkitSecret converts secret from compose format to buildkit's
|
// composeToBuildkitSecret converts secret from compose format to buildkit's
|
||||||
// csv format.
|
// csv format.
|
||||||
func composeToBuildkitSecret(inp compose.ServiceSecretConfig, psecret compose.SecretConfig) (string, error) {
|
func composeToBuildkitSecret(inp composetypes.ServiceSecretConfig, psecret composetypes.SecretConfig) (*buildflags.Secret, error) {
|
||||||
if psecret.External.External {
|
if psecret.External {
|
||||||
return "", errors.Errorf("unsupported external secret %s", psecret.Name)
|
return nil, errors.Errorf("unsupported external secret %s", psecret.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
var bkattrs []string
|
secret := &buildflags.Secret{}
|
||||||
if inp.Source != "" {
|
if inp.Source != "" {
|
||||||
bkattrs = append(bkattrs, "id="+inp.Source)
|
secret.ID = inp.Source
|
||||||
}
|
}
|
||||||
if psecret.File != "" {
|
if psecret.File != "" {
|
||||||
bkattrs = append(bkattrs, "src="+psecret.File)
|
secret.FilePath = psecret.File
|
||||||
}
|
}
|
||||||
if psecret.Environment != "" {
|
if psecret.Environment != "" {
|
||||||
bkattrs = append(bkattrs, "env="+psecret.Environment)
|
secret.Env = psecret.Environment
|
||||||
}
|
}
|
||||||
|
return secret, nil
|
||||||
return strings.Join(bkattrs, ","), nil
|
}
|
||||||
|
|
||||||
|
// composeToBuildkitSSH converts secret from compose format to buildkit's
|
||||||
|
// csv format.
|
||||||
|
func composeToBuildkitSSH(sshKey composetypes.SSHKey) *buildflags.SSH {
|
||||||
|
bkssh := &buildflags.SSH{ID: sshKey.ID}
|
||||||
|
if sshKey.Path != "" {
|
||||||
|
bkssh.Paths = []string{sshKey.Path}
|
||||||
|
}
|
||||||
|
return bkssh
|
||||||
}
|
}
|
||||||
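The new `ParseComposeFiles` entry point in the diff above accepts a list of compose files instead of a single byte slice. A minimal sketch of how a caller inside this package might use it; the compose YAML, the file name, and the `exampleParseComposeFiles` helper are illustrative and not part of this diff:

```go
// Hypothetical caller of the ParseComposeFiles API shown above.
// File{Name, Data} and Config come from this package; the YAML below
// is an example, not content taken from the diff.
func exampleParseComposeFiles() (*Config, error) {
	dt := []byte(`
services:
  app:
    build:
      context: .
`)
	// ParseComposeFiles gathers the process environment (plus any .env
	// file in the working directory) via composeEnv before delegating
	// to ParseCompose, so callers only supply the file contents.
	return ParseComposeFiles([]File{{Name: "compose.yml", Data: dt}})
}
```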
bake/compose_test.go
@@ -6,11 +6,13 @@ import (
|
|||||||
"sort"
|
"sort"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
composetypes "github.com/compose-spec/compose-go/v2/types"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestParseCompose(t *testing.T) {
|
func TestParseCompose(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
db:
|
db:
|
||||||
build: ./db
|
build: ./db
|
||||||
@@ -19,6 +21,8 @@ services:
|
|||||||
webapp:
|
webapp:
|
||||||
build:
|
build:
|
||||||
context: ./dir
|
context: ./dir
|
||||||
|
additional_contexts:
|
||||||
|
foo: ./bar
|
||||||
dockerfile: Dockerfile-alternate
|
dockerfile: Dockerfile-alternate
|
||||||
network:
|
network:
|
||||||
none
|
none
|
||||||
@@ -28,9 +32,19 @@ services:
|
|||||||
- type=local,src=path/to/cache
|
- type=local,src=path/to/cache
|
||||||
cache_to:
|
cache_to:
|
||||||
- type=local,dest=path/to/cache
|
- type=local,dest=path/to/cache
|
||||||
|
ssh:
|
||||||
|
- key=/path/to/key
|
||||||
|
- default
|
||||||
secrets:
|
secrets:
|
||||||
- token
|
- token
|
||||||
- aws
|
- aws
|
||||||
|
webapp2:
|
||||||
|
profiles:
|
||||||
|
- test
|
||||||
|
build:
|
||||||
|
context: ./dir
|
||||||
|
dockerfile_inline: |
|
||||||
|
FROM alpine
|
||||||
secrets:
|
secrets:
|
||||||
token:
|
token:
|
||||||
environment: ENV_TOKEN
|
environment: ENV_TOKEN
|
||||||
@@ -38,51 +52,58 @@ secrets:
|
|||||||
file: /root/.aws/credentials
|
file: /root/.aws/credentials
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseCompose(dt, nil)
|
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Groups))
|
require.Equal(t, 1, len(c.Groups))
|
||||||
require.Equal(t, "default", c.Groups[0].Name)
|
require.Equal(t, "default", c.Groups[0].Name)
|
||||||
sort.Strings(c.Groups[0].Targets)
|
sort.Strings(c.Groups[0].Targets)
|
||||||
require.Equal(t, []string{"db", "webapp"}, c.Groups[0].Targets)
|
require.Equal(t, []string{"db", "webapp", "webapp2"}, c.Groups[0].Targets)
|
||||||
|
|
||||||
require.Equal(t, 2, len(c.Targets))
|
require.Equal(t, 3, len(c.Targets))
|
||||||
sort.Slice(c.Targets, func(i, j int) bool {
|
sort.Slice(c.Targets, func(i, j int) bool {
|
||||||
return c.Targets[i].Name < c.Targets[j].Name
|
return c.Targets[i].Name < c.Targets[j].Name
|
||||||
})
|
})
|
||||||
require.Equal(t, "db", c.Targets[0].Name)
|
require.Equal(t, "db", c.Targets[0].Name)
|
||||||
require.Equal(t, "./db", *c.Targets[0].Context)
|
require.Equal(t, "db", *c.Targets[0].Context)
|
||||||
require.Equal(t, []string{"docker.io/tonistiigi/db"}, c.Targets[0].Tags)
|
require.Equal(t, []string{"docker.io/tonistiigi/db"}, c.Targets[0].Tags)
|
||||||
|
|
||||||
require.Equal(t, "webapp", c.Targets[1].Name)
|
require.Equal(t, "webapp", c.Targets[1].Name)
|
||||||
require.Equal(t, "./dir", *c.Targets[1].Context)
|
require.Equal(t, "dir", *c.Targets[1].Context)
|
||||||
|
require.Equal(t, map[string]string{"foo": "bar"}, c.Targets[1].Contexts)
|
||||||
require.Equal(t, "Dockerfile-alternate", *c.Targets[1].Dockerfile)
|
require.Equal(t, "Dockerfile-alternate", *c.Targets[1].Dockerfile)
|
||||||
require.Equal(t, 1, len(c.Targets[1].Args))
|
require.Equal(t, 1, len(c.Targets[1].Args))
|
||||||
require.Equal(t, "123", c.Targets[1].Args["buildno"])
|
require.Equal(t, ptrstr("123"), c.Targets[1].Args["buildno"])
|
||||||
require.Equal(t, []string{"type=local,src=path/to/cache"}, c.Targets[1].CacheFrom)
|
require.Equal(t, []string{"type=local,src=path/to/cache"}, stringify(c.Targets[1].CacheFrom))
|
||||||
require.Equal(t, []string{"type=local,dest=path/to/cache"}, c.Targets[1].CacheTo)
|
require.Equal(t, []string{"type=local,dest=path/to/cache"}, stringify(c.Targets[1].CacheTo))
|
||||||
require.Equal(t, "none", *c.Targets[1].NetworkMode)
|
require.Equal(t, "none", *c.Targets[1].NetworkMode)
|
||||||
|
require.Equal(t, []string{"default", "key=/path/to/key"}, stringify(c.Targets[1].SSH))
|
||||||
require.Equal(t, []string{
|
require.Equal(t, []string{
|
||||||
"id=token,env=ENV_TOKEN",
|
|
||||||
"id=aws,src=/root/.aws/credentials",
|
"id=aws,src=/root/.aws/credentials",
|
||||||
}, c.Targets[1].Secrets)
|
"id=token,env=ENV_TOKEN",
|
||||||
|
}, stringify(c.Targets[1].Secrets))
|
||||||
|
|
||||||
|
require.Equal(t, "webapp2", c.Targets[2].Name)
|
||||||
|
require.Equal(t, "dir", *c.Targets[2].Context)
|
||||||
|
require.Equal(t, "FROM alpine\n", *c.Targets[2].DockerfileInline)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNoBuildOutOfTreeService(t *testing.T) {
|
func TestNoBuildOutOfTreeService(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
external:
|
external:
|
||||||
image: "verycooldb:1337"
|
image: "verycooldb:1337"
|
||||||
webapp:
|
webapp:
|
||||||
build: ./db
|
build: ./db
|
||||||
`)
|
`)
|
||||||
c, err := ParseCompose(dt, nil)
|
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 1, len(c.Groups))
|
require.Equal(t, 1, len(c.Groups))
|
||||||
|
require.Equal(t, 1, len(c.Targets))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestParseComposeTarget(t *testing.T) {
|
func TestParseComposeTarget(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
db:
|
db:
|
||||||
build:
|
build:
|
||||||
@@ -94,7 +115,7 @@ services:
|
|||||||
target: webapp
|
target: webapp
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseCompose(dt, nil)
|
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 2, len(c.Targets))
|
require.Equal(t, 2, len(c.Targets))
|
||||||
@@ -108,7 +129,7 @@ services:
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestComposeBuildWithoutContext(t *testing.T) {
|
func TestComposeBuildWithoutContext(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
db:
|
db:
|
||||||
build:
|
build:
|
||||||
@@ -119,7 +140,7 @@ services:
|
|||||||
target: webapp
|
target: webapp
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseCompose(dt, nil)
|
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 2, len(c.Targets))
|
require.Equal(t, 2, len(c.Targets))
|
||||||
sort.Slice(c.Targets, func(i, j int) bool {
|
sort.Slice(c.Targets, func(i, j int) bool {
|
||||||
@@ -132,7 +153,7 @@ services:
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestBuildArgEnvCompose(t *testing.T) {
|
func TestBuildArgEnvCompose(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
version: "3.8"
|
version: "3.8"
|
||||||
services:
|
services:
|
||||||
example:
|
example:
|
||||||
@@ -146,33 +167,30 @@ services:
|
|||||||
BRB: FOO
|
BRB: FOO
|
||||||
`)
|
`)
|
||||||
|
|
||||||
os.Setenv("FOO", "bar")
|
t.Setenv("FOO", "bar")
|
||||||
defer os.Unsetenv("FOO")
|
t.Setenv("BAR", "foo")
|
||||||
os.Setenv("BAR", "foo")
|
t.Setenv("ZZZ_BAR", "zzz_foo")
|
||||||
defer os.Unsetenv("BAR")
|
|
||||||
os.Setenv("ZZZ_BAR", "zzz_foo")
|
|
||||||
defer os.Unsetenv("ZZZ_BAR")
|
|
||||||
|
|
||||||
c, err := ParseCompose(dt, sliceToMap(os.Environ()))
|
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, sliceToMap(os.Environ()))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, "bar", c.Targets[0].Args["FOO"])
|
require.Equal(t, ptrstr("bar"), c.Targets[0].Args["FOO"])
|
||||||
require.Equal(t, "zzz_foo", c.Targets[0].Args["BAR"])
|
require.Equal(t, ptrstr("zzz_foo"), c.Targets[0].Args["BAR"])
|
||||||
require.Equal(t, "FOO", c.Targets[0].Args["BRB"])
|
require.Equal(t, ptrstr("FOO"), c.Targets[0].Args["BRB"])
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestInconsistentComposeFile(t *testing.T) {
|
func TestInconsistentComposeFile(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
webapp:
|
webapp:
|
||||||
entrypoint: echo 1
|
entrypoint: echo 1
|
||||||
`)
|
`)
|
||||||
|
|
||||||
_, err := ParseCompose(dt, nil)
|
_, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
require.NoError(t, err)
|
require.Error(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAdvancedNetwork(t *testing.T) {
|
func TestAdvancedNetwork(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
db:
|
db:
|
||||||
networks:
|
networks:
|
||||||
@@ -192,12 +210,12 @@ networks:
|
|||||||
gateway: 10.5.0.254
|
gateway: 10.5.0.254
|
||||||
`)
|
`)
|
||||||
|
|
||||||
_, err := ParseCompose(dt, nil)
|
_, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTags(t *testing.T) {
|
func TestTags(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
example:
|
example:
|
||||||
image: example
|
image: example
|
||||||
@@ -209,13 +227,13 @@ services:
|
|||||||
- bar
|
- bar
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseCompose(dt, nil)
|
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, []string{"foo", "bar"}, c.Targets[0].Tags)
|
require.Equal(t, []string{"foo", "bar"}, c.Targets[0].Tags)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDependsOnList(t *testing.T) {
|
func TestDependsOnList(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
version: "3.8"
|
version: "3.8"
|
||||||
|
|
||||||
services:
|
services:
|
||||||
@@ -246,12 +264,12 @@ networks:
|
|||||||
name: test-net
|
name: test-net
|
||||||
`)
|
`)
|
||||||
|
|
||||||
_, err := ParseCompose(dt, nil)
|
_, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestComposeExt(t *testing.T) {
|
func TestComposeExt(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
addon:
|
addon:
|
||||||
image: ct-addon:bar
|
image: ct-addon:bar
|
||||||
@@ -264,6 +282,8 @@ services:
|
|||||||
- user/app:cache
|
- user/app:cache
|
||||||
tags:
|
tags:
|
||||||
- ct-addon:baz
|
- ct-addon:baz
|
||||||
|
ssh:
|
||||||
|
key: /path/to/key
|
||||||
args:
|
args:
|
||||||
CT_ECR: foo
|
CT_ECR: foo
|
||||||
CT_TAG: bar
|
CT_TAG: bar
|
||||||
@@ -273,6 +293,9 @@ services:
|
|||||||
tags:
|
tags:
|
||||||
- ct-addon:foo
|
- ct-addon:foo
|
||||||
- ct-addon:alp
|
- ct-addon:alp
|
||||||
|
ssh:
|
||||||
|
- default
|
||||||
|
- other=path/to/otherkey
|
||||||
platforms:
|
platforms:
|
||||||
- linux/amd64
|
- linux/amd64
|
||||||
- linux/arm64
|
- linux/arm64
|
||||||
@@ -289,6 +312,11 @@ services:
|
|||||||
args:
|
args:
|
||||||
CT_ECR: foo
|
CT_ECR: foo
|
||||||
CT_TAG: bar
|
CT_TAG: bar
|
||||||
|
shm_size: 128m
|
||||||
|
ulimits:
|
||||||
|
nofile:
|
||||||
|
soft: 1024
|
||||||
|
hard: 1024
|
||||||
x-bake:
|
x-bake:
|
||||||
secret:
|
secret:
|
||||||
- id=mysecret,src=/local/secret
|
- id=mysecret,src=/local/secret
|
||||||
@@ -299,29 +327,32 @@ services:
|
|||||||
no-cache: true
|
no-cache: true
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseCompose(dt, nil)
|
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 2, len(c.Targets))
|
require.Equal(t, 2, len(c.Targets))
|
||||||
sort.Slice(c.Targets, func(i, j int) bool {
|
sort.Slice(c.Targets, func(i, j int) bool {
|
||||||
return c.Targets[i].Name < c.Targets[j].Name
|
return c.Targets[i].Name < c.Targets[j].Name
|
||||||
})
|
})
|
||||||
require.Equal(t, map[string]string{"CT_ECR": "foo", "CT_TAG": "bar"}, c.Targets[0].Args)
|
require.Equal(t, map[string]*string{"CT_ECR": ptrstr("foo"), "CT_TAG": ptrstr("bar")}, c.Targets[0].Args)
|
||||||
require.Equal(t, []string{"ct-addon:baz", "ct-addon:foo", "ct-addon:alp"}, c.Targets[0].Tags)
|
require.Equal(t, []string{"ct-addon:baz", "ct-addon:foo", "ct-addon:alp"}, c.Targets[0].Tags)
|
||||||
require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[0].Platforms)
|
require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[0].Platforms)
|
||||||
require.Equal(t, []string{"user/app:cache", "type=local,src=path/to/cache"}, c.Targets[0].CacheFrom)
|
require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheFrom))
|
||||||
require.Equal(t, []string{"user/app:cache", "type=local,dest=path/to/cache"}, c.Targets[0].CacheTo)
|
require.Equal(t, []string{"type=local,dest=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheTo))
|
||||||
|
require.Equal(t, []string{"default", "key=/path/to/key", "other=path/to/otherkey"}, stringify(c.Targets[0].SSH))
|
||||||
require.Equal(t, newBool(true), c.Targets[0].Pull)
|
require.Equal(t, newBool(true), c.Targets[0].Pull)
|
||||||
require.Equal(t, map[string]string{"alpine": "docker-image://alpine:3.13"}, c.Targets[0].Contexts)
|
require.Equal(t, map[string]string{"alpine": "docker-image://alpine:3.13"}, c.Targets[0].Contexts)
|
||||||
require.Equal(t, []string{"ct-fake-aws:bar"}, c.Targets[1].Tags)
|
require.Equal(t, []string{"ct-fake-aws:bar"}, c.Targets[1].Tags)
|
||||||
require.Equal(t, []string{"id=mysecret,src=/local/secret", "id=mysecret2,src=/local/secret2"}, c.Targets[1].Secrets)
|
require.Equal(t, []string{"id=mysecret,src=/local/secret", "id=mysecret2,src=/local/secret2"}, stringify(c.Targets[1].Secrets))
|
||||||
require.Equal(t, []string{"default"}, c.Targets[1].SSH)
|
require.Equal(t, []string{"default"}, stringify(c.Targets[1].SSH))
|
||||||
require.Equal(t, []string{"linux/arm64"}, c.Targets[1].Platforms)
|
require.Equal(t, []string{"linux/arm64"}, c.Targets[1].Platforms)
|
||||||
require.Equal(t, []string{"type=docker"}, c.Targets[1].Outputs)
|
require.Equal(t, []string{"type=docker"}, stringify(c.Targets[1].Outputs))
|
||||||
require.Equal(t, newBool(true), c.Targets[1].NoCache)
|
require.Equal(t, newBool(true), c.Targets[1].NoCache)
|
||||||
|
require.Equal(t, ptrstr("128MiB"), c.Targets[1].ShmSize)
|
||||||
|
require.Equal(t, []string{"nofile=1024:1024"}, c.Targets[1].Ulimits)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestComposeExtDedup(t *testing.T) {
|
func TestComposeExtDedup(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
webapp:
|
webapp:
|
||||||
image: app:bar
|
image: app:bar
|
||||||
@@ -332,6 +363,8 @@ services:
|
|||||||
- user/app:cache
|
- user/app:cache
|
||||||
tags:
|
tags:
|
||||||
- ct-addon:foo
|
- ct-addon:foo
|
||||||
|
ssh:
|
||||||
|
- default
|
||||||
x-bake:
|
x-bake:
|
||||||
tags:
|
tags:
|
||||||
- ct-addon:foo
|
- ct-addon:foo
|
||||||
@@ -341,14 +374,18 @@ services:
|
|||||||
- type=local,src=path/to/cache
|
- type=local,src=path/to/cache
|
||||||
cache-to:
|
cache-to:
|
||||||
- type=local,dest=path/to/cache
|
- type=local,dest=path/to/cache
|
||||||
|
ssh:
|
||||||
|
- default
|
||||||
|
- key=path/to/key
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseCompose(dt, nil)
|
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, []string{"ct-addon:foo", "ct-addon:baz"}, c.Targets[0].Tags)
|
require.Equal(t, []string{"ct-addon:foo", "ct-addon:baz"}, c.Targets[0].Tags)
|
||||||
require.Equal(t, []string{"user/app:cache", "type=local,src=path/to/cache"}, c.Targets[0].CacheFrom)
|
require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheFrom))
|
||||||
require.Equal(t, []string{"user/app:cache", "type=local,dest=path/to/cache"}, c.Targets[0].CacheTo)
|
require.Equal(t, []string{"type=local,dest=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheTo))
|
||||||
|
require.Equal(t, []string{"default", "key=path/to/key"}, stringify(c.Targets[0].SSH))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestEnv(t *testing.T) {
|
func TestEnv(t *testing.T) {
|
||||||
@@ -359,7 +396,7 @@ func TestEnv(t *testing.T) {
|
|||||||
_, err = envf.WriteString("FOO=bsdf -csdf\n")
|
_, err = envf.WriteString("FOO=bsdf -csdf\n")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
scratch:
|
scratch:
|
||||||
build:
|
build:
|
||||||
@@ -376,9 +413,9 @@ services:
|
|||||||
- ` + envf.Name() + `
|
- ` + envf.Name() + `
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseCompose(dt, nil)
|
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, map[string]string{"CT_ECR": "foo", "FOO": "bsdf -csdf", "NODE_ENV": "test"}, c.Targets[0].Args)
|
require.Equal(t, map[string]*string{"CT_ECR": ptrstr("foo"), "FOO": ptrstr("bsdf -csdf"), "NODE_ENV": ptrstr("test")}, c.Targets[0].Args)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDotEnv(t *testing.T) {
|
func TestDotEnv(t *testing.T) {
|
||||||
@@ -387,7 +424,7 @@ func TestDotEnv(t *testing.T) {
|
|||||||
err := os.WriteFile(filepath.Join(tmpdir, ".env"), []byte("FOO=bar"), 0644)
|
err := os.WriteFile(filepath.Join(tmpdir, ".env"), []byte("FOO=bar"), 0644)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
scratch:
|
scratch:
|
||||||
build:
|
build:
|
||||||
@@ -397,13 +434,16 @@ services:
|
|||||||
`)
|
`)
|
||||||
|
|
||||||
chdir(t, tmpdir)
|
chdir(t, tmpdir)
|
||||||
c, _, err := ParseComposeFile(dt, "docker-compose.yml")
|
c, err := ParseComposeFiles([]File{{
|
||||||
|
Name: "docker-compose.yml",
|
||||||
|
Data: dt,
|
||||||
|
}})
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, map[string]string{"FOO": "bar"}, c.Targets[0].Args)
|
require.Equal(t, map[string]*string{"FOO": ptrstr("bar")}, c.Targets[0].Args)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPorts(t *testing.T) {
|
func TestPorts(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
foo:
|
foo:
|
||||||
build:
|
build:
|
||||||
@@ -419,10 +459,25 @@ services:
|
|||||||
published: "3306"
|
published: "3306"
|
||||||
protocol: tcp
|
protocol: tcp
|
||||||
`)
|
`)
|
||||||
_, err := ParseCompose(dt, nil)
|
_, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestPlatforms(t *testing.T) {
|
||||||
|
dt := []byte(`
|
||||||
|
services:
|
||||||
|
foo:
|
||||||
|
build:
|
||||||
|
context: .
|
||||||
|
platforms:
|
||||||
|
- linux/amd64
|
||||||
|
- linux/arm64
|
||||||
|
`)
|
||||||
|
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[0].Platforms)
|
||||||
|
}
|
||||||
|
|
||||||
func newBool(val bool) *bool {
|
func newBool(val bool) *bool {
|
||||||
b := val
|
b := val
|
||||||
return &b
|
return &b
|
||||||
@@ -465,12 +520,12 @@ func TestServiceName(t *testing.T) {
|
|||||||
for _, tt := range cases {
|
for _, tt := range cases {
|
||||||
tt := tt
|
tt := tt
|
||||||
t.Run(tt.svc, func(t *testing.T) {
|
t.Run(tt.svc, func(t *testing.T) {
|
||||||
_, err := ParseCompose([]byte(`
|
_, err := ParseCompose([]composetypes.ConfigFile{{Content: []byte(`
|
||||||
services:
|
services:
|
||||||
`+tt.svc+`:
|
` + tt.svc + `:
|
||||||
build:
|
build:
|
||||||
context: .
|
context: .
|
||||||
`), nil)
|
`)}}, nil)
|
||||||
if tt.wantErr {
|
if tt.wantErr {
|
||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
} else {
|
} else {
|
||||||
@@ -536,7 +591,7 @@ services:
|
|||||||
for _, tt := range cases {
|
for _, tt := range cases {
|
||||||
tt := tt
|
tt := tt
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
_, err := ParseCompose(tt.dt, nil)
|
_, err := ParseCompose([]composetypes.ConfigFile{{Content: tt.dt}}, nil)
|
||||||
if tt.wantErr {
|
if tt.wantErr {
|
||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
} else {
|
} else {
|
||||||
@@ -546,6 +601,249 @@ services:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestValidateComposeFile(t *testing.T) {
|
||||||
|
cases := []struct {
|
||||||
|
name string
|
||||||
|
fn string
|
||||||
|
dt []byte
|
||||||
|
isCompose bool
|
||||||
|
wantErr bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "empty service",
|
||||||
|
fn: "docker-compose.yml",
|
||||||
|
dt: []byte(`
|
||||||
|
services:
|
||||||
|
foo:
|
||||||
|
`),
|
||||||
|
isCompose: true,
|
||||||
|
wantErr: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "build",
|
||||||
|
fn: "docker-compose.yml",
|
||||||
|
dt: []byte(`
|
||||||
|
services:
|
||||||
|
foo:
|
||||||
|
build: .
|
||||||
|
`),
|
||||||
|
isCompose: true,
|
||||||
|
wantErr: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "image",
|
||||||
|
fn: "docker-compose.yml",
|
||||||
|
dt: []byte(`
|
||||||
|
services:
|
||||||
|
simple:
|
||||||
|
image: nginx
|
||||||
|
`),
|
||||||
|
isCompose: true,
|
||||||
|
wantErr: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "unknown ext",
|
||||||
|
fn: "docker-compose.foo",
|
||||||
|
dt: []byte(`
|
||||||
|
services:
|
||||||
|
simple:
|
||||||
|
image: nginx
|
||||||
|
`),
|
||||||
|
isCompose: true,
|
||||||
|
wantErr: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "hcl",
|
||||||
|
fn: "docker-bake.hcl",
|
||||||
|
dt: []byte(`
|
||||||
|
target "default" {
|
||||||
|
dockerfile = "test"
|
||||||
|
}
|
||||||
|
`),
|
||||||
|
isCompose: false,
|
||||||
|
wantErr: false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range cases {
|
||||||
|
tt := tt
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
isCompose, err := validateComposeFile(tt.dt, tt.fn)
|
||||||
|
assert.Equal(t, tt.isCompose, isCompose)
|
||||||
|
if tt.wantErr {
|
||||||
|
require.Error(t, err)
|
||||||
|
} else {
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestComposeNullArgs(t *testing.T) {
|
||||||
|
dt := []byte(`
|
||||||
|
services:
|
||||||
|
scratch:
|
||||||
|
build:
|
||||||
|
context: .
|
||||||
|
args:
|
||||||
|
FOO: null
|
||||||
|
bar: "baz"
|
||||||
|
`)
|
||||||
|
|
||||||
|
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, map[string]*string{"bar": ptrstr("baz")}, c.Targets[0].Args)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDependsOn(t *testing.T) {
|
||||||
|
dt := []byte(`
|
||||||
|
services:
|
||||||
|
foo:
|
||||||
|
build:
|
||||||
|
context: .
|
||||||
|
ports:
|
||||||
|
- 3306:3306
|
||||||
|
depends_on:
|
||||||
|
- bar
|
||||||
|
bar:
|
||||||
|
build:
|
||||||
|
context: .
|
||||||
|
`)
|
||||||
|
_, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInclude(t *testing.T) {
|
||||||
|
tmpdir := t.TempDir()
|
||||||
|
|
||||||
|
err := os.WriteFile(filepath.Join(tmpdir, "compose-foo.yml"), []byte(`
|
||||||
|
services:
|
||||||
|
foo:
|
||||||
|
build:
|
||||||
|
context: .
|
||||||
|
target: buildfoo
|
||||||
|
ports:
|
||||||
|
- 3306:3306
|
||||||
|
`), 0644)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
dt := []byte(`
|
||||||
|
include:
|
||||||
|
- compose-foo.yml
|
||||||
|
|
||||||
|
services:
|
||||||
|
bar:
|
||||||
|
build:
|
||||||
|
context: .
|
||||||
|
target: buildbar
|
||||||
|
`)
|
||||||
|
|
||||||
|
chdir(t, tmpdir)
|
||||||
|
c, err := ParseComposeFiles([]File{{
|
||||||
|
Name: "composetypes.yml",
|
||||||
|
Data: dt,
|
||||||
|
}})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 2, len(c.Targets))
|
||||||
|
sort.Slice(c.Targets, func(i, j int) bool {
|
||||||
|
return c.Targets[i].Name < c.Targets[j].Name
|
||||||
|
})
|
||||||
|
require.Equal(t, "bar", c.Targets[0].Name)
|
||||||
|
require.Equal(t, "buildbar", *c.Targets[0].Target)
|
||||||
|
require.Equal(t, "foo", c.Targets[1].Name)
|
||||||
|
require.Equal(t, "buildfoo", *c.Targets[1].Target)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDevelop(t *testing.T) {
|
||||||
|
dt := []byte(`
|
||||||
|
services:
|
||||||
|
scratch:
|
||||||
|
build:
|
||||||
|
context: ./webapp
|
||||||
|
develop:
|
||||||
|
watch:
|
||||||
|
- path: ./webapp/html
|
||||||
|
action: sync
|
||||||
|
target: /var/www
|
||||||
|
ignore:
|
||||||
|
- node_modules/
|
||||||
|
`)
|
||||||
|
|
||||||
|
_, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCgroup(t *testing.T) {
|
||||||
|
dt := []byte(`
|
||||||
|
services:
|
||||||
|
scratch:
|
||||||
|
build:
|
||||||
|
context: ./webapp
|
||||||
|
cgroup: private
|
||||||
|
`)
|
||||||
|
|
||||||
|
_, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProjectName(t *testing.T) {
|
||||||
|
dt := []byte(`
|
||||||
|
services:
|
||||||
|
scratch:
|
||||||
|
build:
|
||||||
|
context: ./webapp
|
||||||
|
args:
|
||||||
|
PROJECT_NAME: ${COMPOSE_PROJECT_NAME}
|
||||||
|
`)
|
||||||
|
|
||||||
|
t.Run("default", func(t *testing.T) {
|
||||||
|
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Len(t, c.Targets, 1)
|
||||||
|
require.Len(t, c.Targets[0].Args, 1)
|
||||||
|
require.Equal(t, map[string]*string{"PROJECT_NAME": ptrstr("bake")}, c.Targets[0].Args)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("env", func(t *testing.T) {
|
||||||
|
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, map[string]string{"COMPOSE_PROJECT_NAME": "foo"})
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Len(t, c.Targets, 1)
|
||||||
|
require.Len(t, c.Targets[0].Args, 1)
|
||||||
|
require.Equal(t, map[string]*string{"PROJECT_NAME": ptrstr("foo")}, c.Targets[0].Args)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestServiceContext(t *testing.T) {
|
||||||
|
dt := []byte(`
|
||||||
|
services:
|
||||||
|
base:
|
||||||
|
build:
|
||||||
|
dockerfile: baseapp.Dockerfile
|
||||||
|
command: ./entrypoint.sh
|
||||||
|
webapp:
|
||||||
|
build:
|
||||||
|
context: ./dir
|
||||||
|
additional_contexts:
|
||||||
|
base: service:base
|
||||||
|
`)
|
||||||
|
|
||||||
|
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(c.Groups))
|
||||||
|
require.Equal(t, "default", c.Groups[0].Name)
|
||||||
|
sort.Strings(c.Groups[0].Targets)
|
||||||
|
require.Equal(t, []string{"base", "webapp"}, c.Groups[0].Targets)
|
||||||
|
|
||||||
|
require.Equal(t, 2, len(c.Targets))
|
||||||
|
sort.Slice(c.Targets, func(i, j int) bool {
|
||||||
|
return c.Targets[i].Name < c.Targets[j].Name
|
||||||
|
})
|
||||||
|
|
||||||
|
require.Equal(t, "webapp", c.Targets[1].Name)
|
||||||
|
require.Equal(t, map[string]string{"base": "target:base"}, c.Targets[1].Contexts)
|
||||||
|
}
|
||||||
|
|
||||||
// chdir changes the current working directory to the named directory,
|
// chdir changes the current working directory to the named directory,
|
||||||
// and then restore the original working directory at the end of the test.
|
// and then restore the original working directory at the end of the test.
|
||||||
func chdir(t *testing.T, dir string) {
|
func chdir(t *testing.T, dir string) {
|
||||||
|
|||||||
659 bake/entitlements.go (new file)
@@ -0,0 +1,659 @@
package bake

import (
	"bufio"
	"cmp"
	"context"
	"fmt"
	"io"
	"io/fs"
	"os"
	"path/filepath"
	"slices"
	"strconv"
	"strings"
	"syscall"

	"github.com/containerd/console"
	"github.com/docker/buildx/build"
	"github.com/docker/buildx/util/osutil"
	"github.com/moby/buildkit/util/entitlements"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/tonistiigi/go-csvvalue"
)

type EntitlementKey string

const (
	EntitlementKeyNetworkHost      EntitlementKey = "network.host"
	EntitlementKeySecurityInsecure EntitlementKey = "security.insecure"
	EntitlementKeyDevice           EntitlementKey = "device"
	EntitlementKeyFSRead           EntitlementKey = "fs.read"
	EntitlementKeyFSWrite          EntitlementKey = "fs.write"
	EntitlementKeyFS               EntitlementKey = "fs"
	EntitlementKeyImagePush        EntitlementKey = "image.push"
	EntitlementKeyImageLoad        EntitlementKey = "image.load"
	EntitlementKeyImage            EntitlementKey = "image"
	EntitlementKeySSH              EntitlementKey = "ssh"
)

type EntitlementConf struct {
	NetworkHost      bool
	SecurityInsecure bool
	Devices          *EntitlementsDevicesConf
	FSRead           []string
	FSWrite          []string
	ImagePush        []string
	ImageLoad        []string
	SSH              bool
}

type EntitlementsDevicesConf struct {
	All     bool
	Devices map[string]struct{}
}

func ParseEntitlements(in []string) (EntitlementConf, error) {
	var conf EntitlementConf
	for _, e := range in {
		switch e {
		case string(EntitlementKeyNetworkHost):
			conf.NetworkHost = true
		case string(EntitlementKeySecurityInsecure):
			conf.SecurityInsecure = true
		case string(EntitlementKeySSH):
			conf.SSH = true
		default:
			k, v, _ := strings.Cut(e, "=")
			switch k {
			case string(EntitlementKeyDevice):
				if v == "" {
					conf.Devices = &EntitlementsDevicesConf{All: true}
					continue
				}
				fields, err := csvvalue.Fields(v, nil)
				if err != nil {
					return EntitlementConf{}, errors.Wrapf(err, "failed to parse device entitlement %q", v)
				}
				if conf.Devices == nil {
					conf.Devices = &EntitlementsDevicesConf{}
				}
				if conf.Devices.Devices == nil {
					conf.Devices.Devices = make(map[string]struct{}, 0)
				}
				conf.Devices.Devices[fields[0]] = struct{}{}
			case string(EntitlementKeyFSRead):
				conf.FSRead = append(conf.FSRead, v)
			case string(EntitlementKeyFSWrite):
				conf.FSWrite = append(conf.FSWrite, v)
			case string(EntitlementKeyFS):
				conf.FSRead = append(conf.FSRead, v)
				conf.FSWrite = append(conf.FSWrite, v)
			case string(EntitlementKeyImagePush):
				conf.ImagePush = append(conf.ImagePush, v)
			case string(EntitlementKeyImageLoad):
				conf.ImageLoad = append(conf.ImageLoad, v)
			case string(EntitlementKeyImage):
				conf.ImagePush = append(conf.ImagePush, v)
				conf.ImageLoad = append(conf.ImageLoad, v)
			default:
				return conf, errors.Errorf("unknown entitlement key %q", k)
			}
		}
	}
	return conf, nil
}

func (c EntitlementConf) Validate(m map[string]build.Options) (EntitlementConf, error) {
	var expected EntitlementConf

	for _, v := range m {
		if err := c.check(v, &expected); err != nil {
			return EntitlementConf{}, err
		}
	}

	return expected, nil
}

func (c EntitlementConf) check(bo build.Options, expected *EntitlementConf) error {
	for _, e := range bo.Allow {
		k, rest, _ := strings.Cut(e, "=")
		switch k {
		case entitlements.EntitlementDevice.String():
			if rest == "" {
				if c.Devices == nil || !c.Devices.All {
					expected.Devices = &EntitlementsDevicesConf{All: true}
				}
				continue
			}
			fields, err := csvvalue.Fields(rest, nil)
			if err != nil {
				return errors.Wrapf(err, "failed to parse device entitlement %q", rest)
			}
			if expected.Devices == nil {
				expected.Devices = &EntitlementsDevicesConf{}
			}
			if expected.Devices.Devices == nil {
				expected.Devices.Devices = make(map[string]struct{}, 0)
			}
			expected.Devices.Devices[fields[0]] = struct{}{}
		}

		switch e {
		case entitlements.EntitlementNetworkHost.String():
			if !c.NetworkHost {
				expected.NetworkHost = true
			}
		case entitlements.EntitlementSecurityInsecure.String():
			if !c.SecurityInsecure {
				expected.SecurityInsecure = true
			}
		}
	}

	rwPaths := map[string]struct{}{}
	roPaths := map[string]struct{}{}

	for _, p := range collectLocalPaths(bo.Inputs) {
		roPaths[p] = struct{}{}
	}

	for _, p := range bo.ExportsLocalPathsTemporary {
		rwPaths[p] = struct{}{}
	}

	for _, ce := range bo.CacheTo {
		if ce.Type == "local" {
			if dest, ok := ce.Attrs["dest"]; ok {
				rwPaths[dest] = struct{}{}
			}
		}
	}

	for _, ci := range bo.CacheFrom {
		if ci.Type == "local" {
			if src, ok := ci.Attrs["src"]; ok {
				roPaths[src] = struct{}{}
			}
		}
	}

	for _, secret := range bo.SecretSpecs {
		if secret.FilePath != "" {
			roPaths[secret.FilePath] = struct{}{}
		}
	}

	for _, ssh := range bo.SSHSpecs {
		for _, p := range ssh.Paths {
			roPaths[p] = struct{}{}
		}
		if len(ssh.Paths) == 0 {
			if !c.SSH {
				expected.SSH = true
			}
		}
	}

	var err error
	expected.FSRead, err = findMissingPaths(c.FSRead, roPaths)
	if err != nil {
		return err
	}

	expected.FSWrite, err = findMissingPaths(c.FSWrite, rwPaths)
	if err != nil {
		return err
	}

	return nil
}

func (c EntitlementConf) Prompt(ctx context.Context, isRemote bool, out io.Writer) error {
	var term bool
	if _, err := console.ConsoleFromFile(os.Stdin); err == nil {
		term = true
	}

	var msgs []string
	var flags []string

	// these warnings are currently disabled to give users time to update
	var msgsFS []string
	var flagsFS []string

	if c.NetworkHost {
		msgs = append(msgs, " - Running build containers that can access host network")
		flags = append(flags, string(EntitlementKeyNetworkHost))
	}
	if c.SecurityInsecure {
		msgs = append(msgs, " - Running privileged containers that can make system changes")
		flags = append(flags, string(EntitlementKeySecurityInsecure))
	}

	if c.Devices != nil {
		if c.Devices.All {
			msgs = append(msgs, " - Access to CDI devices")
			flags = append(flags, string(EntitlementKeyDevice))
		} else {
			for d := range c.Devices.Devices {
				msgs = append(msgs, fmt.Sprintf(" - Access to device %s", d))
				flags = append(flags, string(EntitlementKeyDevice)+"="+d)
			}
		}
	}

	if c.SSH {
		msgsFS = append(msgsFS, " - Forwarding default SSH agent socket")
		flagsFS = append(flagsFS, string(EntitlementKeySSH))
	}

	roPaths, rwPaths, commonPaths := groupSamePaths(c.FSRead, c.FSWrite)
	wd, err := os.Getwd()
	if err != nil {
		return errors.Wrap(err, "failed to get current working directory")
	}
	wd, err = filepath.EvalSymlinks(wd)
	if err != nil {
		return errors.Wrap(err, "failed to evaluate working directory")
	}
	roPaths = toRelativePaths(roPaths, wd)
	rwPaths = toRelativePaths(rwPaths, wd)
	commonPaths = toRelativePaths(commonPaths, wd)

	if len(commonPaths) > 0 {
		for _, p := range commonPaths {
			msgsFS = append(msgsFS, fmt.Sprintf(" - Read and write access to path %s", p))
			flagsFS = append(flagsFS, string(EntitlementKeyFS)+"="+p)
		}
	}

	if len(roPaths) > 0 {
		for _, p := range roPaths {
			msgsFS = append(msgsFS, fmt.Sprintf(" - Read access to path %s", p))
			flagsFS = append(flagsFS, string(EntitlementKeyFSRead)+"="+p)
		}
	}

	if len(rwPaths) > 0 {
		for _, p := range rwPaths {
			msgsFS = append(msgsFS, fmt.Sprintf(" - Write access to path %s", p))
			flagsFS = append(flagsFS, string(EntitlementKeyFSWrite)+"="+p)
		}
	}

	if len(msgs) == 0 && len(msgsFS) == 0 {
		return nil
	}

	fmt.Fprintf(out, "Your build is requesting privileges for following possibly insecure capabilities:\n\n")
	for _, m := range slices.Concat(msgs, msgsFS) {
		fmt.Fprintf(out, "%s\n", m)
	}

	for i, f := range flags {
		flags[i] = "--allow=" + f
	}
	for i, f := range flagsFS {
		flagsFS[i] = "--allow=" + f
	}

	if term {
		fmt.Fprintf(out, "\nIn order to not see this message in the future pass %q to grant requested privileges.\n", strings.Join(slices.Concat(flags, flagsFS), " "))
	} else {
		fmt.Fprintf(out, "\nPass %q to grant requested privileges.\n", strings.Join(slices.Concat(flags, flagsFS), " "))
	}

	args := slices.Clone(os.Args)
	if v, ok := os.LookupEnv("DOCKER_CLI_PLUGIN_ORIGINAL_CLI_COMMAND"); ok && v != "" {
		args[0] = v
	}
	idx := slices.Index(args, "bake")

	if idx != -1 {
		fmt.Fprintf(out, "\nYour full command with requested privileges:\n\n")
		fmt.Fprintf(out, "%s %s %s\n\n", strings.Join(args[:idx+1], " "), strings.Join(slices.Concat(flags, flagsFS), " "), strings.Join(args[idx+1:], " "))
	}

	fsEntitlementsEnabled := true
	if isRemote {
		if v, ok := os.LookupEnv("BAKE_ALLOW_REMOTE_FS_ACCESS"); ok {
			vv, err := strconv.ParseBool(v)
			if err != nil {
				return errors.Wrapf(err, "failed to parse BAKE_ALLOW_REMOTE_FS_ACCESS value %q", v)
			}
			fsEntitlementsEnabled = !vv
		}
	}
	v, fsEntitlementsSet := os.LookupEnv("BUILDX_BAKE_ENTITLEMENTS_FS")
	if fsEntitlementsSet {
		vv, err := strconv.ParseBool(v)
		if err != nil {
			return errors.Wrapf(err, "failed to parse BUILDX_BAKE_ENTITLEMENTS_FS value %q", v)
		}
		fsEntitlementsEnabled = vv
	}

	if !fsEntitlementsEnabled && len(msgs) == 0 {
		return nil
	}
	if fsEntitlementsEnabled && !fsEntitlementsSet && len(msgsFS) != 0 {
		fmt.Fprintf(out, "To disable filesystem entitlements checks, you can set BUILDX_BAKE_ENTITLEMENTS_FS=0 .\n\n")
	}

	if term {
		fmt.Fprintf(out, "Do you want to grant requested privileges and continue? [y/N] ")
		reader := bufio.NewReader(os.Stdin)
		answerCh := make(chan string, 1)
		go func() {
			answer, _, _ := reader.ReadLine()
			answerCh <- string(answer)
			close(answerCh)
		}()

		select {
		case <-ctx.Done():
		case answer := <-answerCh:
			if strings.ToLower(string(answer)) == "y" {
				return nil
			}
		}
	}

	return errors.Errorf("additional privileges requested")
}

func isParentOrEqualPath(p, parent string) bool {
	if p == parent || parent == "/" {
		return true
	}
	if strings.HasPrefix(p, filepath.Clean(parent+string(filepath.Separator))) {
		return true
	}
	return false
}

func findMissingPaths(set []string, paths map[string]struct{}) ([]string, error) {
	set, allowAny, err := evaluatePaths(set)
	if err != nil {
		return nil, err
	} else if allowAny {
		return nil, nil
	}

	paths, err = evaluateToExistingPaths(paths)
	if err != nil {
		return nil, err
	}
	paths, err = dedupPaths(paths)
	if err != nil {
		return nil, err
	}

	out := make([]string, 0, len(paths))
loop0:
	for p := range paths {
		for _, c := range set {
			if isParentOrEqualPath(p, c) {
				continue loop0
			}
		}
		out = append(out, p)
	}
	if len(out) == 0 {
		return nil, nil
	}

	slices.Sort(out)

	return out, nil
}

func dedupPaths(in map[string]struct{}) (map[string]struct{}, error) {
	arr := make([]string, 0, len(in))
	for p := range in {
		arr = append(arr, filepath.Clean(p))
	}

	slices.SortFunc(arr, func(a, b string) int {
		return cmp.Compare(len(a), len(b))
	})

	m := make(map[string]struct{}, len(arr))
loop0:
	for _, p := range arr {
		for parent := range m {
			if strings.HasPrefix(p, parent+string(filepath.Separator)) {
				continue loop0
			}
		}
		m[p] = struct{}{}
	}
	return m, nil
}

func toRelativePaths(in []string, wd string) []string {
	out := make([]string, 0, len(in))
	for _, p := range in {
		rel, err := filepath.Rel(wd, p)
		if err == nil {
			// allow up to one level of ".." in the path
			if !strings.HasPrefix(rel, ".."+string(filepath.Separator)+"..") {
				out = append(out, rel)
				continue
			}
		}
		out = append(out, p)
	}
	return out
}

func groupSamePaths(in1, in2 []string) ([]string, []string, []string) {
	if in1 == nil || in2 == nil {
		return in1, in2, nil
	}

	slices.Sort(in1)
	slices.Sort(in2)

	common := []string{}
	i, j := 0, 0

	for i < len(in1) && j < len(in2) {
		switch {
		case in1[i] == in2[j]:
			common = append(common, in1[i])
			i++
			j++
		case in1[i] < in2[j]:
			i++
		default:
			j++
		}
	}

	in1 = removeCommonPaths(in1, common)
	in2 = removeCommonPaths(in2, common)

	return in1, in2, common
}

func removeCommonPaths(in, common []string) []string {
	filtered := make([]string, 0, len(in))
	commonIndex := 0
	for _, path := range in {
		if commonIndex < len(common) && path == common[commonIndex] {
			commonIndex++
			continue
		}
		filtered = append(filtered, path)
	}
	return filtered
}

func evaluatePaths(in []string) ([]string, bool, error) {
	out := make([]string, 0, len(in))
	allowAny := false
	for _, p := range in {
		if p == "*" {
			allowAny = true
			continue
		}
		v, err := filepath.Abs(p)
		if err != nil {
			logrus.Warnf("failed to evaluate entitlement path %q: %v", p, err)
			continue
		}
		v, rest, err := evaluateToExistingPath(v)
		if err != nil {
			return nil, false, errors.Wrapf(err, "failed to evaluate path %q", p)
		}
		v, err = osutil.GetLongPathName(v)
		if err != nil {
			return nil, false, errors.Wrapf(err, "failed to evaluate path %q", p)
		}
		if rest != "" {
			v = filepath.Join(v, rest)
		}
		out = append(out, v)
	}
	return out, allowAny, nil
}

func evaluateToExistingPaths(in map[string]struct{}) (map[string]struct{}, error) {
	m := make(map[string]struct{}, len(in))
	for p := range in {
		v, _, err := evaluateToExistingPath(p)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to evaluate path %q", p)
		}
		v, err = osutil.GetLongPathName(v)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to evaluate path %q", p)
		}
		m[v] = struct{}{}
	}
	return m, nil
}

func evaluateToExistingPath(in string) (string, string, error) {
	in, err := filepath.Abs(in)
	if err != nil {
		return "", "", err
	}

	volLen := volumeNameLen(in)
	pathSeparator := string(os.PathSeparator)

	if volLen < len(in) && os.IsPathSeparator(in[volLen]) {
		volLen++
	}
	vol := in[:volLen]
	dest := vol
	linksWalked := 0
	var end int
	for start := volLen; start < len(in); start = end {
		for start < len(in) && os.IsPathSeparator(in[start]) {
			start++
		}
		end = start
		for end < len(in) && !os.IsPathSeparator(in[end]) {
			end++
		}

		if end == start {
			break
		} else if in[start:end] == "." {
			continue
		} else if in[start:end] == ".." {
			var r int
			for r = len(dest) - 1; r >= volLen; r-- {
				if os.IsPathSeparator(dest[r]) {
					break
				}
			}
			if r < volLen || dest[r+1:] == ".." {
				if len(dest) > volLen {
					dest += pathSeparator
				}
				dest += ".."
			} else {
				dest = dest[:r]
			}
			continue
		}

		if len(dest) > volumeNameLen(dest) && !os.IsPathSeparator(dest[len(dest)-1]) {
			dest += pathSeparator
		}
		dest += in[start:end]

		fi, err := os.Lstat(dest)
		if err != nil {
			// If the component doesn't exist, return the last valid path
			if os.IsNotExist(err) {
				for r := len(dest) - 1; r >= volLen; r-- {
					if os.IsPathSeparator(dest[r]) {
						return dest[:r], in[start:], nil
					}
				}
				return vol, in[start:], nil
			}
			return "", "", err
		}

		if fi.Mode()&fs.ModeSymlink == 0 {
			if !fi.Mode().IsDir() && end < len(in) {
				return "", "", syscall.ENOTDIR
			}
			continue
		}

		linksWalked++
		if linksWalked > 255 {
			return "", "", errors.New("too many symlinks")
		}

		link, err := os.Readlink(dest)
		if err != nil {
			return "", "", err
		}

		in = link + in[end:]

		v := volumeNameLen(link)
		if v > 0 {
			if v < len(link) && os.IsPathSeparator(link[v]) {
				v++
			}
			vol = link[:v]
			dest = vol
			end = len(vol)
		} else if len(link) > 0 && os.IsPathSeparator(link[0]) {
			dest = link[:1]
			end = 1
			vol = link[:1]
			volLen = 1
		} else {
			var r int
			for r = len(dest) - 1; r >= volLen; r-- {
				if os.IsPathSeparator(dest[r]) {
					break
				}
			}
			if r < volLen {
				dest = vol
			} else {
				dest = dest[:r]
			}
			end = 0
		}
	}
	return filepath.Clean(dest), "", nil
}

func volumeNameLen(s string) int {
	return len(filepath.VolumeName(s))
}
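For orientation, the sketch below shows how these pieces could plausibly be wired together by a caller: parse the --allow flags, validate them against the resolved build options for each target, and prompt for whatever is still missing. The helper name, the example flag values, and the output/context wiring are illustrative assumptions, not code from this changeset.

	// confirmEntitlements is a hypothetical helper sketching the intended flow.
	func confirmEntitlements(ctx context.Context, allowFlags []string, opts map[string]build.Options) error {
		// e.g. allowFlags = []string{"network.host", "fs.read=/src"}
		conf, err := ParseEntitlements(allowFlags)
		if err != nil {
			return err
		}
		// Validate returns only the entitlements that the build options request
		// but that were not granted in conf.
		missing, err := conf.Validate(opts)
		if err != nil {
			return err
		}
		// isRemote=false assumes a local bake definition; Prompt returns nil
		// when nothing beyond the granted entitlements was requested.
		return missing.Prompt(ctx, false, os.Stdout)
	}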
bake/entitlements_test.go (new file, 486 lines)
@@ -0,0 +1,486 @@
package bake

import (
	"fmt"
	"os"
	"path/filepath"
	"slices"
	"testing"

	"github.com/docker/buildx/build"
	"github.com/docker/buildx/controller/pb"
	"github.com/docker/buildx/util/osutil"
	"github.com/moby/buildkit/client/llb"
	"github.com/moby/buildkit/util/entitlements"
	"github.com/stretchr/testify/require"
)

func TestEvaluateToExistingPath(t *testing.T) {
	tempDir, err := osutil.GetLongPathName(t.TempDir())
	require.NoError(t, err)

	// Setup temporary directory structure for testing
	existingFile := filepath.Join(tempDir, "existing_file")
	require.NoError(t, os.WriteFile(existingFile, []byte("test"), 0644))

	existingDir := filepath.Join(tempDir, "existing_dir")
	require.NoError(t, os.Mkdir(existingDir, 0755))

	symlinkToFile := filepath.Join(tempDir, "symlink_to_file")
	require.NoError(t, os.Symlink(existingFile, symlinkToFile))

	symlinkToDir := filepath.Join(tempDir, "symlink_to_dir")
	require.NoError(t, os.Symlink(existingDir, symlinkToDir))

	nonexistentPath := filepath.Join(tempDir, "nonexistent", "path", "file.txt")

	tests := []struct {
		name      string
		input     string
		expected  string
		expectErr bool
	}{
		{
			name:      "Existing file",
			input:     existingFile,
			expected:  existingFile,
			expectErr: false,
		},
		{
			name:      "Existing directory",
			input:     existingDir,
			expected:  existingDir,
			expectErr: false,
		},
		{
			name:      "Symlink to file",
			input:     symlinkToFile,
			expected:  existingFile,
			expectErr: false,
		},
		{
			name:      "Symlink to directory",
			input:     symlinkToDir,
			expected:  existingDir,
			expectErr: false,
		},
		{
			name:      "Non-existent path",
			input:     nonexistentPath,
			expected:  tempDir,
			expectErr: false,
		},
		{
			name:      "Non-existent intermediate path",
			input:     filepath.Join(tempDir, "nonexistent", "file.txt"),
			expected:  tempDir,
			expectErr: false,
		},
		{
			name:  "Root path",
			input: "/",
			expected: func() string {
				root, _ := filepath.Abs("/")
				return root
			}(),
			expectErr: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result, _, err := evaluateToExistingPath(tt.input)

			if tt.expectErr {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
				require.Equal(t, tt.expected, result)
			}
		})
	}
}

func TestDedupePaths(t *testing.T) {
	wd := osutil.GetWd()
	tcases := []struct {
		in  map[string]struct{}
		out map[string]struct{}
	}{
		{
			in: map[string]struct{}{
				"/a/b/c": {},
				"/a/b/d": {},
				"/a/b/e": {},
			},
			out: map[string]struct{}{
				"/a/b/c": {},
				"/a/b/d": {},
				"/a/b/e": {},
			},
		},
		{
			in: map[string]struct{}{
				"/a/b/c":      {},
				"/a/b/c/d":    {},
				"/a/b/c/d/e":  {},
				"/a/b/../b/c": {},
			},
			out: map[string]struct{}{
				"/a/b/c": {},
			},
		},
		{
			in: map[string]struct{}{
				filepath.Join(wd, "a/b/c"):    {},
				filepath.Join(wd, "../aa"):    {},
				filepath.Join(wd, "a/b"):      {},
				filepath.Join(wd, "a/b/d"):    {},
				filepath.Join(wd, "../aa/b"):  {},
				filepath.Join(wd, "../../bb"): {},
			},
			out: map[string]struct{}{
				"a/b":                         {},
				"../aa":                       {},
				filepath.Join(wd, "../../bb"): {},
			},
		},
	}

	for i, tc := range tcases {
		t.Run(fmt.Sprintf("case%d", i), func(t *testing.T) {
			out, err := dedupPaths(tc.in)
			if err != nil {
				require.NoError(t, err)
			}
			// convert to relative paths as that is shown to user
			arr := make([]string, 0, len(out))
			for k := range out {
				arr = append(arr, k)
			}
			require.NoError(t, err)
			arr = toRelativePaths(arr, wd)
			m := make(map[string]struct{})
			for _, v := range arr {
				m[filepath.ToSlash(v)] = struct{}{}
			}
			o := make(map[string]struct{}, len(tc.out))
			for k := range tc.out {
				o[filepath.ToSlash(k)] = struct{}{}
			}
			require.Equal(t, o, m)
		})
	}
}

func TestValidateEntitlements(t *testing.T) {
	dir1 := t.TempDir()
	dir2 := t.TempDir()

	// the paths returned by entitlements validation will have symlinks resolved
	expDir1, err := filepath.EvalSymlinks(dir1)
	require.NoError(t, err)
	expDir2, err := filepath.EvalSymlinks(dir2)
	require.NoError(t, err)

	escapeLink := filepath.Join(dir1, "escape_link")
	require.NoError(t, os.Symlink("../../aa", escapeLink))

	wd, err := os.Getwd()
	require.NoError(t, err)
	expWd, err := filepath.EvalSymlinks(wd)
	require.NoError(t, err)

	tcases := []struct {
		name     string
		conf     EntitlementConf
		opt      build.Options
		expected EntitlementConf
	}{
		{
			name: "No entitlements",
			opt: build.Options{
				Inputs: build.Inputs{
					ContextState: &llb.State{},
				},
			},
		},
		{
			name: "NetworkHostMissing",
			opt: build.Options{
				Allow: []string{
					entitlements.EntitlementNetworkHost.String(),
				},
			},
			expected: EntitlementConf{
				NetworkHost: true,
				FSRead:      []string{expWd},
			},
		},
		{
			name: "NetworkHostSet",
			conf: EntitlementConf{
				NetworkHost: true,
			},
			opt: build.Options{
				Allow: []string{
					entitlements.EntitlementNetworkHost.String(),
				},
			},
			expected: EntitlementConf{
				FSRead: []string{expWd},
			},
		},
		{
			name: "SecurityAndNetworkHostMissing",
			opt: build.Options{
				Allow: []string{
					entitlements.EntitlementNetworkHost.String(),
					entitlements.EntitlementSecurityInsecure.String(),
				},
			},
			expected: EntitlementConf{
				NetworkHost:      true,
				SecurityInsecure: true,
				FSRead:           []string{expWd},
			},
		},
		{
			name: "SecurityMissingAndNetworkHostSet",
			conf: EntitlementConf{
				NetworkHost: true,
			},
			opt: build.Options{
				Allow: []string{
					entitlements.EntitlementNetworkHost.String(),
					entitlements.EntitlementSecurityInsecure.String(),
				},
			},
			expected: EntitlementConf{
				SecurityInsecure: true,
				FSRead:           []string{expWd},
			},
		},
		{
			name: "SSHMissing",
			opt: build.Options{
				SSHSpecs: []*pb.SSH{
					{
						ID: "test",
					},
				},
			},
			expected: EntitlementConf{
				SSH:    true,
				FSRead: []string{expWd},
			},
		},
		{
			name: "ExportLocal",
			opt: build.Options{
				ExportsLocalPathsTemporary: []string{
					dir1,
					filepath.Join(dir1, "subdir"),
					dir2,
				},
			},
			expected: EntitlementConf{
				FSWrite: func() []string {
					exp := []string{expDir1, expDir2}
					slices.Sort(exp)
					return exp
				}(),
				FSRead: []string{expWd},
			},
		},
		{
			name: "SecretFromSubFile",
			opt: build.Options{
				SecretSpecs: []*pb.Secret{
					{
						FilePath: filepath.Join(dir1, "subfile"),
					},
				},
			},
			conf: EntitlementConf{
				FSRead: []string{wd, dir1},
			},
		},
		{
			name: "SecretFromEscapeLink",
			opt: build.Options{
				SecretSpecs: []*pb.Secret{
					{
						FilePath: escapeLink,
					},
				},
			},
			conf: EntitlementConf{
				FSRead: []string{wd, dir1},
			},
			expected: EntitlementConf{
				FSRead: []string{filepath.Join(expDir1, "../..")},
			},
		},
		{
			name: "SecretFromEscapeLinkAllowRoot",
			opt: build.Options{
				SecretSpecs: []*pb.Secret{
					{
						FilePath: escapeLink,
					},
				},
			},
			conf: EntitlementConf{
				FSRead: []string{"/"},
			},
			expected: EntitlementConf{
				FSRead: func() []string {
					// on windows root (/) is only allowed if it is the same volume as wd
					if filepath.VolumeName(wd) == filepath.VolumeName(escapeLink) {
						return nil
					}
					// if not, then escapeLink is not allowed
					exp, _, err := evaluateToExistingPath(escapeLink)
					require.NoError(t, err)
					exp, err = filepath.EvalSymlinks(exp)
					require.NoError(t, err)
					return []string{exp}
				}(),
			},
		},
		{
			name: "SecretFromEscapeLinkAllowAny",
			opt: build.Options{
				SecretSpecs: []*pb.Secret{
					{
						FilePath: escapeLink,
					},
				},
			},
			conf: EntitlementConf{
				FSRead: []string{"*"},
			},
			expected: EntitlementConf{},
		},
		{
			name: "NonExistingAllowedPathSubpath",
			opt: build.Options{
				ExportsLocalPathsTemporary: []string{
					dir1,
				},
			},
			conf: EntitlementConf{
				FSRead:  []string{wd},
				FSWrite: []string{filepath.Join(dir1, "not/exists")},
			},
			expected: EntitlementConf{
				FSWrite: []string{expDir1}, // dir1 is still needed as only subpath was allowed
			},
		},
		{
			name: "NonExistingAllowedPathMatches",
			opt: build.Options{
				ExportsLocalPathsTemporary: []string{
					filepath.Join(dir1, "not/exists"),
				},
			},
			conf: EntitlementConf{
				FSRead:  []string{wd},
				FSWrite: []string{filepath.Join(dir1, "not/exists")},
			},
			expected: EntitlementConf{
				FSWrite: []string{expDir1}, // dir1 is still needed as build also needs to write not/exists directory
			},
		},
		{
			name: "NonExistingBuildPath",
			opt: build.Options{
				ExportsLocalPathsTemporary: []string{
					filepath.Join(dir1, "not/exists"),
				},
			},
			conf: EntitlementConf{
				FSRead:  []string{wd},
				FSWrite: []string{dir1},
			},
		},
	}

	for _, tc := range tcases {
		t.Run(tc.name, func(t *testing.T) {
			expected, err := tc.conf.Validate(map[string]build.Options{"test": tc.opt})
			require.NoError(t, err)
			require.Equal(t, tc.expected, expected)
		})
	}
}

func TestGroupSamePaths(t *testing.T) {
	tests := []struct {
		name      string
		in1       []string
		in2       []string
		expected1 []string
		expected2 []string
		expectedC []string
	}{
		{
			name:      "All common paths",
			in1:       []string{"/path/a", "/path/b", "/path/c"},
			in2:       []string{"/path/a", "/path/b", "/path/c"},
			expected1: []string{},
			expected2: []string{},
			expectedC: []string{"/path/a", "/path/b", "/path/c"},
		},
		{
			name:      "No common paths",
			in1:       []string{"/path/a", "/path/b"},
			in2:       []string{"/path/c", "/path/d"},
			expected1: []string{"/path/a", "/path/b"},
			expected2: []string{"/path/c", "/path/d"},
			expectedC: []string{},
		},
		{
			name:      "Some common paths",
			in1:       []string{"/path/a", "/path/b", "/path/c"},
			in2:       []string{"/path/b", "/path/c", "/path/d"},
			expected1: []string{"/path/a"},
			expected2: []string{"/path/d"},
			expectedC: []string{"/path/b", "/path/c"},
		},
		{
			name:      "Empty inputs",
			in1:       []string{},
			in2:       []string{},
			expected1: []string{},
			expected2: []string{},
			expectedC: []string{},
		},
		{
			name:      "One empty input",
			in1:       []string{"/path/a", "/path/b"},
			in2:       []string{},
			expected1: []string{"/path/a", "/path/b"},
			expected2: []string{},
			expectedC: []string{},
		},
		{
			name:      "Unsorted inputs with common paths",
			in1:       []string{"/path/c", "/path/a", "/path/b"},
			in2:       []string{"/path/b", "/path/c", "/path/a"},
			expected1: []string{},
			expected2: []string{},
			expectedC: []string{"/path/a", "/path/b", "/path/c"},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			out1, out2, common := groupSamePaths(tt.in1, tt.in2)
			require.Equal(t, tt.expected1, out1, "in1 should match expected1")
			require.Equal(t, tt.expected2, out2, "in2 should match expected2")
			require.Equal(t, tt.expectedC, common, "common should match expectedC")
		})
	}
}
@@ -56,7 +56,7 @@ func formatHCLError(err error, files []File) error {
 			break
 		}
 	}
-	src := errdefs.Source{
+	src := &errdefs.Source{
 		Info: &pb.SourceInfo{
 			Filename: d.Subject.Filename,
 			Data:     dt,
@@ -72,7 +72,7 @@ func formatHCLError(err error, files []File) error {
 
 func toErrRange(in *hcl.Range) *pb.Range {
 	return &pb.Range{
-		Start: pb.Position{Line: int32(in.Start.Line), Character: int32(in.Start.Column)},
-		End:   pb.Position{Line: int32(in.End.Line), Character: int32(in.End.Column)},
+		Start: &pb.Position{Line: int32(in.Start.Line), Character: int32(in.Start.Column)},
+		End:   &pb.Position{Line: int32(in.End.Line), Character: int32(in.End.Column)},
 	}
 }
bake/hcl_test.go (1057 lines)
File diff suppressed because it is too large
bake/hclparser/LICENSE (new file, 355 lines)
@@ -0,0 +1,355 @@
Copyright (c) 2014 HashiCorp, Inc.
Mozilla Public License, version 2.0 (full standard MPL-2.0 license text)
bake/hclparser/body.go (new file, 103 lines)
@@ -0,0 +1,103 @@
package hclparser

import (
	"github.com/hashicorp/hcl/v2"
)

type filterBody struct {
	body    hcl.Body
	schema  *hcl.BodySchema
	exclude bool
}

func FilterIncludeBody(body hcl.Body, schema *hcl.BodySchema) hcl.Body {
	return &filterBody{
		body:   body,
		schema: schema,
	}
}

func FilterExcludeBody(body hcl.Body, schema *hcl.BodySchema) hcl.Body {
	return &filterBody{
		body:    body,
		schema:  schema,
		exclude: true,
	}
}

func (b *filterBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
	if b.exclude {
		schema = subtractSchemas(schema, b.schema)
	} else {
		schema = intersectSchemas(schema, b.schema)
	}
	content, _, diag := b.body.PartialContent(schema)
	return content, diag
}

func (b *filterBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
	if b.exclude {
		schema = subtractSchemas(schema, b.schema)
	} else {
		schema = intersectSchemas(schema, b.schema)
	}
	return b.body.PartialContent(schema)
}

func (b *filterBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
	return b.body.JustAttributes()
}

func (b *filterBody) MissingItemRange() hcl.Range {
	return b.body.MissingItemRange()
}

func intersectSchemas(a, b *hcl.BodySchema) *hcl.BodySchema {
	result := &hcl.BodySchema{}
	for _, blockA := range a.Blocks {
		for _, blockB := range b.Blocks {
			if blockA.Type == blockB.Type {
				result.Blocks = append(result.Blocks, blockA)
				break
			}
		}
	}
	for _, attrA := range a.Attributes {
		for _, attrB := range b.Attributes {
			if attrA.Name == attrB.Name {
				result.Attributes = append(result.Attributes, attrA)
				break
			}
		}
	}
	return result
}

func subtractSchemas(a, b *hcl.BodySchema) *hcl.BodySchema {
	result := &hcl.BodySchema{}
	for _, blockA := range a.Blocks {
		found := false
		for _, blockB := range b.Blocks {
			if blockA.Type == blockB.Type {
				found = true
				break
			}
		}
		if !found {
			result.Blocks = append(result.Blocks, blockA)
		}
	}
	for _, attrA := range a.Attributes {
		found := false
		for _, attrB := range b.Attributes {
			if attrA.Name == attrB.Name {
				found = true
				break
			}
		}
		if !found {
			result.Attributes = append(result.Attributes, attrA)
		}
	}
	return result
}
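As a rough usage sketch, assuming a parsed hcl.Body in a variable named parsedBody and a caller that only wants to see "variable" blocks; the schema contents are illustrative, not taken from this changeset.

	// Hypothetical example: split a body into its "variable" blocks and the rest.
	schema := &hcl.BodySchema{
		Blocks: []hcl.BlockHeaderSchema{
			{Type: "variable", LabelNames: []string{"name"}},
		},
	}
	onlyVars := FilterIncludeBody(parsedBody, schema) // exposes only matching blocks
	theRest := FilterExcludeBody(parsedBody, schema)  // exposes everything else
	content, diags := onlyVars.Content(schema)
	_, _, _ = theRest, content, diags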
@@ -14,15 +14,7 @@ func funcCalls(exp hcl.Expression) ([]string, hcl.Diagnostics) {
 	if !ok {
 		fns, err := jsonFuncCallsRecursive(exp)
 		if err != nil {
-			return nil, hcl.Diagnostics{
-				&hcl.Diagnostic{
-					Severity: hcl.DiagError,
-					Summary:  "Invalid expression",
-					Detail:   err.Error(),
-					Subject:  exp.Range().Ptr(),
-					Context:  exp.Range().Ptr(),
-				},
-			}
+			return nil, wrapErrorDiagnostic("Invalid expression", err, exp.Range().Ptr(), exp.Range().Ptr())
 		}
 		return fns, nil
 	}
@@ -83,11 +75,11 @@ func appendJSONFuncCalls(exp hcl.Expression, m map[string]struct{}) error {
 
 	// hcl/v2/json/ast#stringVal
 	val := src.FieldByName("Value")
-	if val.IsZero() {
+	if !val.IsValid() || val.IsZero() {
 		return nil
 	}
 	rng := src.FieldByName("SrcRange")
-	if val.IsZero() {
+	if rng.IsZero() {
 		return nil
 	}
 	var stringVal struct {
|
|||||||
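The two replaced conditions above guard different failure modes: reflect.Value.FieldByName returns the zero (invalid) Value when a field is absent, and calling IsZero on an invalid Value panics, so IsValid must be checked first; the second hunk also fixes the copy-paste of checking val instead of rng. A small standalone sketch of that reflect behaviour (not part of the commit):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	// A struct without a "SrcRange" field, mirroring the ast#stringVal probe above.
	src := reflect.ValueOf(struct{ Value string }{})

	v := src.FieldByName("SrcRange") // missing field -> zero, invalid reflect.Value
	fmt.Println(v.IsValid())         // false
	// v.IsZero() alone would panic here; the short-circuit never reaches it.
	fmt.Println(!v.IsValid() || v.IsZero()) // true
}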
348  bake/hclparser/gohcl/decode.go  Normal file
@@ -0,0 +1,348 @@
|
|||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
package gohcl
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
|
||||||
|
"github.com/hashicorp/hcl/v2"
|
||||||
|
"github.com/zclconf/go-cty/cty"
|
||||||
|
"github.com/zclconf/go-cty/cty/convert"
|
||||||
|
"github.com/zclconf/go-cty/cty/gocty"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DecodeOptions allows customizing sections of the decoding process.
|
||||||
|
type DecodeOptions struct {
|
||||||
|
ImpliedType func(gv any) (cty.Type, error)
|
||||||
|
Convert func(in cty.Value, want cty.Type) (cty.Value, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o DecodeOptions) DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val any) hcl.Diagnostics {
|
||||||
|
o = o.withDefaults()
|
||||||
|
|
||||||
|
rv := reflect.ValueOf(val)
|
||||||
|
if rv.Kind() != reflect.Ptr {
|
||||||
|
panic(fmt.Sprintf("target value must be a pointer, not %s", rv.Type().String()))
|
||||||
|
}
|
||||||
|
|
||||||
|
return o.decodeBodyToValue(body, ctx, rv.Elem())
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeBody extracts the configuration within the given body into the given
|
||||||
|
// value. This value must be a non-nil pointer to either a struct or
|
||||||
|
// a map, where in the former case the configuration will be decoded using
|
||||||
|
// struct tags and in the latter case only attributes are allowed and their
|
||||||
|
// values are decoded into the map.
|
||||||
|
//
|
||||||
|
// The given EvalContext is used to resolve any variables or functions in
|
||||||
|
// expressions encountered while decoding. This may be nil to require only
|
||||||
|
// constant values, for simple applications that do not support variables or
|
||||||
|
// functions.
|
||||||
|
//
|
||||||
|
// The returned diagnostics should be inspected with its HasErrors method to
|
||||||
|
// determine if the populated value is valid and complete. If error diagnostics
|
||||||
|
// are returned then the given value may have been partially-populated but
|
||||||
|
// may still be accessed by a careful caller for static analysis and editor
|
||||||
|
// integration use-cases.
|
||||||
|
func DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val any) hcl.Diagnostics {
|
||||||
|
return DecodeOptions{}.DecodeBody(body, ctx, val)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o DecodeOptions) decodeBodyToValue(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
|
||||||
|
et := val.Type()
|
||||||
|
switch et.Kind() {
|
||||||
|
case reflect.Struct:
|
||||||
|
return o.decodeBodyToStruct(body, ctx, val)
|
||||||
|
case reflect.Map:
|
||||||
|
return o.decodeBodyToMap(body, ctx, val)
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("target value must be pointer to struct or map, not %s", et.String()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o DecodeOptions) decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
|
||||||
|
schema, partial := ImpliedBodySchema(val.Interface())
|
||||||
|
|
||||||
|
var content *hcl.BodyContent
|
||||||
|
var leftovers hcl.Body
|
||||||
|
var diags hcl.Diagnostics
|
||||||
|
if partial {
|
||||||
|
content, leftovers, diags = body.PartialContent(schema)
|
||||||
|
} else {
|
||||||
|
content, diags = body.Content(schema)
|
||||||
|
}
|
||||||
|
if content == nil {
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
|
||||||
|
tags := getFieldTags(val.Type())
|
||||||
|
|
||||||
|
if tags.Body != nil {
|
||||||
|
fieldIdx := *tags.Body
|
||||||
|
field := val.Type().Field(fieldIdx)
|
||||||
|
fieldV := val.Field(fieldIdx)
|
||||||
|
switch {
|
||||||
|
case bodyType.AssignableTo(field.Type):
|
||||||
|
fieldV.Set(reflect.ValueOf(body))
|
||||||
|
|
||||||
|
default:
|
||||||
|
diags = append(diags, o.decodeBodyToValue(body, ctx, fieldV)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if tags.Remain != nil {
|
||||||
|
fieldIdx := *tags.Remain
|
||||||
|
field := val.Type().Field(fieldIdx)
|
||||||
|
fieldV := val.Field(fieldIdx)
|
||||||
|
switch {
|
||||||
|
case bodyType.AssignableTo(field.Type):
|
||||||
|
fieldV.Set(reflect.ValueOf(leftovers))
|
||||||
|
case attrsType.AssignableTo(field.Type):
|
||||||
|
attrs, attrsDiags := leftovers.JustAttributes()
|
||||||
|
if len(attrsDiags) > 0 {
|
||||||
|
diags = append(diags, attrsDiags...)
|
||||||
|
}
|
||||||
|
fieldV.Set(reflect.ValueOf(attrs))
|
||||||
|
default:
|
||||||
|
diags = append(diags, o.decodeBodyToValue(leftovers, ctx, fieldV)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for name, fieldIdx := range tags.Attributes {
|
||||||
|
attr := content.Attributes[name]
|
||||||
|
field := val.Type().Field(fieldIdx)
|
||||||
|
fieldV := val.Field(fieldIdx)
|
||||||
|
|
||||||
|
if attr == nil {
|
||||||
|
if !exprType.AssignableTo(field.Type) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// As a special case, if the target is of type hcl.Expression then
|
||||||
|
// we'll assign an actual expression that evalues to a cty null,
|
||||||
|
// so the caller can deal with it within the cty realm rather
|
||||||
|
// than within the Go realm.
|
||||||
|
synthExpr := hcl.StaticExpr(cty.NullVal(cty.DynamicPseudoType), body.MissingItemRange())
|
||||||
|
fieldV.Set(reflect.ValueOf(synthExpr))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case attrType.AssignableTo(field.Type):
|
||||||
|
fieldV.Set(reflect.ValueOf(attr))
|
||||||
|
case exprType.AssignableTo(field.Type):
|
||||||
|
fieldV.Set(reflect.ValueOf(attr.Expr))
|
||||||
|
default:
|
||||||
|
diags = append(diags, o.DecodeExpression(
|
||||||
|
attr.Expr, ctx, fieldV.Addr().Interface(),
|
||||||
|
)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
blocksByType := content.Blocks.ByType()
|
||||||
|
|
||||||
|
for typeName, fieldIdx := range tags.Blocks {
|
||||||
|
blocks := blocksByType[typeName]
|
||||||
|
field := val.Type().Field(fieldIdx)
|
||||||
|
|
||||||
|
ty := field.Type
|
||||||
|
isSlice := false
|
||||||
|
isPtr := false
|
||||||
|
if ty.Kind() == reflect.Slice {
|
||||||
|
isSlice = true
|
||||||
|
ty = ty.Elem()
|
||||||
|
}
|
||||||
|
if ty.Kind() == reflect.Ptr {
|
||||||
|
isPtr = true
|
||||||
|
ty = ty.Elem()
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(blocks) > 1 && !isSlice {
|
||||||
|
diags = append(diags, &hcl.Diagnostic{
|
||||||
|
Severity: hcl.DiagError,
|
||||||
|
Summary: fmt.Sprintf("Duplicate %s block", typeName),
|
||||||
|
Detail: fmt.Sprintf(
|
||||||
|
"Only one %s block is allowed. Another was defined at %s.",
|
||||||
|
typeName, blocks[0].DefRange.String(),
|
||||||
|
),
|
||||||
|
Subject: &blocks[1].DefRange,
|
||||||
|
})
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(blocks) == 0 {
|
||||||
|
if isSlice || isPtr {
|
||||||
|
if val.Field(fieldIdx).IsNil() {
|
||||||
|
val.Field(fieldIdx).Set(reflect.Zero(field.Type))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
diags = append(diags, &hcl.Diagnostic{
|
||||||
|
Severity: hcl.DiagError,
|
||||||
|
Summary: fmt.Sprintf("Missing %s block", typeName),
|
||||||
|
Detail: fmt.Sprintf("A %s block is required.", typeName),
|
||||||
|
Subject: body.MissingItemRange().Ptr(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case isSlice:
|
||||||
|
elemType := ty
|
||||||
|
if isPtr {
|
||||||
|
elemType = reflect.PointerTo(ty)
|
||||||
|
}
|
||||||
|
sli := val.Field(fieldIdx)
|
||||||
|
if sli.IsNil() {
|
||||||
|
sli = reflect.MakeSlice(reflect.SliceOf(elemType), len(blocks), len(blocks))
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, block := range blocks {
|
||||||
|
if isPtr {
|
||||||
|
if i >= sli.Len() {
|
||||||
|
sli = reflect.Append(sli, reflect.New(ty))
|
||||||
|
}
|
||||||
|
v := sli.Index(i)
|
||||||
|
if v.IsNil() {
|
||||||
|
v = reflect.New(ty)
|
||||||
|
}
|
||||||
|
diags = append(diags, o.decodeBlockToValue(block, ctx, v.Elem())...)
|
||||||
|
sli.Index(i).Set(v)
|
||||||
|
} else {
|
||||||
|
if i >= sli.Len() {
|
||||||
|
sli = reflect.Append(sli, reflect.Indirect(reflect.New(ty)))
|
||||||
|
}
|
||||||
|
diags = append(diags, o.decodeBlockToValue(block, ctx, sli.Index(i))...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if sli.Len() > len(blocks) {
|
||||||
|
sli.SetLen(len(blocks))
|
||||||
|
}
|
||||||
|
|
||||||
|
val.Field(fieldIdx).Set(sli)
|
||||||
|
|
||||||
|
default:
|
||||||
|
block := blocks[0]
|
||||||
|
if isPtr {
|
||||||
|
v := val.Field(fieldIdx)
|
||||||
|
if v.IsNil() {
|
||||||
|
v = reflect.New(ty)
|
||||||
|
}
|
||||||
|
diags = append(diags, o.decodeBlockToValue(block, ctx, v.Elem())...)
|
||||||
|
val.Field(fieldIdx).Set(v)
|
||||||
|
} else {
|
||||||
|
diags = append(diags, o.decodeBlockToValue(block, ctx, val.Field(fieldIdx))...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o DecodeOptions) decodeBodyToMap(body hcl.Body, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics {
|
||||||
|
attrs, diags := body.JustAttributes()
|
||||||
|
if attrs == nil {
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
|
||||||
|
mv := reflect.MakeMap(v.Type())
|
||||||
|
|
||||||
|
for k, attr := range attrs {
|
||||||
|
switch {
|
||||||
|
case attrType.AssignableTo(v.Type().Elem()):
|
||||||
|
mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr))
|
||||||
|
case exprType.AssignableTo(v.Type().Elem()):
|
||||||
|
mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr.Expr))
|
||||||
|
default:
|
||||||
|
ev := reflect.New(v.Type().Elem())
|
||||||
|
diags = append(diags, o.DecodeExpression(attr.Expr, ctx, ev.Interface())...)
|
||||||
|
mv.SetMapIndex(reflect.ValueOf(k), ev.Elem())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
v.Set(mv)
|
||||||
|
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o DecodeOptions) decodeBlockToValue(block *hcl.Block, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics {
|
||||||
|
diags := o.decodeBodyToValue(block.Body, ctx, v)
|
||||||
|
|
||||||
|
if len(block.Labels) > 0 {
|
||||||
|
blockTags := getFieldTags(v.Type())
|
||||||
|
for li, lv := range block.Labels {
|
||||||
|
lfieldIdx := blockTags.Labels[li].FieldIndex
|
||||||
|
v.Field(lfieldIdx).Set(reflect.ValueOf(lv))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o DecodeOptions) DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val any) hcl.Diagnostics {
|
||||||
|
o = o.withDefaults()
|
||||||
|
|
||||||
|
srcVal, diags := expr.Value(ctx)
|
||||||
|
|
||||||
|
convTy, err := o.ImpliedType(val)
|
||||||
|
if err != nil {
|
||||||
|
panic(fmt.Sprintf("unsuitable DecodeExpression target: %s", err))
|
||||||
|
}
|
||||||
|
|
||||||
|
srcVal, err = o.Convert(srcVal, convTy)
|
||||||
|
if err != nil {
|
||||||
|
diags = append(diags, &hcl.Diagnostic{
|
||||||
|
Severity: hcl.DiagError,
|
||||||
|
Summary: "Unsuitable value type",
|
||||||
|
Detail: fmt.Sprintf("Unsuitable value: %s", err.Error()),
|
||||||
|
Subject: expr.StartRange().Ptr(),
|
||||||
|
Context: expr.Range().Ptr(),
|
||||||
|
})
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
|
||||||
|
err = gocty.FromCtyValue(srcVal, val)
|
||||||
|
if err != nil {
|
||||||
|
diags = append(diags, &hcl.Diagnostic{
|
||||||
|
Severity: hcl.DiagError,
|
||||||
|
Summary: "Unsuitable value type",
|
||||||
|
Detail: fmt.Sprintf("Unsuitable value: %s", err.Error()),
|
||||||
|
Subject: expr.StartRange().Ptr(),
|
||||||
|
Context: expr.Range().Ptr(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeExpression extracts the value of the given expression into the given
|
||||||
|
// value. This value must be something that gocty is able to decode into,
|
||||||
|
// since the final decoding is delegated to that package.
|
||||||
|
//
|
||||||
|
// The given EvalContext is used to resolve any variables or functions in
|
||||||
|
// expressions encountered while decoding. This may be nil to require only
|
||||||
|
// constant values, for simple applications that do not support variables or
|
||||||
|
// functions.
|
||||||
|
//
|
||||||
|
// The returned diagnostics should be inspected with its HasErrors method to
|
||||||
|
// determine if the populated value is valid and complete. If error diagnostics
|
||||||
|
// are returned then the given value may have been partially-populated but
|
||||||
|
// may still be accessed by a careful caller for static analysis and editor
|
||||||
|
// integration use-cases.
|
||||||
|
func DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val any) hcl.Diagnostics {
|
||||||
|
return DecodeOptions{}.DecodeExpression(expr, ctx, val)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o DecodeOptions) withDefaults() DecodeOptions {
|
||||||
|
if o.ImpliedType == nil {
|
||||||
|
o.ImpliedType = gocty.ImpliedType
|
||||||
|
}
|
||||||
|
|
||||||
|
if o.Convert == nil {
|
||||||
|
o.Convert = convert.Convert
|
||||||
|
}
|
||||||
|
return o
|
||||||
|
}
|
||||||
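decode.go above is a vendored copy of upstream gohcl extended with DecodeOptions, whose ImpliedType and Convert hooks default to gocty.ImpliedType and convert.Convert. A sketch of how a caller could override the conversion step; the import path of the vendored package and the wrapper function are assumptions for illustration only:

package main

import (
	"github.com/docker/buildx/bake/hclparser/gohcl" // assumed import path of the vendored package
	"github.com/hashicorp/hcl/v2"
	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

func decodeWithHook(body hcl.Body) (string, hcl.Diagnostics) {
	var target struct {
		Dockerfile string `hcl:"dockerfile,optional"`
	}
	opts := gohcl.DecodeOptions{
		// Same behaviour as the default; a real caller could trace or widen
		// conversions here before delegating to cty's convert package.
		Convert: func(in cty.Value, want cty.Type) (cty.Value, error) {
			return convert.Convert(in, want)
		},
	}
	diags := opts.DecodeBody(body, nil, &target)
	return target.Dockerfile, diags
}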
806  bake/hclparser/gohcl/decode_test.go  Normal file
@@ -0,0 +1,806 @@
|
|||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
package gohcl
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/davecgh/go-spew/spew"
|
||||||
|
"github.com/hashicorp/hcl/v2"
|
||||||
|
hclJSON "github.com/hashicorp/hcl/v2/json"
|
||||||
|
"github.com/zclconf/go-cty/cty"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestDecodeBody(t *testing.T) {
|
||||||
|
deepEquals := func(other any) func(v any) bool {
|
||||||
|
return func(v any) bool {
|
||||||
|
return reflect.DeepEqual(v, other)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type withNameExpression struct {
|
||||||
|
Name hcl.Expression `hcl:"name"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type withTwoAttributes struct {
|
||||||
|
A string `hcl:"a,optional"`
|
||||||
|
B string `hcl:"b,optional"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type withNestedBlock struct {
|
||||||
|
Plain string `hcl:"plain,optional"`
|
||||||
|
Nested *withTwoAttributes `hcl:"nested,block"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type withListofNestedBlocks struct {
|
||||||
|
Nested []*withTwoAttributes `hcl:"nested,block"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type withListofNestedBlocksNoPointers struct {
|
||||||
|
Nested []withTwoAttributes `hcl:"nested,block"`
|
||||||
|
}
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
Body map[string]any
|
||||||
|
Target func() any
|
||||||
|
Check func(v any) bool
|
||||||
|
DiagCount int
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
map[string]any{},
|
||||||
|
makeInstantiateType(struct{}{}),
|
||||||
|
deepEquals(struct{}{}),
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
}{}),
|
||||||
|
deepEquals(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
}{}),
|
||||||
|
1, // name is required
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Name *string `hcl:"name"`
|
||||||
|
}{}),
|
||||||
|
deepEquals(struct {
|
||||||
|
Name *string `hcl:"name"`
|
||||||
|
}{}),
|
||||||
|
0,
|
||||||
|
}, // name nil
|
||||||
|
{
|
||||||
|
map[string]any{},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Name string `hcl:"name,optional"`
|
||||||
|
}{}),
|
||||||
|
deepEquals(struct {
|
||||||
|
Name string `hcl:"name,optional"`
|
||||||
|
}{}),
|
||||||
|
0,
|
||||||
|
}, // name optional
|
||||||
|
{
|
||||||
|
map[string]any{},
|
||||||
|
makeInstantiateType(withNameExpression{}),
|
||||||
|
func(v any) bool {
|
||||||
|
if v == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
wne, valid := v.(withNameExpression)
|
||||||
|
if !valid {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if wne.Name == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
nameVal, _ := wne.Name.Value(nil)
|
||||||
|
return nameVal.IsNull()
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
},
|
||||||
|
makeInstantiateType(withNameExpression{}),
|
||||||
|
func(v any) bool {
|
||||||
|
if v == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
wne, valid := v.(withNameExpression)
|
||||||
|
if !valid {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if wne.Name == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
nameVal, _ := wne.Name.Value(nil)
|
||||||
|
return nameVal.Equals(cty.StringVal("Ermintrude")).True()
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
}{}),
|
||||||
|
deepEquals(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
}{"Ermintrude"}),
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
"age": 23,
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
}{}),
|
||||||
|
deepEquals(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
}{"Ermintrude"}),
|
||||||
|
1, // Extraneous "age" property
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
"age": 50,
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
Attrs hcl.Attributes `hcl:",remain"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
got := gotI.(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
Attrs hcl.Attributes `hcl:",remain"`
|
||||||
|
})
|
||||||
|
return got.Name == "Ermintrude" && len(got.Attrs) == 1 && got.Attrs["age"] != nil
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
"age": 50,
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
Remain hcl.Body `hcl:",remain"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
got := gotI.(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
Remain hcl.Body `hcl:",remain"`
|
||||||
|
})
|
||||||
|
|
||||||
|
attrs, _ := got.Remain.JustAttributes()
|
||||||
|
|
||||||
|
return got.Name == "Ermintrude" && len(attrs) == 1 && attrs["age"] != nil
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
"living": true,
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
Remain map[string]cty.Value `hcl:",remain"`
|
||||||
|
}{}),
|
||||||
|
deepEquals(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
Remain map[string]cty.Value `hcl:",remain"`
|
||||||
|
}{
|
||||||
|
Name: "Ermintrude",
|
||||||
|
Remain: map[string]cty.Value{
|
||||||
|
"living": cty.True,
|
||||||
|
},
|
||||||
|
}),
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
"age": 50,
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
Body hcl.Body `hcl:",body"`
|
||||||
|
Remain hcl.Body `hcl:",remain"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
got := gotI.(struct {
|
||||||
|
Name string `hcl:"name"`
|
||||||
|
Body hcl.Body `hcl:",body"`
|
||||||
|
Remain hcl.Body `hcl:",remain"`
|
||||||
|
})
|
||||||
|
|
||||||
|
attrs, _ := got.Body.JustAttributes()
|
||||||
|
|
||||||
|
return got.Name == "Ermintrude" && len(attrs) == 2 &&
|
||||||
|
attrs["name"] != nil && attrs["age"] != nil
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": map[string]any{},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
// Generating no diagnostics is good enough for this one.
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": []map[string]any{{}},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
// Generating no diagnostics is good enough for this one.
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": []map[string]any{{}, {}},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
// Generating one diagnostic is good enough for this one.
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
// Generating one diagnostic is good enough for this one.
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": []map[string]any{},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
// Generating one diagnostic is good enough for this one.
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": map[string]any{},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle *struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
return gotI.(struct {
|
||||||
|
Noodle *struct{} `hcl:"noodle,block"`
|
||||||
|
}).Noodle != nil
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": []map[string]any{{}},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle *struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
return gotI.(struct {
|
||||||
|
Noodle *struct{} `hcl:"noodle,block"`
|
||||||
|
}).Noodle != nil
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": []map[string]any{},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle *struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
return gotI.(struct {
|
||||||
|
Noodle *struct{} `hcl:"noodle,block"`
|
||||||
|
}).Noodle == nil
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": []map[string]any{{}, {}},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle *struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
// Generating one diagnostic is good enough for this one.
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": []map[string]any{},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle []struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
noodle := gotI.(struct {
|
||||||
|
Noodle []struct{} `hcl:"noodle,block"`
|
||||||
|
}).Noodle
|
||||||
|
return len(noodle) == 0
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": []map[string]any{{}},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle []struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
noodle := gotI.(struct {
|
||||||
|
Noodle []struct{} `hcl:"noodle,block"`
|
||||||
|
}).Noodle
|
||||||
|
return len(noodle) == 1
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": []map[string]any{{}, {}},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle []struct{} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
noodle := gotI.(struct {
|
||||||
|
Noodle []struct{} `hcl:"noodle,block"`
|
||||||
|
}).Noodle
|
||||||
|
return len(noodle) == 2
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": map[string]any{},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle struct {
|
||||||
|
Name string `hcl:"name,label"`
|
||||||
|
} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
//nolint:misspell
|
||||||
|
// Generating two diagnostics is good enough for this one.
|
||||||
|
// (one for the missing noodle block and the other for
|
||||||
|
// the JSON serialization detecting the missing level of
|
||||||
|
// heirarchy for the label.)
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
2,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": map[string]any{
|
||||||
|
"foo_foo": map[string]any{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle struct {
|
||||||
|
Name string `hcl:"name,label"`
|
||||||
|
} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
noodle := gotI.(struct {
|
||||||
|
Noodle struct {
|
||||||
|
Name string `hcl:"name,label"`
|
||||||
|
} `hcl:"noodle,block"`
|
||||||
|
}).Noodle
|
||||||
|
return noodle.Name == "foo_foo"
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": map[string]any{
|
||||||
|
"foo_foo": map[string]any{},
|
||||||
|
"bar_baz": map[string]any{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle struct {
|
||||||
|
Name string `hcl:"name,label"`
|
||||||
|
} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
// One diagnostic is enough for this one.
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": map[string]any{
|
||||||
|
"foo_foo": map[string]any{},
|
||||||
|
"bar_baz": map[string]any{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodles []struct {
|
||||||
|
Name string `hcl:"name,label"`
|
||||||
|
} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
noodles := gotI.(struct {
|
||||||
|
Noodles []struct {
|
||||||
|
Name string `hcl:"name,label"`
|
||||||
|
} `hcl:"noodle,block"`
|
||||||
|
}).Noodles
|
||||||
|
return len(noodles) == 2 && (noodles[0].Name == "foo_foo" || noodles[0].Name == "bar_baz") && (noodles[1].Name == "foo_foo" || noodles[1].Name == "bar_baz") && noodles[0].Name != noodles[1].Name
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"noodle": map[string]any{
|
||||||
|
"foo_foo": map[string]any{
|
||||||
|
"type": "rice",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
makeInstantiateType(struct {
|
||||||
|
Noodle struct {
|
||||||
|
Name string `hcl:"name,label"`
|
||||||
|
Type string `hcl:"type"`
|
||||||
|
} `hcl:"noodle,block"`
|
||||||
|
}{}),
|
||||||
|
func(gotI any) bool {
|
||||||
|
noodle := gotI.(struct {
|
||||||
|
Noodle struct {
|
||||||
|
Name string `hcl:"name,label"`
|
||||||
|
Type string `hcl:"type"`
|
||||||
|
} `hcl:"noodle,block"`
|
||||||
|
}).Noodle
|
||||||
|
return noodle.Name == "foo_foo" && noodle.Type == "rice"
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
"age": 34,
|
||||||
|
},
|
||||||
|
makeInstantiateType(map[string]string(nil)),
|
||||||
|
deepEquals(map[string]string{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
"age": "34",
|
||||||
|
}),
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
"age": 89,
|
||||||
|
},
|
||||||
|
makeInstantiateType(map[string]*hcl.Attribute(nil)),
|
||||||
|
func(gotI any) bool {
|
||||||
|
got := gotI.(map[string]*hcl.Attribute)
|
||||||
|
return len(got) == 2 && got["name"] != nil && got["age"] != nil
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
"age": 13,
|
||||||
|
},
|
||||||
|
makeInstantiateType(map[string]hcl.Expression(nil)),
|
||||||
|
func(gotI any) bool {
|
||||||
|
got := gotI.(map[string]hcl.Expression)
|
||||||
|
return len(got) == 2 && got["name"] != nil && got["age"] != nil
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]any{
|
||||||
|
"name": "Ermintrude",
|
||||||
|
"living": true,
|
||||||
|
},
|
||||||
|
makeInstantiateType(map[string]cty.Value(nil)),
|
||||||
|
deepEquals(map[string]cty.Value{
|
||||||
|
"name": cty.StringVal("Ermintrude"),
|
||||||
|
"living": cty.True,
|
||||||
|
}),
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Retain "nested" block while decoding
|
||||||
|
map[string]any{
|
||||||
|
"plain": "foo",
|
||||||
|
},
|
||||||
|
func() any {
|
||||||
|
return &withNestedBlock{
|
||||||
|
Plain: "bar",
|
||||||
|
Nested: &withTwoAttributes{
|
||||||
|
A: "bar",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
},
|
||||||
|
func(gotI any) bool {
|
||||||
|
foo := gotI.(withNestedBlock)
|
||||||
|
return foo.Plain == "foo" && foo.Nested != nil && foo.Nested.A == "bar"
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Retain values in "nested" block while decoding
|
||||||
|
map[string]any{
|
||||||
|
"nested": map[string]any{
|
||||||
|
"a": "foo",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
func() any {
|
||||||
|
return &withNestedBlock{
|
||||||
|
Nested: &withTwoAttributes{
|
||||||
|
B: "bar",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
},
|
||||||
|
func(gotI any) bool {
|
||||||
|
foo := gotI.(withNestedBlock)
|
||||||
|
return foo.Nested.A == "foo" && foo.Nested.B == "bar"
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Retain values in "nested" block list while decoding
|
||||||
|
map[string]any{
|
||||||
|
"nested": []map[string]any{
|
||||||
|
{
|
||||||
|
"a": "foo",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
func() any {
|
||||||
|
return &withListofNestedBlocks{
|
||||||
|
Nested: []*withTwoAttributes{
|
||||||
|
{
|
||||||
|
B: "bar",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
},
|
||||||
|
func(gotI any) bool {
|
||||||
|
n := gotI.(withListofNestedBlocks)
|
||||||
|
return n.Nested[0].A == "foo" && n.Nested[0].B == "bar"
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Remove additional elements from the list while decoding nested blocks
|
||||||
|
map[string]any{
|
||||||
|
"nested": []map[string]any{
|
||||||
|
{
|
||||||
|
"a": "foo",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
func() any {
|
||||||
|
return &withListofNestedBlocks{
|
||||||
|
Nested: []*withTwoAttributes{
|
||||||
|
{
|
||||||
|
B: "bar",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
B: "bar",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
},
|
||||||
|
func(gotI any) bool {
|
||||||
|
n := gotI.(withListofNestedBlocks)
|
||||||
|
return len(n.Nested) == 1
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Make sure decoding value slices works the same as pointer slices.
|
||||||
|
map[string]any{
|
||||||
|
"nested": []map[string]any{
|
||||||
|
{
|
||||||
|
"b": "bar",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"b": "baz",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
func() any {
|
||||||
|
return &withListofNestedBlocksNoPointers{
|
||||||
|
Nested: []withTwoAttributes{
|
||||||
|
{
|
||||||
|
B: "foo",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
},
|
||||||
|
func(gotI any) bool {
|
||||||
|
n := gotI.(withListofNestedBlocksNoPointers)
|
||||||
|
return n.Nested[0].B == "bar" && len(n.Nested) == 2
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, test := range tests {
|
||||||
|
// For convenience here we're going to use the JSON parser
|
||||||
|
// to process the given body.
|
||||||
|
buf, err := json.Marshal(test.Body)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("error JSON-encoding body for test %d: %s", i, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run(string(buf), func(t *testing.T) {
|
||||||
|
file, diags := hclJSON.Parse(buf, "test.json")
|
||||||
|
if len(diags) != 0 {
|
||||||
|
t.Fatalf("diagnostics while parsing: %s", diags.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
targetVal := reflect.ValueOf(test.Target())
|
||||||
|
|
||||||
|
diags = DecodeBody(file.Body, nil, targetVal.Interface())
|
||||||
|
if len(diags) != test.DiagCount {
|
||||||
|
t.Errorf("wrong number of diagnostics %d; want %d", len(diags), test.DiagCount)
|
||||||
|
for _, diag := range diags {
|
||||||
|
t.Logf(" - %s", diag.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
got := targetVal.Elem().Interface()
|
||||||
|
if !test.Check(got) {
|
||||||
|
t.Errorf("wrong result\ngot: %s", spew.Sdump(got))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDecodeExpression(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
Value cty.Value
|
||||||
|
Target any
|
||||||
|
Want any
|
||||||
|
DiagCount int
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
cty.StringVal("hello"),
|
||||||
|
"",
|
||||||
|
"hello",
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
cty.StringVal("hello"),
|
||||||
|
cty.NilVal,
|
||||||
|
cty.StringVal("hello"),
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
cty.NumberIntVal(2),
|
||||||
|
"",
|
||||||
|
"2",
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
cty.StringVal("true"),
|
||||||
|
false,
|
||||||
|
true,
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
cty.NullVal(cty.String),
|
||||||
|
"",
|
||||||
|
"",
|
||||||
|
1, // null value is not allowed
|
||||||
|
},
|
||||||
|
{
|
||||||
|
cty.UnknownVal(cty.String),
|
||||||
|
"",
|
||||||
|
"",
|
||||||
|
1, // value must be known
|
||||||
|
},
|
||||||
|
{
|
||||||
|
cty.ListVal([]cty.Value{cty.True}),
|
||||||
|
false,
|
||||||
|
false,
|
||||||
|
1, // bool required
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, test := range tests {
|
||||||
|
t.Run(fmt.Sprintf("%02d", i), func(t *testing.T) {
|
||||||
|
expr := &fixedExpression{test.Value}
|
||||||
|
|
||||||
|
targetVal := reflect.New(reflect.TypeOf(test.Target))
|
||||||
|
|
||||||
|
diags := DecodeExpression(expr, nil, targetVal.Interface())
|
||||||
|
if len(diags) != test.DiagCount {
|
||||||
|
t.Errorf("wrong number of diagnostics %d; want %d", len(diags), test.DiagCount)
|
||||||
|
for _, diag := range diags {
|
||||||
|
t.Logf(" - %s", diag.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
got := targetVal.Elem().Interface()
|
||||||
|
if !reflect.DeepEqual(got, test.Want) {
|
||||||
|
t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type fixedExpression struct {
|
||||||
|
val cty.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *fixedExpression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
|
||||||
|
return e.val, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *fixedExpression) Range() (r hcl.Range) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *fixedExpression) StartRange() (r hcl.Range) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *fixedExpression) Variables() []hcl.Traversal {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func makeInstantiateType(target any) func() any {
|
||||||
|
return func() any {
|
||||||
|
return reflect.New(reflect.TypeOf(target)).Interface()
|
||||||
|
}
|
||||||
|
}
|
||||||
65  bake/hclparser/gohcl/doc.go  Normal file
@@ -0,0 +1,65 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

// Package gohcl allows decoding HCL configurations into Go data structures.
//
// It provides a convenient and concise way of describing the schema for
// configuration and then accessing the resulting data via native Go
// types.
//
// A struct field tag scheme is used, similar to other decoding and
// unmarshalling libraries. The tags are formatted as in the following example:
//
//	ThingType string `hcl:"thing_type,attr"`
//
// Within each tag there are two comma-separated tokens. The first is the
// name of the corresponding construct in configuration, while the second
// is a keyword giving the kind of construct expected. The following
// kind keywords are supported:
//
//	attr (the default) indicates that the value is to be populated from an attribute
//	block indicates that the value is to populated from a block
//	label indicates that the value is to populated from a block label
//	optional is the same as attr, but the field is optional
//	remain indicates that the value is to be populated from the remaining body after populating other fields
//
// "attr" fields may either be of type *hcl.Expression, in which case the raw
// expression is assigned, or of any type accepted by gocty, in which case
// gocty will be used to assign the value to a native Go type.
//
// "block" fields may be a struct that recursively uses the same tags, or a
// slice of such structs, in which case multiple blocks of the corresponding
// type are decoded into the slice.
//
// "body" can be placed on a single field of type hcl.Body to capture
// the full hcl.Body that was decoded for a block. This does not allow leftover
// values like "remain", so a decoding error will still be returned if leftover
// fields are given. If you want to capture the decoding body PLUS leftover
// fields, you must specify a "remain" field as well to prevent errors. The
// body field and the remain field will both contain the leftover fields.
//
// "label" fields are considered only in a struct used as the type of a field
// marked as "block", and are used sequentially to capture the labels of
// the blocks being decoded. In this case, the name token is used only as
// an identifier for the label in diagnostic messages.
//
// "optional" fields behave like "attr" fields, but they are optional
// and will not give parsing errors if they are missing.
//
// "remain" can be placed on a single field that may be either of type
// hcl.Body or hcl.Attributes, in which case any remaining body content is
// placed into this field for delayed processing. If no "remain" field is
// present then any attributes or blocks not matched by another valid tag
// will cause an error diagnostic.
//
// Only a subset of this tagging/typing vocabulary is supported for the
// "Encode" family of functions. See the EncodeIntoBody docs for full details
// on the constraints there.
//
// Broadly-speaking this package deals with two types of error. The first is
// errors in the configuration itself, which are returned as diagnostics
// written with the configuration author as the target audience. The second
// is bugs in the calling program, such as invalid struct tags, which are
// surfaced via panics since there can be no useful runtime handling of such
// errors and they should certainly not be returned to the user as diagnostics.
package gohcl
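A short, self-contained decode example tying the tag keywords above together; the struct shape, HCL snippet, and the import path of the vendored package are invented for illustration and are not part of this change.

package main

import (
	"fmt"

	"github.com/docker/buildx/bake/hclparser/gohcl" // assumed import path of the vendored package
	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

type Target struct {
	Name       string   `hcl:"name,label"`
	Dockerfile string   `hcl:"dockerfile,optional"`
	Tags       []string `hcl:"tags,optional"`
	Remain     hcl.Body `hcl:",remain"` // catches anything not matched above
}

type Config struct {
	Targets []Target `hcl:"target,block"`
}

func main() {
	src := []byte(`
target "app" {
  dockerfile = "Dockerfile"
  tags       = ["app:latest"]
  platforms  = ["linux/amd64"] // ends up in Remain
}
`)
	file, diags := hclsyntax.ParseConfig(src, "example.hcl", hcl.InitialPos)
	if diags.HasErrors() {
		panic(diags)
	}
	var cfg Config
	if diags := gohcl.DecodeBody(file.Body, nil, &cfg); diags.HasErrors() {
		panic(diags)
	}
	fmt.Println(cfg.Targets[0].Name, cfg.Targets[0].Tags) // app [app:latest]
}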
192  bake/hclparser/gohcl/encode.go  Normal file
@@ -0,0 +1,192 @@
|
|||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
package gohcl
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
|
||||||
|
"github.com/hashicorp/hcl/v2/hclwrite"
|
||||||
|
"github.com/zclconf/go-cty/cty/gocty"
|
||||||
|
)
|
||||||
|
|
||||||
|
// EncodeIntoBody replaces the contents of the given hclwrite Body with
|
||||||
|
// attributes and blocks derived from the given value, which must be a
|
||||||
|
// struct value or a pointer to a struct value with the struct tags defined
|
||||||
|
// in this package.
|
||||||
|
//
|
||||||
|
// This function can work only with fully-decoded data. It will ignore any
|
||||||
|
// fields tagged as "remain", any fields that decode attributes into either
|
||||||
|
// hcl.Attribute or hcl.Expression values, and any fields that decode blocks
|
||||||
|
// into hcl.Attributes values. This function does not have enough information
|
||||||
|
// to complete the decoding of these types.
|
||||||
|
//
|
||||||
|
// Any fields tagged as "label" are ignored by this function. Use EncodeAsBlock
|
||||||
|
// to produce a whole hclwrite.Block including block labels.
|
||||||
|
//
|
||||||
|
// As long as a suitable value is given to encode and the destination body
|
||||||
|
// is non-nil, this function will always complete. It will panic in case of
|
||||||
|
// any errors in the calling program, such as passing an inappropriate type
|
||||||
|
// or a nil body.
|
||||||
|
//
|
||||||
|
// The layout of the resulting HCL source is derived from the ordering of
|
||||||
|
// the struct fields, with blank lines around nested blocks of different types.
|
||||||
|
// Fields representing attributes should usually precede those representing
|
||||||
|
// blocks so that the attributes can group together in the result. For more
|
||||||
|
// control, use the hclwrite API directly.
|
||||||
|
func EncodeIntoBody(val any, dst *hclwrite.Body) {
|
||||||
|
rv := reflect.ValueOf(val)
|
||||||
|
ty := rv.Type()
|
||||||
|
if ty.Kind() == reflect.Ptr {
|
||||||
|
rv = rv.Elem()
|
||||||
|
ty = rv.Type()
|
||||||
|
}
|
||||||
|
if ty.Kind() != reflect.Struct {
|
||||||
|
panic(fmt.Sprintf("value is %s, not struct", ty.Kind()))
|
||||||
|
}
|
||||||
|
|
||||||
|
tags := getFieldTags(ty)
|
||||||
|
populateBody(rv, ty, tags, dst)
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeAsBlock creates a new hclwrite.Block populated with the data from
|
||||||
|
// the given value, which must be a struct or pointer to struct with the
|
||||||
|
// struct tags defined in this package.
|
||||||
|
//
|
||||||
|
// If the given struct type has fields tagged with "label" tags then they
|
||||||
|
// will be used in order to annotate the created block with labels.
|
||||||
|
//
|
||||||
|
// This function has the same constraints as EncodeIntoBody and will panic
|
||||||
|
// if they are violated.
|
||||||
|
func EncodeAsBlock(val any, blockType string) *hclwrite.Block {
|
||||||
|
rv := reflect.ValueOf(val)
|
||||||
|
ty := rv.Type()
|
||||||
|
if ty.Kind() == reflect.Ptr {
|
||||||
|
rv = rv.Elem()
|
||||||
|
ty = rv.Type()
|
||||||
|
}
|
||||||
|
if ty.Kind() != reflect.Struct {
|
||||||
|
panic(fmt.Sprintf("value is %s, not struct", ty.Kind()))
|
||||||
|
}
|
||||||
|
|
||||||
|
tags := getFieldTags(ty)
|
||||||
|
labels := make([]string, len(tags.Labels))
|
||||||
|
for i, lf := range tags.Labels {
|
||||||
|
lv := rv.Field(lf.FieldIndex)
|
||||||
|
// We just stringify whatever we find. It should always be a string
|
||||||
|
// but if not then we'll still do something reasonable.
|
||||||
|
labels[i] = fmt.Sprintf("%s", lv.Interface())
|
||||||
|
}
|
||||||
|
|
||||||
|
block := hclwrite.NewBlock(blockType, labels)
|
||||||
|
populateBody(rv, ty, tags, block.Body())
|
||||||
|
return block
|
||||||
|
}
|
||||||
|
|
||||||
|
func populateBody(rv reflect.Value, ty reflect.Type, tags *fieldTags, dst *hclwrite.Body) {
|
||||||
|
nameIdxs := make(map[string]int, len(tags.Attributes)+len(tags.Blocks))
|
||||||
|
namesOrder := make([]string, 0, len(tags.Attributes)+len(tags.Blocks))
|
||||||
|
for n, i := range tags.Attributes {
|
||||||
|
nameIdxs[n] = i
|
||||||
|
namesOrder = append(namesOrder, n)
|
||||||
|
}
|
||||||
|
for n, i := range tags.Blocks {
|
||||||
|
nameIdxs[n] = i
|
||||||
|
namesOrder = append(namesOrder, n)
|
||||||
|
}
|
||||||
|
sort.SliceStable(namesOrder, func(i, j int) bool {
|
||||||
|
ni, nj := namesOrder[i], namesOrder[j]
|
||||||
|
return nameIdxs[ni] < nameIdxs[nj]
|
||||||
|
})
|
||||||
|
|
||||||
|
dst.Clear()
|
||||||
|
|
||||||
|
prevWasBlock := false
|
||||||
|
for _, name := range namesOrder {
|
||||||
|
fieldIdx := nameIdxs[name]
|
||||||
|
field := ty.Field(fieldIdx)
|
||||||
|
fieldTy := field.Type
|
||||||
|
fieldVal := rv.Field(fieldIdx)
|
||||||
|
|
||||||
|
if fieldTy.Kind() == reflect.Ptr {
|
||||||
|
fieldTy = fieldTy.Elem()
|
||||||
|
fieldVal = fieldVal.Elem()
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, isAttr := tags.Attributes[name]; isAttr {
|
||||||
|
if exprType.AssignableTo(fieldTy) || attrType.AssignableTo(fieldTy) {
|
||||||
|
continue // ignore undecoded fields
|
||||||
|
}
|
||||||
|
if !fieldVal.IsValid() {
|
||||||
|
continue // ignore (field value is nil pointer)
|
||||||
|
}
|
||||||
|
if fieldTy.Kind() == reflect.Ptr && fieldVal.IsNil() {
|
||||||
|
continue // ignore
|
||||||
|
}
|
||||||
|
if prevWasBlock {
|
||||||
|
dst.AppendNewline()
|
||||||
|
prevWasBlock = false
|
||||||
|
}
|
||||||
|
|
||||||
|
valTy, err := gocty.ImpliedType(fieldVal.Interface())
|
||||||
|
if err != nil {
|
||||||
|
panic(fmt.Sprintf("cannot encode %T as HCL expression: %s", fieldVal.Interface(), err))
|
||||||
|
}
|
||||||
|
|
||||||
|
val, err := gocty.ToCtyValue(fieldVal.Interface(), valTy)
|
||||||
|
if err != nil {
|
||||||
|
// This should never happen, since we should always be able
|
||||||
|
// to decode into the implied type.
|
||||||
|
panic(fmt.Sprintf("failed to encode %T as %#v: %s", fieldVal.Interface(), valTy, err))
|
||||||
|
}
|
||||||
|
|
||||||
|
dst.SetAttributeValue(name, val)
|
||||||
|
} else { // must be a block, then
|
||||||
|
elemTy := fieldTy
|
||||||
|
isSeq := false
|
||||||
|
if elemTy.Kind() == reflect.Slice || elemTy.Kind() == reflect.Array {
|
||||||
|
isSeq = true
|
||||||
|
elemTy = elemTy.Elem()
|
||||||
|
}
|
||||||
|
|
||||||
|
if bodyType.AssignableTo(elemTy) || attrsType.AssignableTo(elemTy) {
|
||||||
|
continue // ignore undecoded fields
|
||||||
|
}
|
||||||
|
prevWasBlock = false
|
||||||
|
|
||||||
|
if isSeq {
|
||||||
|
l := fieldVal.Len()
|
||||||
|
for i := range l {
|
||||||
|
elemVal := fieldVal.Index(i)
|
||||||
|
if !elemVal.IsValid() {
|
||||||
|
continue // ignore (elem value is nil pointer)
|
||||||
|
}
|
||||||
|
if elemTy.Kind() == reflect.Ptr && elemVal.IsNil() {
|
||||||
|
continue // ignore
|
||||||
|
}
|
||||||
|
block := EncodeAsBlock(elemVal.Interface(), name)
|
||||||
|
if !prevWasBlock {
|
||||||
|
dst.AppendNewline()
|
||||||
|
prevWasBlock = true
|
||||||
|
}
|
||||||
|
dst.AppendBlock(block)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if !fieldVal.IsValid() {
|
||||||
|
continue // ignore (field value is nil pointer)
|
||||||
|
}
|
||||||
|
if elemTy.Kind() == reflect.Ptr && fieldVal.IsNil() {
|
||||||
|
continue // ignore
|
||||||
|
}
|
||||||
|
block := EncodeAsBlock(fieldVal.Interface(), name)
|
||||||
|
if !prevWasBlock {
|
||||||
|
dst.AppendNewline()
|
||||||
|
prevWasBlock = true
|
||||||
|
}
|
||||||
|
dst.AppendBlock(block)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
67  bake/hclparser/gohcl/encode_test.go  Normal file
@@ -0,0 +1,67 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package gohcl_test

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/gohcl"
	"github.com/hashicorp/hcl/v2/hclwrite"
)

func ExampleEncodeIntoBody() {
	type Service struct {
		Name string   `hcl:"name,label"`
		Exe  []string `hcl:"executable"`
	}
	type Constraints struct {
		OS   string `hcl:"os"`
		Arch string `hcl:"arch"`
	}
	type App struct {
		Name        string       `hcl:"name"`
		Desc        string       `hcl:"description"`
		Constraints *Constraints `hcl:"constraints,block"`
		Services    []Service    `hcl:"service,block"`
	}

	app := App{
		Name: "awesome-app",
		Desc: "Such an awesome application",
		Constraints: &Constraints{
			OS:   "linux",
			Arch: "amd64",
		},
		Services: []Service{
			{
				Name: "web",
				Exe:  []string{"./web", "--listen=:8080"},
			},
			{
				Name: "worker",
				Exe:  []string{"./worker"},
			},
		},
	}

	f := hclwrite.NewEmptyFile()
	gohcl.EncodeIntoBody(&app, f.Body())
	fmt.Printf("%s", f.Bytes())

	// Output:
	// name        = "awesome-app"
	// description = "Such an awesome application"
	//
	// constraints {
	//   os   = "linux"
	//   arch = "amd64"
	// }
	//
	// service "web" {
	//   executable = ["./web", "--listen=:8080"]
	// }
	// service "worker" {
	//   executable = ["./worker"]
	// }
}
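The example above exercises EncodeIntoBody only; EncodeAsBlock (also defined in encode.go) instead returns a single labeled hclwrite.Block. A small sketch reusing the Service shape from the example; the surrounding main function and the exact output spacing are illustrative assumptions:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/gohcl"
	"github.com/hashicorp/hcl/v2/hclwrite"
)

func main() {
	type Service struct {
		Name string   `hcl:"name,label"`
		Exe  []string `hcl:"executable"`
	}

	// The first label comes from the "name,label" field, the block type from the argument.
	block := gohcl.EncodeAsBlock(&Service{Name: "web", Exe: []string{"./web"}}, "service")
	f := hclwrite.NewEmptyFile()
	f.Body().AppendBlock(block)
	fmt.Printf("%s", f.Bytes())
	// prints roughly:
	// service "web" {
	//   executable = ["./web"]
	// }
}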
185  bake/hclparser/gohcl/schema.go  Normal file
@@ -0,0 +1,185 @@
|
|||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
package gohcl
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/hashicorp/hcl/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ImpliedBodySchema produces a hcl.BodySchema derived from the type of the
|
||||||
|
// given value, which must be a struct value or a pointer to one. If an
|
||||||
|
// inappropriate value is passed, this function will panic.
|
||||||
|
//
|
||||||
|
// The second return argument indicates whether the given struct includes
|
||||||
|
// a "remain" field, and thus the returned schema is non-exhaustive.
|
||||||
|
//
|
||||||
|
// This uses the tags on the fields of the struct to discover how each
|
||||||
|
// field's value should be expressed within configuration. If an invalid
|
||||||
|
// mapping is attempted, this function will panic.
|
||||||
|
func ImpliedBodySchema(val any) (schema *hcl.BodySchema, partial bool) {
|
||||||
|
ty := reflect.TypeOf(val)
|
||||||
|
|
||||||
|
if ty.Kind() == reflect.Ptr {
|
||||||
|
ty = ty.Elem()
|
||||||
|
}
|
||||||
|
|
||||||
|
if ty.Kind() != reflect.Struct {
|
||||||
|
panic(fmt.Sprintf("given value must be struct, not %T", val))
|
||||||
|
}
|
||||||
|
|
||||||
|
var attrSchemas []hcl.AttributeSchema
|
||||||
|
var blockSchemas []hcl.BlockHeaderSchema
|
||||||
|
|
||||||
|
tags := getFieldTags(ty)
|
||||||
|
|
||||||
|
attrNames := make([]string, 0, len(tags.Attributes))
|
||||||
|
for n := range tags.Attributes {
|
||||||
|
attrNames = append(attrNames, n)
|
||||||
|
}
|
||||||
|
sort.Strings(attrNames)
|
||||||
|
for _, n := range attrNames {
|
||||||
|
idx := tags.Attributes[n]
|
||||||
|
optional := tags.Optional[n]
|
||||||
|
field := ty.Field(idx)
|
||||||
|
|
||||||
|
var required bool
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case field.Type.AssignableTo(exprType):
|
||||||
|
//nolint:misspell
|
||||||
|
// If we're decoding to hcl.Expression then absense can be
|
||||||
|
// indicated via a null value, so we don't specify that
|
||||||
|
// the field is required during decoding.
|
||||||
|
required = false
|
||||||
|
case field.Type.Kind() != reflect.Ptr && !optional:
|
||||||
|
required = true
|
||||||
|
default:
|
||||||
|
required = false
|
||||||
|
}
|
||||||
|
|
||||||
|
attrSchemas = append(attrSchemas, hcl.AttributeSchema{
|
||||||
|
Name: n,
|
||||||
|
Required: required,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
blockNames := make([]string, 0, len(tags.Blocks))
|
||||||
|
for n := range tags.Blocks {
|
||||||
|
blockNames = append(blockNames, n)
|
||||||
|
}
|
||||||
|
sort.Strings(blockNames)
|
||||||
|
for _, n := range blockNames {
|
||||||
|
idx := tags.Blocks[n]
|
||||||
|
field := ty.Field(idx)
|
||||||
|
fty := field.Type
|
||||||
|
if fty.Kind() == reflect.Slice {
|
||||||
|
fty = fty.Elem()
|
||||||
|
}
|
||||||
|
if fty.Kind() == reflect.Ptr {
|
||||||
|
fty = fty.Elem()
|
||||||
|
}
|
||||||
|
if fty.Kind() != reflect.Struct {
|
||||||
|
panic(fmt.Sprintf(
|
||||||
|
"hcl 'block' tag kind cannot be applied to %s field %s: struct required", field.Type.String(), field.Name,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
ftags := getFieldTags(fty)
|
||||||
|
var labelNames []string
|
||||||
|
if len(ftags.Labels) > 0 {
|
||||||
|
labelNames = make([]string, len(ftags.Labels))
|
||||||
|
for i, l := range ftags.Labels {
|
||||||
|
labelNames[i] = l.Name
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
blockSchemas = append(blockSchemas, hcl.BlockHeaderSchema{
|
||||||
|
Type: n,
|
||||||
|
LabelNames: labelNames,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
partial = tags.Remain != nil
|
||||||
|
schema = &hcl.BodySchema{
|
||||||
|
Attributes: attrSchemas,
|
||||||
|
Blocks: blockSchemas,
|
||||||
|
}
|
||||||
|
return schema, partial
|
||||||
|
}
|
||||||
|
|
||||||
|
type fieldTags struct {
|
||||||
|
Attributes map[string]int
|
||||||
|
Blocks map[string]int
|
||||||
|
Labels []labelField
|
||||||
|
Remain *int
|
||||||
|
Body *int
|
||||||
|
Optional map[string]bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type labelField struct {
|
||||||
|
FieldIndex int
|
||||||
|
Name string
|
||||||
|
}
|
||||||
|
|
||||||
|
func getFieldTags(ty reflect.Type) *fieldTags {
|
||||||
|
ret := &fieldTags{
|
||||||
|
Attributes: map[string]int{},
|
||||||
|
Blocks: map[string]int{},
|
||||||
|
Optional: map[string]bool{},
|
||||||
|
}
|
||||||
|
|
||||||
|
ct := ty.NumField()
|
||||||
|
for i := range ct {
|
||||||
|
field := ty.Field(i)
|
||||||
|
tag := field.Tag.Get("hcl")
|
||||||
|
if tag == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
comma := strings.Index(tag, ",")
|
||||||
|
var name, kind string
|
||||||
|
if comma != -1 {
|
||||||
|
name = tag[:comma]
|
||||||
|
kind = tag[comma+1:]
|
||||||
|
} else {
|
||||||
|
name = tag
|
||||||
|
kind = "attr"
|
||||||
|
}
|
||||||
|
|
||||||
|
switch kind {
|
||||||
|
case "attr":
|
||||||
|
ret.Attributes[name] = i
|
||||||
|
case "block":
|
||||||
|
ret.Blocks[name] = i
|
||||||
|
case "label":
|
||||||
|
ret.Labels = append(ret.Labels, labelField{
|
||||||
|
FieldIndex: i,
|
||||||
|
Name: name,
|
||||||
|
})
|
||||||
|
case "remain":
|
||||||
|
if ret.Remain != nil {
|
||||||
|
panic("only one 'remain' tag is permitted")
|
||||||
|
}
|
||||||
|
idx := i // copy, because this loop will continue assigning to i
|
||||||
|
ret.Remain = &idx
|
||||||
|
case "body":
|
||||||
|
if ret.Body != nil {
|
||||||
|
panic("only one 'body' tag is permitted")
|
||||||
|
}
|
||||||
|
idx := i // copy, because this loop will continue assigning to i
|
||||||
|
ret.Body = &idx
|
||||||
|
case "optional":
|
||||||
|
ret.Attributes[name] = i
|
||||||
|
ret.Optional[name] = true
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("invalid hcl field tag kind %q on %s %q", kind, field.Type.String(), field.Name))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret
|
||||||
|
}
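For reference, here is a hypothetical struct exercising each tag kind handled above (the names are illustrative, not from the buildx codebase), together with the schema ImpliedBodySchema would derive from it:

type exampleFile struct {
	Group   string  `hcl:"group,optional"` // "optional" => attribute, Required: false
	Target  string  `hcl:"target"`         // plain non-pointer "attr" => Required: true
	Timeout *string `hcl:"timeout"`        // pointer "attr" => Required: false
	Services []struct {
		Name  string `hcl:"name,label"` // block label
		Image string `hcl:"image"`
	} `hcl:"service,block"` // nested block type "service"
	Remain hcl.Body `hcl:",remain"` // makes the schema partial
}

// schema, partial := ImpliedBodySchema(exampleFile{})
// attributes: group (optional), target (required), timeout (optional)
// blocks:     {Type: "service", LabelNames: []string{"name"}}
// partial:    true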
bake/hclparser/gohcl/schema_test.go (new file, 233 lines)
@@ -0,0 +1,233 @@
|
|||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
package gohcl
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/davecgh/go-spew/spew"
|
||||||
|
"github.com/hashicorp/hcl/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestImpliedBodySchema(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
val any
|
||||||
|
wantSchema *hcl.BodySchema
|
||||||
|
wantPartial bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
struct{}{},
|
||||||
|
&hcl.BodySchema{},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
struct {
|
||||||
|
Ignored bool
|
||||||
|
}{},
|
||||||
|
&hcl.BodySchema{},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
struct {
|
||||||
|
Attr1 bool `hcl:"attr1"`
|
||||||
|
Attr2 bool `hcl:"attr2"`
|
||||||
|
}{},
|
||||||
|
&hcl.BodySchema{
|
||||||
|
Attributes: []hcl.AttributeSchema{
|
||||||
|
{
|
||||||
|
Name: "attr1",
|
||||||
|
Required: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "attr2",
|
||||||
|
Required: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
struct {
|
||||||
|
Attr *bool `hcl:"attr,attr"`
|
||||||
|
}{},
|
||||||
|
&hcl.BodySchema{
|
||||||
|
Attributes: []hcl.AttributeSchema{
|
||||||
|
{
|
||||||
|
Name: "attr",
|
||||||
|
Required: false,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
struct {
|
||||||
|
Thing struct{} `hcl:"thing,block"`
|
||||||
|
}{},
|
||||||
|
&hcl.BodySchema{
|
||||||
|
Blocks: []hcl.BlockHeaderSchema{
|
||||||
|
{
|
||||||
|
Type: "thing",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
struct {
|
||||||
|
Thing struct {
|
||||||
|
Type string `hcl:"type,label"`
|
||||||
|
Name string `hcl:"name,label"`
|
||||||
|
} `hcl:"thing,block"`
|
||||||
|
}{},
|
||||||
|
&hcl.BodySchema{
|
||||||
|
Blocks: []hcl.BlockHeaderSchema{
|
||||||
|
{
|
||||||
|
Type: "thing",
|
||||||
|
LabelNames: []string{"type", "name"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
struct {
|
||||||
|
Thing []struct {
|
||||||
|
Type string `hcl:"type,label"`
|
||||||
|
Name string `hcl:"name,label"`
|
||||||
|
} `hcl:"thing,block"`
|
||||||
|
}{},
|
||||||
|
&hcl.BodySchema{
|
||||||
|
Blocks: []hcl.BlockHeaderSchema{
|
||||||
|
{
|
||||||
|
Type: "thing",
|
||||||
|
LabelNames: []string{"type", "name"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
struct {
|
||||||
|
Thing *struct {
|
||||||
|
Type string `hcl:"type,label"`
|
||||||
|
Name string `hcl:"name,label"`
|
||||||
|
} `hcl:"thing,block"`
|
||||||
|
}{},
|
||||||
|
&hcl.BodySchema{
|
||||||
|
Blocks: []hcl.BlockHeaderSchema{
|
||||||
|
{
|
||||||
|
Type: "thing",
|
||||||
|
LabelNames: []string{"type", "name"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
struct {
|
||||||
|
Thing struct {
|
||||||
|
Name string `hcl:"name,label"`
|
||||||
|
Something string `hcl:"something"`
|
||||||
|
} `hcl:"thing,block"`
|
||||||
|
}{},
|
||||||
|
&hcl.BodySchema{
|
||||||
|
Blocks: []hcl.BlockHeaderSchema{
|
||||||
|
{
|
||||||
|
Type: "thing",
|
||||||
|
LabelNames: []string{"name"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
struct {
|
||||||
|
Doodad string `hcl:"doodad"`
|
||||||
|
Thing struct {
|
||||||
|
Name string `hcl:"name,label"`
|
||||||
|
} `hcl:"thing,block"`
|
||||||
|
}{},
|
||||||
|
&hcl.BodySchema{
|
||||||
|
Attributes: []hcl.AttributeSchema{
|
||||||
|
{
|
||||||
|
Name: "doodad",
|
||||||
|
Required: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Blocks: []hcl.BlockHeaderSchema{
|
||||||
|
{
|
||||||
|
Type: "thing",
|
||||||
|
LabelNames: []string{"name"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
struct {
|
||||||
|
Doodad string `hcl:"doodad"`
|
||||||
|
Config string `hcl:",remain"`
|
||||||
|
}{},
|
||||||
|
&hcl.BodySchema{
|
||||||
|
Attributes: []hcl.AttributeSchema{
|
||||||
|
{
|
||||||
|
Name: "doodad",
|
||||||
|
Required: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
struct {
|
||||||
|
Expr hcl.Expression `hcl:"expr"`
|
||||||
|
}{},
|
||||||
|
&hcl.BodySchema{
|
||||||
|
Attributes: []hcl.AttributeSchema{
|
||||||
|
{
|
||||||
|
Name: "expr",
|
||||||
|
Required: false,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
struct {
|
||||||
|
Meh string `hcl:"meh,optional"`
|
||||||
|
}{},
|
||||||
|
&hcl.BodySchema{
|
||||||
|
Attributes: []hcl.AttributeSchema{
|
||||||
|
{
|
||||||
|
Name: "meh",
|
||||||
|
Required: false,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(fmt.Sprintf("%#v", test.val), func(t *testing.T) {
|
||||||
|
schema, partial := ImpliedBodySchema(test.val)
|
||||||
|
if !reflect.DeepEqual(schema, test.wantSchema) {
|
||||||
|
t.Errorf(
|
||||||
|
"wrong schema\ngot: %s\nwant: %s",
|
||||||
|
spew.Sdump(schema), spew.Sdump(test.wantSchema),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
if partial != test.wantPartial {
|
||||||
|
t.Errorf(
|
||||||
|
"wrong partial flag\ngot: %#v\nwant: %#v",
|
||||||
|
partial, test.wantPartial,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
bake/hclparser/gohcl/types.go (new file, 19 lines)
@@ -0,0 +1,19 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package gohcl

import (
	"reflect"

	"github.com/hashicorp/hcl/v2"
)

var victimExpr hcl.Expression
var victimBody hcl.Body

var exprType = reflect.TypeOf(&victimExpr).Elem()
var bodyType = reflect.TypeOf(&victimBody).Elem()
var blockType = reflect.TypeOf((*hcl.Block)(nil)) //nolint:unused
var attrType = reflect.TypeOf((*hcl.Attribute)(nil))
var attrsType = reflect.TypeOf(hcl.Attributes(nil))
(file diff suppressed because it is too large)

bake/hclparser/merged.go (new file, 228 lines)
@@ -0,0 +1,228 @@
|
|||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
// Forked from https://github.com/hashicorp/hcl/blob/4679383728fe331fc8a6b46036a27b8f818d9bc0/merged.go
|
||||||
|
|
||||||
|
package hclparser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/hashicorp/hcl/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MergeFiles combines the given files to produce a single body that contains
|
||||||
|
// configuration from all of the given files.
|
||||||
|
//
|
||||||
|
// The ordering of the given files decides the order in which contained
|
||||||
|
// elements will be returned. If any top-level attributes are defined with
|
||||||
|
// the same name across multiple files, a diagnostic will be produced from
|
||||||
|
// the Content and PartialContent methods describing this error in a
|
||||||
|
// user-friendly way.
|
||||||
|
func MergeFiles(files []*hcl.File) hcl.Body {
|
||||||
|
var bodies []hcl.Body
|
||||||
|
for _, file := range files {
|
||||||
|
bodies = append(bodies, file.Body)
|
||||||
|
}
|
||||||
|
return MergeBodies(bodies)
|
||||||
|
}
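A minimal usage sketch (assuming the standard hclparse package; the file names and contents here are illustrative):

parser := hclparse.NewParser()
a, _ := parser.ParseHCL([]byte(`group "default" { targets = ["app"] }`), "a.hcl")
b, _ := parser.ParseHCL([]byte(`target "app" { context = "." }`), "b.hcl")

body := MergeFiles([]*hcl.File{a, b})
content, _, diags := body.PartialContent(&hcl.BodySchema{
	Blocks: []hcl.BlockHeaderSchema{
		{Type: "group", LabelNames: []string{"name"}},
		{Type: "target", LabelNames: []string{"name"}},
	},
})
// content.Blocks holds the blocks from both files in file order; a top-level
// attribute defined in both files would surface as a "Duplicate argument" diagnostic.
_, _ = content, diags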
|
||||||
|
|
||||||
|
// MergeBodies is like MergeFiles except it deals directly with bodies, rather
|
||||||
|
// than with entire files.
|
||||||
|
func MergeBodies(bodies []hcl.Body) hcl.Body {
|
||||||
|
if len(bodies) == 0 {
|
||||||
|
// Swap out for our singleton empty body, to reduce the number of
|
||||||
|
// empty slices we have hanging around.
|
||||||
|
return emptyBody
|
||||||
|
}
|
||||||
|
|
||||||
|
// If any of the given bodies are already merged bodies, we'll unpack
|
||||||
|
// to flatten to a single mergedBodies, since that's conceptually simpler.
|
||||||
|
// This also, as a side-effect, eliminates any empty bodies, since
|
||||||
|
// empties are merged bodies with no inner bodies.
|
||||||
|
var newLen int
|
||||||
|
var flatten bool
|
||||||
|
for _, body := range bodies {
|
||||||
|
if children, merged := body.(mergedBodies); merged {
|
||||||
|
newLen += len(children)
|
||||||
|
flatten = true
|
||||||
|
} else {
|
||||||
|
newLen++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !flatten { // not just newLen == len, because we might have mergedBodies with single bodies inside
|
||||||
|
return mergedBodies(bodies)
|
||||||
|
}
|
||||||
|
|
||||||
|
if newLen == 0 {
|
||||||
|
// Don't allocate a new empty when we already have one
|
||||||
|
return emptyBody
|
||||||
|
}
|
||||||
|
|
||||||
|
n := make([]hcl.Body, 0, newLen)
|
||||||
|
for _, body := range bodies {
|
||||||
|
if children, merged := body.(mergedBodies); merged {
|
||||||
|
n = append(n, children...)
|
||||||
|
} else {
|
||||||
|
n = append(n, body)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return mergedBodies(n)
|
||||||
|
}
|
||||||
|
|
||||||
|
var emptyBody = mergedBodies([]hcl.Body{})
|
||||||
|
|
||||||
|
// EmptyBody returns a body with no content. This body can be used as a
|
||||||
|
// placeholder when a body is required but no body content is available.
|
||||||
|
func EmptyBody() hcl.Body {
|
||||||
|
return emptyBody
|
||||||
|
}
|
||||||
|
|
||||||
|
type mergedBodies []hcl.Body
|
||||||
|
|
||||||
|
// Content returns the content produced by applying the given schema to all
|
||||||
|
// of the merged bodies and merging the result.
|
||||||
|
//
|
||||||
|
// Although required attributes _are_ supported, they should be used sparingly
|
||||||
|
// with merged bodies since in this case there is no contextual information
|
||||||
|
// with which to return good diagnostics. Applications working with merged
|
||||||
|
// bodies may wish to mark all attributes as optional and then check for
|
||||||
|
// required attributes afterwards, to produce better diagnostics.
|
||||||
|
func (mb mergedBodies) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
|
||||||
|
// the returned body will always be empty in this case, because mergedContent
|
||||||
|
// will only ever call Content on the child bodies.
|
||||||
|
content, _, diags := mb.mergedContent(schema, false)
|
||||||
|
return content, diags
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mb mergedBodies) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
|
||||||
|
return mb.mergedContent(schema, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mb mergedBodies) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
|
||||||
|
attrs := make(map[string]*hcl.Attribute)
|
||||||
|
var diags hcl.Diagnostics
|
||||||
|
|
||||||
|
for _, body := range mb {
|
||||||
|
thisAttrs, thisDiags := body.JustAttributes()
|
||||||
|
|
||||||
|
if len(thisDiags) != 0 {
|
||||||
|
diags = append(diags, thisDiags...)
|
||||||
|
}
|
||||||
|
|
||||||
|
for name, attr := range thisAttrs {
|
||||||
|
if existing := attrs[name]; existing != nil {
|
||||||
|
diags = diags.Append(&hcl.Diagnostic{
|
||||||
|
Severity: hcl.DiagError,
|
||||||
|
Summary: "Duplicate argument",
|
||||||
|
Detail: fmt.Sprintf(
|
||||||
|
"Argument %q was already set at %s",
|
||||||
|
name, existing.NameRange.String(),
|
||||||
|
),
|
||||||
|
Subject: thisAttrs[name].NameRange.Ptr(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
attrs[name] = attr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return attrs, diags
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mb mergedBodies) MissingItemRange() hcl.Range {
|
||||||
|
if len(mb) == 0 {
|
||||||
|
// Nothing useful to return here, so we'll return some garbage.
|
||||||
|
return hcl.Range{
|
||||||
|
Filename: "<empty>",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// arbitrarily use the first body's missing item range
|
||||||
|
return mb[0].MissingItemRange()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mb mergedBodies) mergedContent(schema *hcl.BodySchema, partial bool) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
|
||||||
|
// We need to produce a new schema with none of the attributes marked as
|
||||||
|
// required, since _any one_ of our bodies can contribute an attribute value.
|
||||||
|
// We'll separately check that all required attributes are present at
|
||||||
|
// the end.
|
||||||
|
mergedSchema := &hcl.BodySchema{
|
||||||
|
Blocks: schema.Blocks,
|
||||||
|
}
|
||||||
|
for _, attrS := range schema.Attributes {
|
||||||
|
mergedAttrS := attrS
|
||||||
|
mergedAttrS.Required = false
|
||||||
|
mergedSchema.Attributes = append(mergedSchema.Attributes, mergedAttrS)
|
||||||
|
}
|
||||||
|
|
||||||
|
var mergedLeftovers []hcl.Body
|
||||||
|
content := &hcl.BodyContent{
|
||||||
|
Attributes: map[string]*hcl.Attribute{},
|
||||||
|
}
|
||||||
|
|
||||||
|
var diags hcl.Diagnostics
|
||||||
|
for _, body := range mb {
|
||||||
|
var thisContent *hcl.BodyContent
|
||||||
|
var thisLeftovers hcl.Body
|
||||||
|
var thisDiags hcl.Diagnostics
|
||||||
|
|
||||||
|
if partial {
|
||||||
|
thisContent, thisLeftovers, thisDiags = body.PartialContent(mergedSchema)
|
||||||
|
} else {
|
||||||
|
thisContent, thisDiags = body.Content(mergedSchema)
|
||||||
|
}
|
||||||
|
|
||||||
|
if thisLeftovers != nil {
|
||||||
|
mergedLeftovers = append(mergedLeftovers, thisLeftovers)
|
||||||
|
}
|
||||||
|
if len(thisDiags) != 0 {
|
||||||
|
diags = append(diags, thisDiags...)
|
||||||
|
}
|
||||||
|
|
||||||
|
if thisContent.Attributes != nil {
|
||||||
|
for name, attr := range thisContent.Attributes {
|
||||||
|
if existing := content.Attributes[name]; existing != nil {
|
||||||
|
diags = diags.Append(&hcl.Diagnostic{
|
||||||
|
Severity: hcl.DiagError,
|
||||||
|
Summary: "Duplicate argument",
|
||||||
|
Detail: fmt.Sprintf(
|
||||||
|
"Argument %q was already set at %s",
|
||||||
|
name, existing.NameRange.String(),
|
||||||
|
),
|
||||||
|
Subject: thisContent.Attributes[name].NameRange.Ptr(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
content.Attributes[name] = attr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(thisContent.Blocks) != 0 {
|
||||||
|
content.Blocks = append(content.Blocks, thisContent.Blocks...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Finally, we check for required attributes.
|
||||||
|
for _, attrS := range schema.Attributes {
|
||||||
|
if !attrS.Required {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if content.Attributes[attrS.Name] == nil {
|
||||||
|
// We don't have any context here to produce a good diagnostic,
|
||||||
|
// which is why we warn in the Content docstring to minimize the
|
||||||
|
// use of required attributes on merged bodies.
|
||||||
|
diags = diags.Append(&hcl.Diagnostic{
|
||||||
|
Severity: hcl.DiagError,
|
||||||
|
Summary: "Missing required argument",
|
||||||
|
Detail: fmt.Sprintf(
|
||||||
|
"The argument %q is required, but was not set.",
|
||||||
|
attrS.Name,
|
||||||
|
),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
leftoverBody := MergeBodies(mergedLeftovers)
|
||||||
|
return content, leftoverBody, diags
|
||||||
|
}
|
||||||
@@ -1,6 +1,9 @@
|
|||||||
package hclparser
|
package hclparser
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/hashicorp/go-cty-funcs/cidr"
|
"github.com/hashicorp/go-cty-funcs/cidr"
|
||||||
@@ -14,113 +17,245 @@ import (
|
|||||||
"github.com/zclconf/go-cty/cty/function/stdlib"
|
"github.com/zclconf/go-cty/cty/function/stdlib"
|
||||||
)
|
)
|
||||||
|
|
||||||
var stdlibFunctions = map[string]function.Function{
|
type funcDef struct {
|
||||||
"absolute": stdlib.AbsoluteFunc,
|
name string
|
||||||
"add": stdlib.AddFunc,
|
fn function.Function
|
||||||
"and": stdlib.AndFunc,
|
factory func() function.Function
|
||||||
"base64decode": encoding.Base64DecodeFunc,
|
}
|
||||||
"base64encode": encoding.Base64EncodeFunc,
|
|
||||||
"bcrypt": crypto.BcryptFunc,
|
var stdlibFunctions = []funcDef{
|
||||||
"byteslen": stdlib.BytesLenFunc,
|
{name: "absolute", fn: stdlib.AbsoluteFunc},
|
||||||
"bytesslice": stdlib.BytesSliceFunc,
|
{name: "add", fn: stdlib.AddFunc},
|
||||||
"can": tryfunc.CanFunc,
|
{name: "and", fn: stdlib.AndFunc},
|
||||||
"ceil": stdlib.CeilFunc,
|
{name: "base64decode", fn: encoding.Base64DecodeFunc},
|
||||||
"chomp": stdlib.ChompFunc,
|
{name: "base64encode", fn: encoding.Base64EncodeFunc},
|
||||||
"chunklist": stdlib.ChunklistFunc,
|
{name: "basename", factory: basenameFunc},
|
||||||
"cidrhost": cidr.HostFunc,
|
{name: "bcrypt", fn: crypto.BcryptFunc},
|
||||||
"cidrnetmask": cidr.NetmaskFunc,
|
{name: "byteslen", fn: stdlib.BytesLenFunc},
|
||||||
"cidrsubnet": cidr.SubnetFunc,
|
{name: "bytesslice", fn: stdlib.BytesSliceFunc},
|
||||||
"cidrsubnets": cidr.SubnetsFunc,
|
{name: "can", fn: tryfunc.CanFunc},
|
||||||
"csvdecode": stdlib.CSVDecodeFunc,
|
{name: "ceil", fn: stdlib.CeilFunc},
|
||||||
"coalesce": stdlib.CoalesceFunc,
|
{name: "chomp", fn: stdlib.ChompFunc},
|
||||||
"coalescelist": stdlib.CoalesceListFunc,
|
{name: "chunklist", fn: stdlib.ChunklistFunc},
|
||||||
"compact": stdlib.CompactFunc,
|
{name: "cidrhost", fn: cidr.HostFunc},
|
||||||
"concat": stdlib.ConcatFunc,
|
{name: "cidrnetmask", fn: cidr.NetmaskFunc},
|
||||||
"contains": stdlib.ContainsFunc,
|
{name: "cidrsubnet", fn: cidr.SubnetFunc},
|
||||||
"convert": typeexpr.ConvertFunc,
|
{name: "cidrsubnets", fn: cidr.SubnetsFunc},
|
||||||
"distinct": stdlib.DistinctFunc,
|
{name: "coalesce", fn: stdlib.CoalesceFunc},
|
||||||
"divide": stdlib.DivideFunc,
|
{name: "coalescelist", fn: stdlib.CoalesceListFunc},
|
||||||
"element": stdlib.ElementFunc,
|
{name: "compact", fn: stdlib.CompactFunc},
|
||||||
"equal": stdlib.EqualFunc,
|
{name: "concat", fn: stdlib.ConcatFunc},
|
||||||
"flatten": stdlib.FlattenFunc,
|
{name: "contains", fn: stdlib.ContainsFunc},
|
||||||
"floor": stdlib.FloorFunc,
|
{name: "convert", fn: typeexpr.ConvertFunc},
|
||||||
"formatdate": stdlib.FormatDateFunc,
|
{name: "csvdecode", fn: stdlib.CSVDecodeFunc},
|
||||||
"format": stdlib.FormatFunc,
|
{name: "dirname", factory: dirnameFunc},
|
||||||
"formatlist": stdlib.FormatListFunc,
|
{name: "distinct", fn: stdlib.DistinctFunc},
|
||||||
"greaterthan": stdlib.GreaterThanFunc,
|
{name: "divide", fn: stdlib.DivideFunc},
|
||||||
"greaterthanorequalto": stdlib.GreaterThanOrEqualToFunc,
|
{name: "element", fn: stdlib.ElementFunc},
|
||||||
"hasindex": stdlib.HasIndexFunc,
|
{name: "equal", fn: stdlib.EqualFunc},
|
||||||
"indent": stdlib.IndentFunc,
|
{name: "flatten", fn: stdlib.FlattenFunc},
|
||||||
"index": stdlib.IndexFunc,
|
{name: "floor", fn: stdlib.FloorFunc},
|
||||||
"int": stdlib.IntFunc,
|
{name: "format", fn: stdlib.FormatFunc},
|
||||||
"jsondecode": stdlib.JSONDecodeFunc,
|
{name: "formatdate", fn: stdlib.FormatDateFunc},
|
||||||
"jsonencode": stdlib.JSONEncodeFunc,
|
{name: "formatlist", fn: stdlib.FormatListFunc},
|
||||||
"keys": stdlib.KeysFunc,
|
{name: "greaterthan", fn: stdlib.GreaterThanFunc},
|
||||||
"join": stdlib.JoinFunc,
|
{name: "greaterthanorequalto", fn: stdlib.GreaterThanOrEqualToFunc},
|
||||||
"length": stdlib.LengthFunc,
|
{name: "hasindex", fn: stdlib.HasIndexFunc},
|
||||||
"lessthan": stdlib.LessThanFunc,
|
{name: "indent", fn: stdlib.IndentFunc},
|
||||||
"lessthanorequalto": stdlib.LessThanOrEqualToFunc,
|
{name: "index", fn: stdlib.IndexFunc},
|
||||||
"log": stdlib.LogFunc,
|
{name: "indexof", factory: indexOfFunc},
|
||||||
"lookup": stdlib.LookupFunc,
|
{name: "int", fn: stdlib.IntFunc},
|
||||||
"lower": stdlib.LowerFunc,
|
{name: "join", fn: stdlib.JoinFunc},
|
||||||
"max": stdlib.MaxFunc,
|
{name: "jsondecode", fn: stdlib.JSONDecodeFunc},
|
||||||
"md5": crypto.Md5Func,
|
{name: "jsonencode", fn: stdlib.JSONEncodeFunc},
|
||||||
"merge": stdlib.MergeFunc,
|
{name: "keys", fn: stdlib.KeysFunc},
|
||||||
"min": stdlib.MinFunc,
|
{name: "length", fn: stdlib.LengthFunc},
|
||||||
"modulo": stdlib.ModuloFunc,
|
{name: "lessthan", fn: stdlib.LessThanFunc},
|
||||||
"multiply": stdlib.MultiplyFunc,
|
{name: "lessthanorequalto", fn: stdlib.LessThanOrEqualToFunc},
|
||||||
"negate": stdlib.NegateFunc,
|
{name: "log", fn: stdlib.LogFunc},
|
||||||
"notequal": stdlib.NotEqualFunc,
|
{name: "lookup", fn: stdlib.LookupFunc},
|
||||||
"not": stdlib.NotFunc,
|
{name: "lower", fn: stdlib.LowerFunc},
|
||||||
"or": stdlib.OrFunc,
|
{name: "max", fn: stdlib.MaxFunc},
|
||||||
"parseint": stdlib.ParseIntFunc,
|
{name: "md5", fn: crypto.Md5Func},
|
||||||
"pow": stdlib.PowFunc,
|
{name: "merge", fn: stdlib.MergeFunc},
|
||||||
"range": stdlib.RangeFunc,
|
{name: "min", fn: stdlib.MinFunc},
|
||||||
"regexall": stdlib.RegexAllFunc,
|
{name: "modulo", fn: stdlib.ModuloFunc},
|
||||||
"regex": stdlib.RegexFunc,
|
{name: "multiply", fn: stdlib.MultiplyFunc},
|
||||||
"regex_replace": stdlib.RegexReplaceFunc,
|
{name: "negate", fn: stdlib.NegateFunc},
|
||||||
"reverse": stdlib.ReverseFunc,
|
{name: "not", fn: stdlib.NotFunc},
|
||||||
"reverselist": stdlib.ReverseListFunc,
|
{name: "notequal", fn: stdlib.NotEqualFunc},
|
||||||
"rsadecrypt": crypto.RsaDecryptFunc,
|
{name: "or", fn: stdlib.OrFunc},
|
||||||
"sethaselement": stdlib.SetHasElementFunc,
|
{name: "parseint", fn: stdlib.ParseIntFunc},
|
||||||
"setintersection": stdlib.SetIntersectionFunc,
|
{name: "pow", fn: stdlib.PowFunc},
|
||||||
"setproduct": stdlib.SetProductFunc,
|
{name: "range", fn: stdlib.RangeFunc},
|
||||||
"setsubtract": stdlib.SetSubtractFunc,
|
{name: "regex_replace", fn: stdlib.RegexReplaceFunc},
|
||||||
"setsymmetricdifference": stdlib.SetSymmetricDifferenceFunc,
|
{name: "regex", fn: stdlib.RegexFunc},
|
||||||
"setunion": stdlib.SetUnionFunc,
|
{name: "regexall", fn: stdlib.RegexAllFunc},
|
||||||
"sha1": crypto.Sha1Func,
|
{name: "replace", fn: stdlib.ReplaceFunc},
|
||||||
"sha256": crypto.Sha256Func,
|
{name: "reverse", fn: stdlib.ReverseFunc},
|
||||||
"sha512": crypto.Sha512Func,
|
{name: "reverselist", fn: stdlib.ReverseListFunc},
|
||||||
"signum": stdlib.SignumFunc,
|
{name: "rsadecrypt", fn: crypto.RsaDecryptFunc},
|
||||||
"slice": stdlib.SliceFunc,
|
{name: "sanitize", factory: sanitizeFunc},
|
||||||
"sort": stdlib.SortFunc,
|
{name: "sethaselement", fn: stdlib.SetHasElementFunc},
|
||||||
"split": stdlib.SplitFunc,
|
{name: "setintersection", fn: stdlib.SetIntersectionFunc},
|
||||||
"strlen": stdlib.StrlenFunc,
|
{name: "setproduct", fn: stdlib.SetProductFunc},
|
||||||
"substr": stdlib.SubstrFunc,
|
{name: "setsubtract", fn: stdlib.SetSubtractFunc},
|
||||||
"subtract": stdlib.SubtractFunc,
|
{name: "setsymmetricdifference", fn: stdlib.SetSymmetricDifferenceFunc},
|
||||||
"timeadd": stdlib.TimeAddFunc,
|
{name: "setunion", fn: stdlib.SetUnionFunc},
|
||||||
"timestamp": timestampFunc,
|
{name: "sha1", fn: crypto.Sha1Func},
|
||||||
"title": stdlib.TitleFunc,
|
{name: "sha256", fn: crypto.Sha256Func},
|
||||||
"trim": stdlib.TrimFunc,
|
{name: "sha512", fn: crypto.Sha512Func},
|
||||||
"trimprefix": stdlib.TrimPrefixFunc,
|
{name: "signum", fn: stdlib.SignumFunc},
|
||||||
"trimspace": stdlib.TrimSpaceFunc,
|
{name: "slice", fn: stdlib.SliceFunc},
|
||||||
"trimsuffix": stdlib.TrimSuffixFunc,
|
{name: "sort", fn: stdlib.SortFunc},
|
||||||
"try": tryfunc.TryFunc,
|
{name: "split", fn: stdlib.SplitFunc},
|
||||||
"upper": stdlib.UpperFunc,
|
{name: "strlen", fn: stdlib.StrlenFunc},
|
||||||
"urlencode": encoding.URLEncodeFunc,
|
{name: "substr", fn: stdlib.SubstrFunc},
|
||||||
"uuidv4": uuid.V4Func,
|
{name: "subtract", fn: stdlib.SubtractFunc},
|
||||||
"uuidv5": uuid.V5Func,
|
{name: "timeadd", fn: stdlib.TimeAddFunc},
|
||||||
"values": stdlib.ValuesFunc,
|
{name: "timestamp", factory: timestampFunc},
|
||||||
"zipmap": stdlib.ZipmapFunc,
|
{name: "title", fn: stdlib.TitleFunc},
|
||||||
|
{name: "trim", fn: stdlib.TrimFunc},
|
||||||
|
{name: "trimprefix", fn: stdlib.TrimPrefixFunc},
|
||||||
|
{name: "trimspace", fn: stdlib.TrimSpaceFunc},
|
||||||
|
{name: "trimsuffix", fn: stdlib.TrimSuffixFunc},
|
||||||
|
{name: "try", fn: tryfunc.TryFunc},
|
||||||
|
{name: "upper", fn: stdlib.UpperFunc},
|
||||||
|
{name: "urlencode", fn: encoding.URLEncodeFunc},
|
||||||
|
{name: "uuidv4", fn: uuid.V4Func},
|
||||||
|
{name: "uuidv5", fn: uuid.V5Func},
|
||||||
|
{name: "values", fn: stdlib.ValuesFunc},
|
||||||
|
{name: "zipmap", fn: stdlib.ZipmapFunc},
|
||||||
|
}
|
||||||
|
|
||||||
|
// indexOfFunc constructs a function that finds the element index for a given
|
||||||
|
// value in a list.
|
||||||
|
func indexOfFunc() function.Function {
|
||||||
|
return function.New(&function.Spec{
|
||||||
|
Params: []function.Parameter{
|
||||||
|
{
|
||||||
|
Name: "list",
|
||||||
|
Type: cty.DynamicPseudoType,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "value",
|
||||||
|
Type: cty.DynamicPseudoType,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Type: function.StaticReturnType(cty.Number),
|
||||||
|
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
|
||||||
|
if !(args[0].Type().IsListType() || args[0].Type().IsTupleType()) {
|
||||||
|
return cty.NilVal, errors.New("argument must be a list or tuple")
|
||||||
|
}
|
||||||
|
|
||||||
|
if !args[0].IsKnown() {
|
||||||
|
return cty.UnknownVal(cty.Number), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if args[0].LengthInt() == 0 { // Easy path
|
||||||
|
return cty.NilVal, errors.New("cannot search an empty list")
|
||||||
|
}
|
||||||
|
|
||||||
|
for it := args[0].ElementIterator(); it.Next(); {
|
||||||
|
i, v := it.Element()
|
||||||
|
eq, err := stdlib.Equal(v, args[1])
|
||||||
|
if err != nil {
|
||||||
|
return cty.NilVal, err
|
||||||
|
}
|
||||||
|
if !eq.IsKnown() {
|
||||||
|
return cty.UnknownVal(cty.Number), nil
|
||||||
|
}
|
||||||
|
if eq.True() {
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return cty.NilVal, errors.New("item not found")
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
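In Bake files this is exposed as indexof(list, value); a rough equivalent of calling the constructed function directly from Go:

idx, err := indexOfFunc().Call([]cty.Value{
	cty.TupleVal([]cty.Value{cty.StringVal("amd64"), cty.StringVal("arm64")}),
	cty.StringVal("arm64"),
})
// idx == cty.NumberIntVal(1); searching for a missing value returns an "item not found" error.
_, _ = idx, err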
|
||||||
|
|
||||||
|
// basenameFunc constructs a function that returns the last element of a path.
|
||||||
|
func basenameFunc() function.Function {
|
||||||
|
return function.New(&function.Spec{
|
||||||
|
Params: []function.Parameter{
|
||||||
|
{
|
||||||
|
Name: "path",
|
||||||
|
Type: cty.String,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Type: function.StaticReturnType(cty.String),
|
||||||
|
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
|
||||||
|
in := args[0].AsString()
|
||||||
|
return cty.StringVal(path.Base(in)), nil
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// dirnameFunc constructs a function that returns the directory of a path.
|
||||||
|
func dirnameFunc() function.Function {
|
||||||
|
return function.New(&function.Spec{
|
||||||
|
Params: []function.Parameter{
|
||||||
|
{
|
||||||
|
Name: "path",
|
||||||
|
Type: cty.String,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Type: function.StaticReturnType(cty.String),
|
||||||
|
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
|
||||||
|
in := args[0].AsString()
|
||||||
|
return cty.StringVal(path.Dir(in)), nil
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// sanitizeFunc constructs a function that replaces all non-alphanumeric characters with an underscore,
|
||||||
|
// leaving only characters that are valid for a Bake target name.
|
||||||
|
func sanitizeFunc() function.Function {
|
||||||
|
return function.New(&function.Spec{
|
||||||
|
Params: []function.Parameter{
|
||||||
|
{
|
||||||
|
Name: "name",
|
||||||
|
Type: cty.String,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Type: function.StaticReturnType(cty.String),
|
||||||
|
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
|
||||||
|
in := args[0].AsString()
|
||||||
|
// only [a-zA-Z0-9_-]+ is allowed
|
||||||
|
var b strings.Builder
|
||||||
|
for _, r := range in {
|
||||||
|
if r >= 'a' && r <= 'z' || r >= 'A' && r <= 'Z' || r >= '0' && r <= '9' || r == '_' || r == '-' {
|
||||||
|
b.WriteRune(r)
|
||||||
|
} else {
|
||||||
|
b.WriteRune('_')
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return cty.StringVal(b.String()), nil
|
||||||
|
},
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// timestampFunc constructs a function that returns a string representation of the current date and time.
|
// timestampFunc constructs a function that returns a string representation of the current date and time.
|
||||||
//
|
//
|
||||||
// This function was imported from terraform's datetime utilities.
|
// This function was imported from terraform's datetime utilities.
|
||||||
var timestampFunc = function.New(&function.Spec{
|
func timestampFunc() function.Function {
|
||||||
|
return function.New(&function.Spec{
|
||||||
Params: []function.Parameter{},
|
Params: []function.Parameter{},
|
||||||
Type: function.StaticReturnType(cty.String),
|
Type: function.StaticReturnType(cty.String),
|
||||||
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
|
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
|
||||||
return cty.StringVal(time.Now().UTC().Format(time.RFC3339)), nil
|
return cty.StringVal(time.Now().UTC().Format(time.RFC3339)), nil
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func Stdlib() map[string]function.Function {
|
||||||
|
funcs := make(map[string]function.Function, len(stdlibFunctions))
|
||||||
|
for _, v := range stdlibFunctions {
|
||||||
|
if v.factory != nil {
|
||||||
|
funcs[v.name] = v.factory()
|
||||||
|
} else {
|
||||||
|
funcs[v.name] = v.fn
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return funcs
|
||||||
|
}
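A short sketch of how the resulting map can be plugged into HCL expression evaluation (the expression and file name below are illustrative):

funcs := Stdlib()
expr, diags := hclsyntax.ParseExpression([]byte(`upper(basename("/src/app"))`), "inline.hcl", hcl.InitialPos)
if diags.HasErrors() {
	panic(diags)
}
val, diags := expr.Value(&hcl.EvalContext{Functions: funcs})
// val == cty.StringVal("APP"); factory-backed entries such as "basename" are
// instantiated once inside Stdlib, fn-backed entries are passed through as-is.
_, _ = val, diags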
bake/hclparser/stdlib_test.go (new file, 199 lines)
@@ -0,0 +1,199 @@
|
|||||||
|
package hclparser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
"github.com/zclconf/go-cty/cty"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestIndexOf(t *testing.T) {
|
||||||
|
type testCase struct {
|
||||||
|
input cty.Value
|
||||||
|
key cty.Value
|
||||||
|
want cty.Value
|
||||||
|
wantErr bool
|
||||||
|
}
|
||||||
|
tests := map[string]testCase{
|
||||||
|
"index 0": {
|
||||||
|
input: cty.TupleVal([]cty.Value{cty.StringVal("one"), cty.NumberIntVal(2.0), cty.NumberIntVal(3), cty.StringVal("four")}),
|
||||||
|
key: cty.StringVal("one"),
|
||||||
|
want: cty.NumberIntVal(0),
|
||||||
|
},
|
||||||
|
"index 3": {
|
||||||
|
input: cty.TupleVal([]cty.Value{cty.StringVal("one"), cty.NumberIntVal(2.0), cty.NumberIntVal(3), cty.StringVal("four")}),
|
||||||
|
key: cty.StringVal("four"),
|
||||||
|
want: cty.NumberIntVal(3),
|
||||||
|
},
|
||||||
|
"index -1": {
|
||||||
|
input: cty.TupleVal([]cty.Value{cty.StringVal("one"), cty.NumberIntVal(2.0), cty.NumberIntVal(3), cty.StringVal("four")}),
|
||||||
|
key: cty.StringVal("3"),
|
||||||
|
wantErr: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for name, test := range tests {
|
||||||
|
name, test := name, test
|
||||||
|
t.Run(name, func(t *testing.T) {
|
||||||
|
got, err := indexOfFunc().Call([]cty.Value{test.input, test.key})
|
||||||
|
if test.wantErr {
|
||||||
|
require.Error(t, err)
|
||||||
|
} else {
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, test.want, got)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBasename(t *testing.T) {
|
||||||
|
type testCase struct {
|
||||||
|
input cty.Value
|
||||||
|
want cty.Value
|
||||||
|
wantErr bool
|
||||||
|
}
|
||||||
|
tests := map[string]testCase{
|
||||||
|
"empty": {
|
||||||
|
input: cty.StringVal(""),
|
||||||
|
want: cty.StringVal("."),
|
||||||
|
},
|
||||||
|
"slash": {
|
||||||
|
input: cty.StringVal("/"),
|
||||||
|
want: cty.StringVal("/"),
|
||||||
|
},
|
||||||
|
"simple": {
|
||||||
|
input: cty.StringVal("/foo/bar"),
|
||||||
|
want: cty.StringVal("bar"),
|
||||||
|
},
|
||||||
|
"simple no slash": {
|
||||||
|
input: cty.StringVal("foo/bar"),
|
||||||
|
want: cty.StringVal("bar"),
|
||||||
|
},
|
||||||
|
"dot": {
|
||||||
|
input: cty.StringVal("/foo/bar."),
|
||||||
|
want: cty.StringVal("bar."),
|
||||||
|
},
|
||||||
|
"dotdot": {
|
||||||
|
input: cty.StringVal("/foo/bar.."),
|
||||||
|
want: cty.StringVal("bar.."),
|
||||||
|
},
|
||||||
|
"dotdotdot": {
|
||||||
|
input: cty.StringVal("/foo/bar..."),
|
||||||
|
want: cty.StringVal("bar..."),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for name, test := range tests {
|
||||||
|
name, test := name, test
|
||||||
|
t.Run(name, func(t *testing.T) {
|
||||||
|
got, err := basenameFunc().Call([]cty.Value{test.input})
|
||||||
|
if test.wantErr {
|
||||||
|
require.Error(t, err)
|
||||||
|
} else {
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, test.want, got)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDirname(t *testing.T) {
|
||||||
|
type testCase struct {
|
||||||
|
input cty.Value
|
||||||
|
want cty.Value
|
||||||
|
wantErr bool
|
||||||
|
}
|
||||||
|
tests := map[string]testCase{
|
||||||
|
"empty": {
|
||||||
|
input: cty.StringVal(""),
|
||||||
|
want: cty.StringVal("."),
|
||||||
|
},
|
||||||
|
"slash": {
|
||||||
|
input: cty.StringVal("/"),
|
||||||
|
want: cty.StringVal("/"),
|
||||||
|
},
|
||||||
|
"simple": {
|
||||||
|
input: cty.StringVal("/foo/bar"),
|
||||||
|
want: cty.StringVal("/foo"),
|
||||||
|
},
|
||||||
|
"simple no slash": {
|
||||||
|
input: cty.StringVal("foo/bar"),
|
||||||
|
want: cty.StringVal("foo"),
|
||||||
|
},
|
||||||
|
"dot": {
|
||||||
|
input: cty.StringVal("/foo/bar."),
|
||||||
|
want: cty.StringVal("/foo"),
|
||||||
|
},
|
||||||
|
"dotdot": {
|
||||||
|
input: cty.StringVal("/foo/bar.."),
|
||||||
|
want: cty.StringVal("/foo"),
|
||||||
|
},
|
||||||
|
"dotdotdot": {
|
||||||
|
input: cty.StringVal("/foo/bar..."),
|
||||||
|
want: cty.StringVal("/foo"),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for name, test := range tests {
|
||||||
|
name, test := name, test
|
||||||
|
t.Run(name, func(t *testing.T) {
|
||||||
|
got, err := dirnameFunc().Call([]cty.Value{test.input})
|
||||||
|
if test.wantErr {
|
||||||
|
require.Error(t, err)
|
||||||
|
} else {
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, test.want, got)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSanitize(t *testing.T) {
|
||||||
|
type testCase struct {
|
||||||
|
input cty.Value
|
||||||
|
want cty.Value
|
||||||
|
}
|
||||||
|
tests := map[string]testCase{
|
||||||
|
"empty": {
|
||||||
|
input: cty.StringVal(""),
|
||||||
|
want: cty.StringVal(""),
|
||||||
|
},
|
||||||
|
"simple": {
|
||||||
|
input: cty.StringVal("foo/bar"),
|
||||||
|
want: cty.StringVal("foo_bar"),
|
||||||
|
},
|
||||||
|
"simple no slash": {
|
||||||
|
input: cty.StringVal("foobar"),
|
||||||
|
want: cty.StringVal("foobar"),
|
||||||
|
},
|
||||||
|
"dot": {
|
||||||
|
input: cty.StringVal("foo/bar."),
|
||||||
|
want: cty.StringVal("foo_bar_"),
|
||||||
|
},
|
||||||
|
"dotdot": {
|
||||||
|
input: cty.StringVal("foo/bar.."),
|
||||||
|
want: cty.StringVal("foo_bar__"),
|
||||||
|
},
|
||||||
|
"dotdotdot": {
|
||||||
|
input: cty.StringVal("foo/bar..."),
|
||||||
|
want: cty.StringVal("foo_bar___"),
|
||||||
|
},
|
||||||
|
"utf8": {
|
||||||
|
input: cty.StringVal("foo/🍕bar"),
|
||||||
|
want: cty.StringVal("foo__bar"),
|
||||||
|
},
|
||||||
|
"symbols": {
|
||||||
|
input: cty.StringVal("foo/bar!@(ba+z)"),
|
||||||
|
want: cty.StringVal("foo_bar___ba_z_"),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for name, test := range tests {
|
||||||
|
name, test := name, test
|
||||||
|
t.Run(name, func(t *testing.T) {
|
||||||
|
got, err := sanitizeFunc().Call([]cty.Value{test.input})
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, test.want, got)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
bake/hclparser/type_implied.go (new file, 160 lines)
@@ -0,0 +1,160 @@
|
|||||||
|
// MIT License
|
||||||
|
//
|
||||||
|
// Copyright (c) 2017-2018 Martin Atkins
|
||||||
|
//
|
||||||
|
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
// of this software and associated documentation files (the "Software"), to deal
|
||||||
|
// in the Software without restriction, including without limitation the rights
|
||||||
|
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
// copies of the Software, and to permit persons to whom the Software is
|
||||||
|
// furnished to do so, subject to the following conditions:
|
||||||
|
//
|
||||||
|
// The above copyright notice and this permission notice shall be included in all
|
||||||
|
// copies or substantial portions of the Software.
|
||||||
|
//
|
||||||
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
// SOFTWARE.
|
||||||
|
|
||||||
|
package hclparser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
|
||||||
|
"github.com/zclconf/go-cty/cty"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ImpliedType takes an arbitrary Go value (as an interface{}) and attempts
|
||||||
|
// to find a suitable cty.Type instance that could be used for a conversion
|
||||||
|
// with ToCtyValue.
|
||||||
|
//
|
||||||
|
// This allows -- for simple situations at least -- types to be defined just
|
||||||
|
// once in Go and the cty types derived from the Go types, but in the process
|
||||||
|
// it makes some assumptions that may be undesirable so applications are
|
||||||
|
// encouraged to build their cty types directly if exacting control is
|
||||||
|
// required.
|
||||||
|
//
|
||||||
|
// Not all Go types can be represented as cty types, so an error may be
|
||||||
|
// returned which is usually considered to be a bug in the calling program.
|
||||||
|
// In particular, ImpliedType will never use capsule types in its returned
|
||||||
|
// type, because it cannot know the capsule types supported by the calling
|
||||||
|
// program.
|
||||||
|
func ImpliedType(gv any) (cty.Type, error) {
|
||||||
|
rt := reflect.TypeOf(gv)
|
||||||
|
var path cty.Path
|
||||||
|
return impliedType(rt, path)
|
||||||
|
}
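For illustration, a hypothetical struct with cty field tags (not taken from the buildx codebase) and the type this derives:

type httpProbe struct {
	Path    string            `cty:"path"`
	Port    int               `cty:"port"`
	Headers map[string]string `cty:"headers"`
}

ty, err := ImpliedType(httpProbe{})
// ty == cty.Object(map[string]cty.Type{
//     "path":    cty.String,
//     "port":    cty.Number,
//     "headers": cty.Map(cty.String),
// })
_, _ = ty, err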
|
||||||
|
|
||||||
|
func impliedType(rt reflect.Type, path cty.Path) (cty.Type, error) {
|
||||||
|
if ety, err := impliedTypeExt(rt, path); err == nil {
|
||||||
|
return ety, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
switch rt.Kind() {
|
||||||
|
case reflect.Ptr:
|
||||||
|
return impliedType(rt.Elem(), path)
|
||||||
|
|
||||||
|
// Primitive types
|
||||||
|
case reflect.Bool:
|
||||||
|
return cty.Bool, nil
|
||||||
|
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||||
|
return cty.Number, nil
|
||||||
|
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||||
|
return cty.Number, nil
|
||||||
|
case reflect.Float32, reflect.Float64:
|
||||||
|
return cty.Number, nil
|
||||||
|
case reflect.String:
|
||||||
|
return cty.String, nil
|
||||||
|
|
||||||
|
// Collection types
|
||||||
|
case reflect.Slice:
|
||||||
|
path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.Number)})
|
||||||
|
ety, err := impliedType(rt.Elem(), path)
|
||||||
|
if err != nil {
|
||||||
|
return cty.NilType, err
|
||||||
|
}
|
||||||
|
return cty.List(ety), nil
|
||||||
|
case reflect.Map:
|
||||||
|
if !stringType.AssignableTo(rt.Key()) {
|
||||||
|
return cty.NilType, path.NewErrorf("no cty.Type for %s (must have string keys)", rt)
|
||||||
|
}
|
||||||
|
path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.String)})
|
||||||
|
ety, err := impliedType(rt.Elem(), path)
|
||||||
|
if err != nil {
|
||||||
|
return cty.NilType, err
|
||||||
|
}
|
||||||
|
return cty.Map(ety), nil
|
||||||
|
|
||||||
|
// Structural types
|
||||||
|
case reflect.Struct:
|
||||||
|
return impliedStructType(rt, path)
|
||||||
|
|
||||||
|
default:
|
||||||
|
return cty.NilType, path.NewErrorf("no cty.Type for %s", rt)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func impliedStructType(rt reflect.Type, path cty.Path) (cty.Type, error) {
|
||||||
|
if valueType.AssignableTo(rt) {
|
||||||
|
// Special case: cty.Value represents cty.DynamicPseudoType, for
|
||||||
|
// type conformance checking.
|
||||||
|
return cty.DynamicPseudoType, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
fieldIdxs := structTagIndices(rt)
|
||||||
|
if len(fieldIdxs) == 0 {
|
||||||
|
return cty.NilType, path.NewErrorf("no cty.Type for %s (no cty field tags)", rt)
|
||||||
|
}
|
||||||
|
|
||||||
|
atys := make(map[string]cty.Type, len(fieldIdxs))
|
||||||
|
|
||||||
|
{
|
||||||
|
// Temporary extension of path for attributes
|
||||||
|
path := append(path, nil)
|
||||||
|
|
||||||
|
for k, fi := range fieldIdxs {
|
||||||
|
path[len(path)-1] = cty.GetAttrStep{Name: k}
|
||||||
|
|
||||||
|
ft := rt.Field(fi).Type
|
||||||
|
aty, err := impliedType(ft, path)
|
||||||
|
if err != nil {
|
||||||
|
return cty.NilType, err
|
||||||
|
}
|
||||||
|
|
||||||
|
atys[k] = aty
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return cty.Object(atys), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
valueType = reflect.TypeOf(cty.Value{})
|
||||||
|
stringType = reflect.TypeOf("")
|
||||||
|
)
|
||||||
|
|
||||||
|
// structTagIndices interrogates the fields of the given type (which must
|
||||||
|
// be a struct type, or we'll panic) and returns a map from the cty
|
||||||
|
// attribute names declared via struct tags to the indices of the
|
||||||
|
// fields holding those tags.
|
||||||
|
//
|
||||||
|
// This function will panic if two fields within the struct are tagged with
|
||||||
|
// the same cty attribute name.
|
||||||
|
func structTagIndices(st reflect.Type) map[string]int {
|
||||||
|
ct := st.NumField()
|
||||||
|
ret := make(map[string]int, ct)
|
||||||
|
|
||||||
|
for i := range ct {
|
||||||
|
field := st.Field(i)
|
||||||
|
attrName := field.Tag.Get("cty")
|
||||||
|
if attrName != "" {
|
||||||
|
ret[attrName] = i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret
|
||||||
|
}
bake/hclparser/type_implied_ext.go (new file, 166 lines)
@@ -0,0 +1,166 @@
|
|||||||
|
package hclparser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/containerd/errdefs"
|
||||||
|
"github.com/zclconf/go-cty/cty"
|
||||||
|
"github.com/zclconf/go-cty/cty/convert"
|
||||||
|
"github.com/zclconf/go-cty/cty/gocty"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ToCtyValueConverter interface {
|
||||||
|
// ToCtyValue will convert this capsule value into a native
|
||||||
|
// cty.Value. This should not return a capsule type.
|
||||||
|
ToCtyValue() cty.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
type FromCtyValueConverter interface {
|
||||||
|
// FromCtyValue will initialize this value using a cty.Value.
|
||||||
|
FromCtyValue(in cty.Value, path cty.Path) error
|
||||||
|
}
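As a hypothetical example (not a type that exists in buildx), a duration wrapper satisfying both interfaces, which impliedTypeExt below would therefore map to a capsule type:

type Duration struct{ time.Duration }

// FromCtyValue parses a string value such as "30s" into the wrapper.
func (d *Duration) FromCtyValue(in cty.Value, p cty.Path) error {
	parsed, err := time.ParseDuration(in.AsString()) // assumes a cty.String input
	if err != nil {
		return p.NewErrorf("invalid duration: %s", err)
	}
	d.Duration = parsed
	return nil
}

// ToCtyValue renders the wrapper back as a native cty string.
func (d Duration) ToCtyValue() cty.Value {
	return cty.StringVal(d.Duration.String())
}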
|
||||||
|
|
||||||
|
type extensionType int
|
||||||
|
|
||||||
|
const (
|
||||||
|
unwrapCapsuleValueExtension extensionType = iota
|
||||||
|
)
|
||||||
|
|
||||||
|
func impliedTypeExt(rt reflect.Type, _ cty.Path) (cty.Type, error) {
|
||||||
|
if rt.Kind() != reflect.Pointer {
|
||||||
|
rt = reflect.PointerTo(rt)
|
||||||
|
}
|
||||||
|
|
||||||
|
if isCapsuleType(rt) {
|
||||||
|
return capsuleValueCapsuleType(rt), nil
|
||||||
|
}
|
||||||
|
return cty.NilType, errdefs.ErrNotImplemented
|
||||||
|
}
|
||||||
|
|
||||||
|
func isCapsuleType(rt reflect.Type) bool {
|
||||||
|
fromCtyValueType := reflect.TypeFor[FromCtyValueConverter]()
|
||||||
|
toCtyValueType := reflect.TypeFor[ToCtyValueConverter]()
|
||||||
|
return rt.Implements(fromCtyValueType) && rt.Implements(toCtyValueType)
|
||||||
|
}
|
||||||
|
|
||||||
|
var capsuleValueTypes sync.Map
|
||||||
|
|
||||||
|
func capsuleValueCapsuleType(rt reflect.Type) cty.Type {
|
||||||
|
if rt.Kind() != reflect.Pointer {
|
||||||
|
panic("capsule value must be a pointer")
|
||||||
|
}
|
||||||
|
|
||||||
|
elem := rt.Elem()
|
||||||
|
if val, loaded := capsuleValueTypes.Load(elem); loaded {
|
||||||
|
return val.(cty.Type)
|
||||||
|
}
|
||||||
|
|
||||||
|
toCtyValueType := reflect.TypeFor[ToCtyValueConverter]()
|
||||||
|
|
||||||
|
// First time used. Initialize new capsule ops.
|
||||||
|
ops := &cty.CapsuleOps{
|
||||||
|
ConversionTo: func(_ cty.Type) func(cty.Value, cty.Path) (any, error) {
|
||||||
|
return func(in cty.Value, p cty.Path) (any, error) {
|
||||||
|
rv := reflect.New(elem).Interface()
|
||||||
|
if err := rv.(FromCtyValueConverter).FromCtyValue(in, p); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return rv, nil
|
||||||
|
}
|
||||||
|
},
|
||||||
|
ConversionFrom: func(want cty.Type) func(any, cty.Path) (cty.Value, error) {
|
||||||
|
return func(in any, _ cty.Path) (cty.Value, error) {
|
||||||
|
rv := reflect.ValueOf(in).Convert(toCtyValueType)
|
||||||
|
v := rv.Interface().(ToCtyValueConverter).ToCtyValue()
|
||||||
|
return convert.Convert(v, want)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
ExtensionData: func(key any) any {
|
||||||
|
switch key {
|
||||||
|
case unwrapCapsuleValueExtension:
|
||||||
|
zero := reflect.Zero(elem).Interface()
|
||||||
|
if conv, ok := zero.(ToCtyValueConverter); ok {
|
||||||
|
return conv.ToCtyValue().Type()
|
||||||
|
}
|
||||||
|
|
||||||
|
zero = reflect.Zero(rt).Interface()
|
||||||
|
if conv, ok := zero.(ToCtyValueConverter); ok {
|
||||||
|
return conv.ToCtyValue().Type()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Attempt to store the new type. Use whichever was loaded first in the case
|
||||||
|
// of a race condition.
|
||||||
|
ety := cty.CapsuleWithOps(elem.Name(), elem, ops)
|
||||||
|
val, _ := capsuleValueTypes.LoadOrStore(elem, ety)
|
||||||
|
return val.(cty.Type)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnwrapCtyValue will unwrap capsule type values into their native cty value
|
||||||
|
// equivalents if possible.
|
||||||
|
func UnwrapCtyValue(in cty.Value) cty.Value {
|
||||||
|
want := toCtyValueType(in.Type())
|
||||||
|
if in.Type().Equals(want) {
|
||||||
|
return in
|
||||||
|
} else if out, err := convert.Convert(in, want); err == nil {
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
return cty.NullVal(want)
|
||||||
|
}
|
||||||
|
|
||||||
|
func toCtyValueType(in cty.Type) cty.Type {
|
||||||
|
if et := in.MapElementType(); et != nil {
|
||||||
|
return cty.Map(toCtyValueType(*et))
|
||||||
|
}
|
||||||
|
|
||||||
|
if et := in.SetElementType(); et != nil {
|
||||||
|
return cty.Set(toCtyValueType(*et))
|
||||||
|
}
|
||||||
|
|
||||||
|
if et := in.ListElementType(); et != nil {
|
||||||
|
return cty.List(toCtyValueType(*et))
|
||||||
|
}
|
||||||
|
|
||||||
|
if in.IsObjectType() {
|
||||||
|
var optional []string
|
||||||
|
inAttrTypes := in.AttributeTypes()
|
||||||
|
outAttrTypes := make(map[string]cty.Type, len(inAttrTypes))
|
||||||
|
for name, typ := range inAttrTypes {
|
||||||
|
outAttrTypes[name] = toCtyValueType(typ)
|
||||||
|
if in.AttributeOptional(name) {
|
||||||
|
optional = append(optional, name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return cty.ObjectWithOptionalAttrs(outAttrTypes, optional)
|
||||||
|
}
|
||||||
|
|
||||||
|
if in.IsTupleType() {
|
||||||
|
inTypes := in.TupleElementTypes()
|
||||||
|
outTypes := make([]cty.Type, len(inTypes))
|
||||||
|
for i, typ := range inTypes {
|
||||||
|
outTypes[i] = toCtyValueType(typ)
|
||||||
|
}
|
||||||
|
return cty.Tuple(outTypes)
|
||||||
|
}
|
||||||
|
|
||||||
|
if in.IsCapsuleType() {
|
||||||
|
if out := in.CapsuleExtensionData(unwrapCapsuleValueExtension); out != nil {
|
||||||
|
return out.(cty.Type)
|
||||||
|
}
|
||||||
|
return cty.DynamicPseudoType
|
||||||
|
}
|
||||||
|
|
||||||
|
return in
|
||||||
|
}
|
||||||
|
|
||||||
|
func ToCtyValue(val any, ty cty.Type) (cty.Value, error) {
|
||||||
|
out, err := gocty.ToCtyValue(val, ty)
|
||||||
|
if err != nil {
|
||||||
|
return out, err
|
||||||
|
}
|
||||||
|
return UnwrapCtyValue(out), nil
|
||||||
|
}
bake/remote.go (110 lines changed)
@@ -4,27 +4,61 @@ import (
|
|||||||
"archive/tar"
|
"archive/tar"
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/docker/buildx/build"
|
"github.com/docker/buildx/builder"
|
||||||
|
controllerapi "github.com/docker/buildx/controller/pb"
|
||||||
"github.com/docker/buildx/driver"
|
"github.com/docker/buildx/driver"
|
||||||
"github.com/docker/buildx/util/progress"
|
"github.com/docker/buildx/util/progress"
|
||||||
|
"github.com/docker/go-units"
|
||||||
"github.com/moby/buildkit/client"
|
"github.com/moby/buildkit/client"
|
||||||
"github.com/moby/buildkit/client/llb"
|
"github.com/moby/buildkit/client/llb"
|
||||||
|
"github.com/moby/buildkit/frontend/dockerui"
|
||||||
gwclient "github.com/moby/buildkit/frontend/gateway/client"
|
gwclient "github.com/moby/buildkit/frontend/gateway/client"
|
||||||
|
"github.com/moby/buildkit/session"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const maxBakeDefinitionSize = 2 * 1024 * 1024 // 2 MB
|
||||||
|
|
||||||
type Input struct {
|
type Input struct {
|
||||||
State *llb.State
|
State *llb.State
|
||||||
URL string
|
URL string
|
||||||
}
|
}
|
||||||
|
|
||||||
func ReadRemoteFiles(ctx context.Context, dis []build.DriverInfo, url string, names []string, pw progress.Writer) ([]File, *Input, error) {
|
func ReadRemoteFiles(ctx context.Context, nodes []builder.Node, url string, names []string, pw progress.Writer) ([]File, *Input, error) {
|
||||||
|
var sessions []session.Attachable
|
||||||
var filename string
|
var filename string
|
||||||
st, ok := detectGitContext(url)
|
|
||||||
if !ok {
|
st, ok := dockerui.DetectGitContext(url, false)
|
||||||
st, filename, ok = detectHTTPContext(url)
|
if ok {
|
||||||
|
if ssh, err := controllerapi.CreateSSH([]*controllerapi.SSH{{
|
||||||
|
ID: "default",
|
||||||
|
Paths: strings.Split(os.Getenv("BUILDX_BAKE_GIT_SSH"), ","),
|
||||||
|
}}); err == nil {
|
||||||
|
sessions = append(sessions, ssh)
|
||||||
|
}
|
||||||
|
var gitAuthSecrets []*controllerapi.Secret
|
||||||
|
if _, ok := os.LookupEnv("BUILDX_BAKE_GIT_AUTH_TOKEN"); ok {
|
||||||
|
gitAuthSecrets = append(gitAuthSecrets, &controllerapi.Secret{
|
||||||
|
ID: llb.GitAuthTokenKey,
|
||||||
|
Env: "BUILDX_BAKE_GIT_AUTH_TOKEN",
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if _, ok := os.LookupEnv("BUILDX_BAKE_GIT_AUTH_HEADER"); ok {
|
||||||
|
gitAuthSecrets = append(gitAuthSecrets, &controllerapi.Secret{
|
||||||
|
ID: llb.GitAuthHeaderKey,
|
||||||
|
Env: "BUILDX_BAKE_GIT_AUTH_HEADER",
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if len(gitAuthSecrets) > 0 {
|
||||||
|
if secrets, err := controllerapi.CreateSecrets(gitAuthSecrets); err == nil {
|
||||||
|
sessions = append(sessions, secrets)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
st, filename, ok = dockerui.DetectHTTPContext(url)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, nil, errors.Errorf("not url context")
|
return nil, nil, errors.Errorf("not url context")
|
||||||
}
|
}
|
||||||
@@ -33,25 +67,25 @@ func ReadRemoteFiles(ctx context.Context, dis []build.DriverInfo, url string, na
|
|||||||
inp := &Input{State: st, URL: url}
|
inp := &Input{State: st, URL: url}
|
||||||
var files []File
|
var files []File
|
||||||
|
|
||||||
var di *build.DriverInfo
|
var node *builder.Node
|
||||||
for _, d := range dis {
|
for i, n := range nodes {
|
||||||
if d.Err == nil {
|
if n.Err == nil {
|
||||||
di = &d
|
node = &nodes[i]
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if di == nil {
|
if node == nil {
|
||||||
return nil, nil, nil
|
return nil, nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
c, err := driver.Boot(ctx, ctx, di.Driver, pw)
|
c, err := driver.Boot(ctx, ctx, node.Driver, pw)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
ch, done := progress.NewChannel(pw)
|
ch, done := progress.NewChannel(pw)
|
||||||
defer func() { <-done }()
|
defer func() { <-done }()
|
||||||
_, err = c.Build(ctx, client.SolveOpt{}, "buildx", func(ctx context.Context, c gwclient.Client) (*gwclient.Result, error) {
|
_, err = c.Build(ctx, client.SolveOpt{Session: sessions, Internal: true}, "buildx", func(ctx context.Context, c gwclient.Client) (*gwclient.Result, error) {
|
||||||
def, err := st.Marshal(ctx)
|
def, err := st.Marshal(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -75,7 +109,6 @@ func ReadRemoteFiles(ctx context.Context, dis []build.DriverInfo, url string, na
|
|||||||
}
|
}
|
||||||
return nil, err
|
return nil, err
|
||||||
}, ch)
|
}, ch)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
@@ -83,51 +116,6 @@ func ReadRemoteFiles(ctx context.Context, dis []build.DriverInfo, url string, na
|
|||||||
return files, inp, nil
|
return files, inp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func IsRemoteURL(url string) bool {
|
|
||||||
if _, _, ok := detectHTTPContext(url); ok {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if _, ok := detectGitContext(url); ok {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func detectHTTPContext(url string) (*llb.State, string, bool) {
|
|
||||||
if httpPrefix.MatchString(url) {
|
|
||||||
httpContext := llb.HTTP(url, llb.Filename("context"), llb.WithCustomName("[internal] load remote build context"))
|
|
||||||
return &httpContext, "context", true
|
|
||||||
}
|
|
||||||
return nil, "", false
|
|
||||||
}
|
|
||||||
|
|
||||||
func detectGitContext(ref string) (*llb.State, bool) {
|
|
||||||
found := false
|
|
||||||
if httpPrefix.MatchString(ref) && gitURLPathWithFragmentSuffix.MatchString(ref) {
|
|
||||||
found = true
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, prefix := range []string{"git://", "github.com/", "git@"} {
|
|
||||||
if strings.HasPrefix(ref, prefix) {
|
|
||||||
found = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !found {
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
|
|
||||||
parts := strings.SplitN(ref, "#", 2)
|
|
||||||
branch := ""
|
|
||||||
if len(parts) > 1 {
|
|
||||||
branch = parts[1]
|
|
||||||
}
|
|
||||||
gitOpts := []llb.GitOption{llb.WithCustomName("[internal] load git source " + ref)}
|
|
||||||
|
|
||||||
st := llb.Git(parts[0], branch, gitOpts...)
|
|
||||||
return &st, true
|
|
||||||
}
|
|
||||||
|
|
||||||
func isArchive(header []byte) bool {
|
func isArchive(header []byte) bool {
|
||||||
for _, m := range [][]byte{
|
for _, m := range [][]byte{
|
||||||
{0x42, 0x5A, 0x68}, // bzip2
|
{0x42, 0x5A, 0x68}, // bzip2
|
||||||
@@ -192,9 +180,9 @@ func filesFromURLRef(ctx context.Context, c gwclient.Client, ref gwclient.Refere
|
|||||||
name := inp.URL
|
name := inp.URL
|
||||||
inp.URL = ""
|
inp.URL = ""
|
||||||
|
|
||||||
if len(dt) > stat.Size() {
|
if int64(len(dt)) > stat.Size {
|
||||||
if stat.Size() > 1024*512 {
|
if stat.Size > maxBakeDefinitionSize {
|
||||||
return nil, errors.Errorf("non-archive definition URL bigger than maximum allowed size")
|
return nil, errors.Errorf("non-archive definition URL bigger than maximum allowed size (%s)", units.HumanSize(maxBakeDefinitionSize))
|
||||||
}
|
}
|
||||||
|
|
||||||
dt, err = ref.ReadFile(ctx, gwclient.ReadRequest{
|
dt, err = ref.ReadFile(ctx, gwclient.ReadRequest{
|
||||||
|
|||||||
1810	build/build.go	File diff suppressed because it is too large
55	build/dial.go	Normal file
@@ -0,0 +1,55 @@
package build

import (
	"context"
	stderrors "errors"
	"net"
	"slices"

	"github.com/containerd/platforms"
	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/util/progress"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

func Dial(ctx context.Context, nodes []builder.Node, pw progress.Writer, platform *v1.Platform) (net.Conn, error) {
	nodes, err := filterAvailableNodes(nodes)
	if err != nil {
		return nil, err
	}

	if len(nodes) == 0 {
		return nil, errors.New("no nodes available")
	}

	var pls []v1.Platform
	if platform != nil {
		pls = []v1.Platform{*platform}
	}

	opts := map[string]Options{"default": {Platforms: pls}}
	resolved, err := resolveDrivers(ctx, nodes, opts, pw)
	if err != nil {
		return nil, err
	}

	var dialError error
	for _, ls := range resolved {
		for _, rn := range ls {
			if platform != nil {
				if !slices.ContainsFunc(rn.platforms, platforms.Only(*platform).Match) {
					continue
				}
			}

			conn, err := nodes[rn.driverIndex].Driver.Dial(ctx)
			if err == nil {
				return conn, nil
			}
			dialError = stderrors.Join(err)
		}
	}

	return nil, errors.Wrap(dialError, "no nodes available")
}
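Dial above walks the resolved nodes in order and returns the first connection that succeeds. The following is a hedged, standalone sketch of that "first dialer that works" loop, assuming a plain list of dial functions rather than buildx drivers; errors.Join is used so the accumulated failures stay visible when every candidate fails.

package main

import (
	"errors"
	"fmt"
	"net"
)

// dialFirst tries each dialer in order and returns the first working
// connection, or all collected errors if none succeeds.
func dialFirst(dialers []func() (net.Conn, error)) (net.Conn, error) {
	var dialErr error
	for _, d := range dialers {
		conn, err := d()
		if err == nil {
			return conn, nil
		}
		dialErr = errors.Join(dialErr, err)
	}
	return nil, fmt.Errorf("no nodes available: %w", dialErr)
}

func main() {
	_, err := dialFirst([]func() (net.Conn, error){
		func() (net.Conn, error) { return nil, errors.New("node offline") },
	})
	fmt.Println(err)
}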
353	build/driver.go	Normal file
@@ -0,0 +1,353 @@
package build

import (
	"context"
	"fmt"
	"slices"
	"sync"

	"github.com/containerd/platforms"
	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/driver"
	"github.com/docker/buildx/util/progress"
	"github.com/moby/buildkit/client"
	gateway "github.com/moby/buildkit/frontend/gateway/client"
	"github.com/moby/buildkit/util/flightcontrol"
	"github.com/moby/buildkit/util/tracing"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"go.opentelemetry.io/otel/trace"
	"golang.org/x/sync/errgroup"
)

type resolvedNode struct {
	resolver    *nodeResolver
	driverIndex int
	platforms   []specs.Platform
}

func (dp resolvedNode) Node() builder.Node {
	return dp.resolver.nodes[dp.driverIndex]
}

func (dp resolvedNode) Client(ctx context.Context) (*client.Client, error) {
	clients, err := dp.resolver.boot(ctx, []int{dp.driverIndex}, nil)
	if err != nil {
		return nil, err
	}
	return clients[0], nil
}

func (dp resolvedNode) BuildOpts(ctx context.Context) (gateway.BuildOpts, error) {
	opts, err := dp.resolver.opts(ctx, []int{dp.driverIndex}, nil)
	if err != nil {
		return gateway.BuildOpts{}, err
	}
	return opts[0], nil
}

type matchMaker func(specs.Platform) platforms.MatchComparer

type cachedGroup[T any] struct {
	g       flightcontrol.Group[T]
	cache   map[int]T
	cacheMu sync.Mutex
}

func newCachedGroup[T any]() cachedGroup[T] {
	return cachedGroup[T]{
		cache: map[int]T{},
	}
}

type nodeResolver struct {
	nodes     []builder.Node
	clients   cachedGroup[*client.Client]
	buildOpts cachedGroup[gateway.BuildOpts]
}

func resolveDrivers(ctx context.Context, nodes []builder.Node, opt map[string]Options, pw progress.Writer) (map[string][]*resolvedNode, error) {
	driverRes := newDriverResolver(nodes)
	drivers, err := driverRes.Resolve(ctx, opt, pw)
	if err != nil {
		return nil, err
	}
	return drivers, err
}

func newDriverResolver(nodes []builder.Node) *nodeResolver {
	r := &nodeResolver{
		nodes:     nodes,
		clients:   newCachedGroup[*client.Client](),
		buildOpts: newCachedGroup[gateway.BuildOpts](),
	}
	return r
}

func (r *nodeResolver) Resolve(ctx context.Context, opt map[string]Options, pw progress.Writer) (map[string][]*resolvedNode, error) {
	if len(r.nodes) == 0 {
		return nil, nil
	}

	nodes := map[string][]*resolvedNode{}
	for k, opt := range opt {
		node, perfect, err := r.resolve(ctx, opt.Platforms, pw, platforms.OnlyStrict, nil)
		if err != nil {
			return nil, err
		}
		if !perfect {
			break
		}
		nodes[k] = node
	}
	if len(nodes) != len(opt) {
		// if we didn't get a perfect match, we need to boot all drivers
		allIndexes := make([]int, len(r.nodes))
		for i := range allIndexes {
			allIndexes[i] = i
		}

		clients, err := r.boot(ctx, allIndexes, pw)
		if err != nil {
			return nil, err
		}
		eg, egCtx := errgroup.WithContext(ctx)
		workers := make([][]specs.Platform, len(clients))
		for i, c := range clients {
			i, c := i, c
			if c == nil {
				continue
			}
			eg.Go(func() error {
				ww, err := c.ListWorkers(egCtx)
				if err != nil {
					return errors.Wrap(err, "listing workers")
				}

				ps := make(map[string]specs.Platform, len(ww))
				for _, w := range ww {
					for _, p := range w.Platforms {
						pk := platforms.Format(platforms.Normalize(p))
						ps[pk] = p
					}
				}
				for _, p := range ps {
					workers[i] = append(workers[i], p)
				}
				return nil
			})
		}
		if err := eg.Wait(); err != nil {
			return nil, err
		}

		// then we can attempt to match against all the available platforms
		// (this time we don't care about imperfect matches)
		nodes = map[string][]*resolvedNode{}
		for k, opt := range opt {
			node, _, err := r.resolve(ctx, opt.Platforms, pw, platforms.Only, func(idx int, n builder.Node) []specs.Platform {
				return workers[idx]
			})
			if err != nil {
				return nil, err
			}
			nodes[k] = node
		}
	}

	idxs := make([]int, 0, len(r.nodes))
	for _, nodes := range nodes {
		for _, node := range nodes {
			idxs = append(idxs, node.driverIndex)
		}
	}

	// preload capabilities
	span, ctx := tracing.StartSpan(ctx, "load buildkit capabilities", trace.WithSpanKind(trace.SpanKindInternal))
	_, err := r.opts(ctx, idxs, pw)
	tracing.FinishWithError(span, err)
	if err != nil {
		return nil, err
	}

	return nodes, nil
}

func (r *nodeResolver) resolve(ctx context.Context, ps []specs.Platform, pw progress.Writer, matcher matchMaker, additional func(idx int, n builder.Node) []specs.Platform) ([]*resolvedNode, bool, error) {
	if len(r.nodes) == 0 {
		return nil, true, nil
	}

	perfect := true
	nodeIdxs := make([]int, 0)
	for _, p := range ps {
		idx := r.get(p, matcher, additional)
		if idx == -1 {
			idx = 0
			perfect = false
		}
		nodeIdxs = append(nodeIdxs, idx)
	}

	var nodes []*resolvedNode
	if len(nodeIdxs) == 0 {
		nodes = append(nodes, &resolvedNode{
			resolver:    r,
			driverIndex: 0,
		})
		nodeIdxs = append(nodeIdxs, 0)
	} else {
		for i, idx := range nodeIdxs {
			node := &resolvedNode{
				resolver:    r,
				driverIndex: idx,
			}
			if len(ps) > 0 {
				node.platforms = []specs.Platform{ps[i]}
			}
			nodes = append(nodes, node)
		}
	}

	nodes = recombineNodes(nodes)
	if _, err := r.boot(ctx, nodeIdxs, pw); err != nil {
		return nil, false, err
	}
	return nodes, perfect, nil
}

func (r *nodeResolver) get(p specs.Platform, matcher matchMaker, additionalPlatforms func(int, builder.Node) []specs.Platform) int {
	best := -1
	bestPlatform := specs.Platform{}
	for i, node := range r.nodes {
		platforms := node.Platforms
		if additionalPlatforms != nil {
			platforms = slices.Clone(platforms)
			platforms = append(platforms, additionalPlatforms(i, node)...)
		}
		for _, p2 := range platforms {
			m := matcher(p2)
			if !m.Match(p) {
				continue
			}

			if best == -1 {
				best = i
				bestPlatform = p2
				continue
			}
			if matcher(p2).Less(p, bestPlatform) {
				best = i
				bestPlatform = p2
			}
		}
	}
	return best
}

func (r *nodeResolver) boot(ctx context.Context, idxs []int, pw progress.Writer) ([]*client.Client, error) {
	clients := make([]*client.Client, len(idxs))

	baseCtx := ctx
	eg, ctx := errgroup.WithContext(ctx)

	for i, idx := range idxs {
		i, idx := i, idx
		eg.Go(func() error {
			c, err := r.clients.g.Do(ctx, fmt.Sprint(idx), func(ctx context.Context) (*client.Client, error) {
				if r.nodes[idx].Driver == nil {
					return nil, nil
				}
				r.clients.cacheMu.Lock()
				c, ok := r.clients.cache[idx]
				r.clients.cacheMu.Unlock()
				if ok {
					return c, nil
				}
				c, err := driver.Boot(ctx, baseCtx, r.nodes[idx].Driver, pw)
				if err != nil {
					return nil, err
				}
				r.clients.cacheMu.Lock()
				r.clients.cache[idx] = c
				r.clients.cacheMu.Unlock()
				return c, nil
			})
			if err != nil {
				return err
			}
			clients[i] = c
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		return nil, err
	}

	return clients, nil
}

func (r *nodeResolver) opts(ctx context.Context, idxs []int, pw progress.Writer) ([]gateway.BuildOpts, error) {
	clients, err := r.boot(ctx, idxs, pw)
	if err != nil {
		return nil, err
	}

	bopts := make([]gateway.BuildOpts, len(clients))
	eg, ctx := errgroup.WithContext(ctx)
	for i, idxs := range idxs {
		i, idx := i, idxs
		c := clients[i]
		if c == nil {
			continue
		}
		eg.Go(func() error {
			opt, err := r.buildOpts.g.Do(ctx, fmt.Sprint(idx), func(ctx context.Context) (gateway.BuildOpts, error) {
				r.buildOpts.cacheMu.Lock()
				opt, ok := r.buildOpts.cache[idx]
				r.buildOpts.cacheMu.Unlock()
				if ok {
					return opt, nil
				}
				_, err := c.Build(ctx, client.SolveOpt{
					Internal: true,
				}, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
					opt = c.BuildOpts()
					return nil, nil
				}, nil)
				if err != nil {
					return gateway.BuildOpts{}, err
				}
				r.buildOpts.cacheMu.Lock()
				r.buildOpts.cache[idx] = opt
				r.buildOpts.cacheMu.Unlock()
				return opt, err
			})
			if err != nil {
				return err
			}
			bopts[i] = opt
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		return nil, err
	}
	return bopts, nil
}

// recombineDriverPairs recombines resolved nodes that are on the same driver
// back together into a single node.
func recombineNodes(nodes []*resolvedNode) []*resolvedNode {
	result := make([]*resolvedNode, 0, len(nodes))
	lookup := map[int]int{}
	for _, node := range nodes {
		if idx, ok := lookup[node.driverIndex]; ok {
			result[idx].platforms = append(result[idx].platforms, node.platforms...)
		} else {
			lookup[node.driverIndex] = len(result)
			result = append(result, node)
		}
	}
	return result
}
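The core of nodeResolver.get above is a "best platform" scan: each node platform is wrapped in a containerd MatchComparer, and the comparer's Less method decides whether a new candidate beats the current best. Below is a hedged, simplified standalone sketch of that idea, not the resolver itself; the node list and the expected result in main are illustrative assumptions that lean on the non-strict ARM matching the tests above exercise.

package main

import (
	"fmt"

	"github.com/containerd/platforms"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
)

// bestIndex returns the index of the node whose platform list best satisfies
// want, or -1 if no node matches at all.
func bestIndex(nodes [][]specs.Platform, want specs.Platform) int {
	best := -1
	var bestPlatform specs.Platform
	for i, ps := range nodes {
		for _, p := range ps {
			if !platforms.Only(p).Match(want) {
				continue
			}
			if best == -1 || platforms.Only(p).Less(want, bestPlatform) {
				best = i
				bestPlatform = p
			}
		}
	}
	return best
}

func main() {
	nodes := [][]specs.Platform{
		{platforms.MustParse("linux/amd64")},
		{platforms.MustParse("linux/arm64")}, // non-strict matching also covers lower arm variants
	}
	fmt.Println(bestIndex(nodes, platforms.MustParse("linux/arm/v7")))
}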
315	build/driver_test.go	Normal file
@@ -0,0 +1,315 @@
package build

import (
	"context"
	"sort"
	"testing"

	"github.com/containerd/platforms"
	"github.com/docker/buildx/builder"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/stretchr/testify/require"
)

func TestFindDriverSanity(t *testing.T) {
	r := makeTestResolver(map[string][]specs.Platform{
		"aaa": {platforms.DefaultSpec()},
	})

	res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.DefaultSpec()}, nil, platforms.OnlyStrict, nil)
	require.NoError(t, err)
	require.True(t, perfect)
	require.Len(t, res, 1)
	require.Equal(t, 0, res[0].driverIndex)
	require.Equal(t, "aaa", res[0].Node().Builder)
	require.Equal(t, []specs.Platform{platforms.DefaultSpec()}, res[0].platforms)
}

func TestFindDriverEmpty(t *testing.T) {
	r := makeTestResolver(nil)

	res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.DefaultSpec()}, nil, platforms.Only, nil)
	require.NoError(t, err)
	require.True(t, perfect)
	require.Nil(t, res)
}

func TestFindDriverWeirdName(t *testing.T) {
	r := makeTestResolver(map[string][]specs.Platform{
		"aaa": {platforms.MustParse("linux/amd64")},
		"bbb": {platforms.MustParse("linux/foobar")},
	})

	// find first platform
	res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/foobar")}, nil, platforms.Only, nil)
	require.NoError(t, err)
	require.True(t, perfect)
	require.Len(t, res, 1)
	require.Equal(t, 1, res[0].driverIndex)
	require.Equal(t, "bbb", res[0].Node().Builder)
}

func TestFindDriverUnknown(t *testing.T) {
	r := makeTestResolver(map[string][]specs.Platform{
		"aaa": {platforms.MustParse("linux/amd64")},
	})

	res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/riscv64")}, nil, platforms.Only, nil)
	require.NoError(t, err)
	require.False(t, perfect)
	require.Len(t, res, 1)
	require.Equal(t, 0, res[0].driverIndex)
	require.Equal(t, "aaa", res[0].Node().Builder)
}

func TestSelectNodeSinglePlatform(t *testing.T) {
	r := makeTestResolver(map[string][]specs.Platform{
		"aaa": {platforms.MustParse("linux/amd64")},
		"bbb": {platforms.MustParse("linux/riscv64")},
	})

	// find first platform
	res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/amd64")}, nil, platforms.Only, nil)
	require.NoError(t, err)
	require.True(t, perfect)
	require.Len(t, res, 1)
	require.Equal(t, 0, res[0].driverIndex)
	require.Equal(t, "aaa", res[0].Node().Builder)

	// find second platform
	res, perfect, err = r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/riscv64")}, nil, platforms.Only, nil)
	require.NoError(t, err)
	require.True(t, perfect)
	require.Len(t, res, 1)
	require.Equal(t, 1, res[0].driverIndex)
	require.Equal(t, "bbb", res[0].Node().Builder)

	// find an unknown platform, should match the first driver
	res, perfect, err = r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/s390x")}, nil, platforms.Only, nil)
	require.NoError(t, err)
	require.False(t, perfect)
	require.Len(t, res, 1)
	require.Equal(t, 0, res[0].driverIndex)
	require.Equal(t, "aaa", res[0].Node().Builder)
}

func TestSelectNodeMultiPlatform(t *testing.T) {
	r := makeTestResolver(map[string][]specs.Platform{
		"aaa": {platforms.MustParse("linux/amd64"), platforms.MustParse("linux/arm64")},
		"bbb": {platforms.MustParse("linux/riscv64")},
	})

	res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/amd64")}, nil, platforms.Only, nil)
	require.NoError(t, err)
	require.True(t, perfect)
	require.Len(t, res, 1)
	require.Equal(t, 0, res[0].driverIndex)
	require.Equal(t, "aaa", res[0].Node().Builder)

	res, perfect, err = r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm64")}, nil, platforms.Only, nil)
	require.NoError(t, err)
	require.True(t, perfect)
	require.Len(t, res, 1)
	require.Equal(t, 0, res[0].driverIndex)
	require.Equal(t, "aaa", res[0].Node().Builder)

	res, perfect, err = r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/riscv64")}, nil, platforms.Only, nil)
	require.NoError(t, err)
	require.True(t, perfect)
	require.Len(t, res, 1)
	require.Equal(t, 1, res[0].driverIndex)
	require.Equal(t, "bbb", res[0].Node().Builder)
}

func TestSelectNodeNonStrict(t *testing.T) {
	r := makeTestResolver(map[string][]specs.Platform{
		"aaa": {platforms.MustParse("linux/amd64")},
		"bbb": {platforms.MustParse("linux/arm64")},
	})

	// arm64 should match itself
	res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm64")}, nil, platforms.Only, nil)
	require.NoError(t, err)
	require.True(t, perfect)
	require.Len(t, res, 1)
	require.Equal(t, "bbb", res[0].Node().Builder)

	// arm64 may support arm/v8
	res, perfect, err = r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm/v8")}, nil, platforms.Only, nil)
	require.NoError(t, err)
	require.True(t, perfect)
	require.Len(t, res, 1)
	require.Equal(t, "bbb", res[0].Node().Builder)

	// arm64 may support arm/v7
	res, perfect, err = r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm/v7")}, nil, platforms.Only, nil)
	require.NoError(t, err)
	require.True(t, perfect)
	require.Len(t, res, 1)
	require.Equal(t, "bbb", res[0].Node().Builder)
}

func TestSelectNodeNonStrictARM(t *testing.T) {
	r := makeTestResolver(map[string][]specs.Platform{
		"aaa": {platforms.MustParse("linux/amd64")},
		"bbb": {platforms.MustParse("linux/arm64")},
		"ccc": {platforms.MustParse("linux/arm/v8")},
	})

	res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm/v8")}, nil, platforms.Only, nil)
	require.NoError(t, err)
	require.True(t, perfect)
	require.Len(t, res, 1)
	require.Equal(t, "ccc", res[0].Node().Builder)

	res, perfect, err = r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm/v7")}, nil, platforms.Only, nil)
	require.NoError(t, err)
	require.True(t, perfect)
	require.Len(t, res, 1)
	require.Equal(t, "ccc", res[0].Node().Builder)
}

func TestSelectNodeNonStrictLower(t *testing.T) {
	r := makeTestResolver(map[string][]specs.Platform{
		"aaa": {platforms.MustParse("linux/amd64")},
		"bbb": {platforms.MustParse("linux/arm/v7")},
	})

	// v8 can't be built on v7 (so we should select the default)...
	res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm/v8")}, nil, platforms.Only, nil)
	require.NoError(t, err)
	require.False(t, perfect)
	require.Len(t, res, 1)
	require.Equal(t, "aaa", res[0].Node().Builder)

	// ...but v6 can be built on v8
	res, perfect, err = r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm/v6")}, nil, platforms.Only, nil)
	require.NoError(t, err)
	require.True(t, perfect)
	require.Len(t, res, 1)
	require.Equal(t, "bbb", res[0].Node().Builder)
}

func TestSelectNodePreferStart(t *testing.T) {
	r := makeTestResolver(map[string][]specs.Platform{
		"aaa": {platforms.MustParse("linux/amd64")},
		"bbb": {platforms.MustParse("linux/riscv64")},
		"ccc": {platforms.MustParse("linux/riscv64")},
	})

	res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/riscv64")}, nil, platforms.Only, nil)
	require.NoError(t, err)
	require.True(t, perfect)
	require.Len(t, res, 1)
	require.Equal(t, "bbb", res[0].Node().Builder)
}

func TestSelectNodePreferExact(t *testing.T) {
	r := makeTestResolver(map[string][]specs.Platform{
		"aaa": {platforms.MustParse("linux/arm/v8")},
		"bbb": {platforms.MustParse("linux/arm/v7")},
	})

	res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm/v7")}, nil, platforms.Only, nil)
	require.NoError(t, err)
	require.True(t, perfect)
	require.Len(t, res, 1)
	require.Equal(t, "bbb", res[0].Node().Builder)
}

func TestSelectNodeNoPlatform(t *testing.T) {
	r := makeTestResolver(map[string][]specs.Platform{
		"aaa": {platforms.MustParse("linux/foobar")},
		"bbb": {platforms.DefaultSpec()},
	})

	res, perfect, err := r.resolve(context.TODO(), []specs.Platform{}, nil, platforms.Only, nil)
	require.NoError(t, err)
	require.True(t, perfect)
	require.Len(t, res, 1)
	require.Equal(t, "aaa", res[0].Node().Builder)
	require.Empty(t, res[0].platforms)
}

func TestSelectNodeAdditionalPlatforms(t *testing.T) {
	r := makeTestResolver(map[string][]specs.Platform{
		"aaa": {platforms.MustParse("linux/amd64")},
		"bbb": {platforms.MustParse("linux/arm/v8")},
	})

	res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm/v7")}, nil, platforms.Only, nil)
	require.NoError(t, err)
	require.True(t, perfect)
	require.Len(t, res, 1)
	require.Equal(t, "bbb", res[0].Node().Builder)

	res, perfect, err = r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm/v7")}, nil, platforms.Only, func(idx int, n builder.Node) []specs.Platform {
		if n.Builder == "aaa" {
			return []specs.Platform{platforms.MustParse("linux/arm/v7")}
		}
		return nil
	})
	require.NoError(t, err)
	require.True(t, perfect)
	require.Len(t, res, 1)
	require.Equal(t, "aaa", res[0].Node().Builder)
}

func TestSplitNodeMultiPlatform(t *testing.T) {
	r := makeTestResolver(map[string][]specs.Platform{
		"aaa": {platforms.MustParse("linux/amd64"), platforms.MustParse("linux/arm64")},
		"bbb": {platforms.MustParse("linux/riscv64")},
	})

	res, perfect, err := r.resolve(context.TODO(), []specs.Platform{
		platforms.MustParse("linux/amd64"),
		platforms.MustParse("linux/arm64"),
	}, nil, platforms.Only, nil)
	require.NoError(t, err)
	require.True(t, perfect)
	require.Len(t, res, 1)
	require.Equal(t, "aaa", res[0].Node().Builder)

	res, perfect, err = r.resolve(context.TODO(), []specs.Platform{
		platforms.MustParse("linux/amd64"),
		platforms.MustParse("linux/riscv64"),
	}, nil, platforms.Only, nil)
	require.NoError(t, err)
	require.True(t, perfect)
	require.Len(t, res, 2)
	require.Equal(t, "aaa", res[0].Node().Builder)
	require.Equal(t, "bbb", res[1].Node().Builder)
}

func TestSplitNodeMultiPlatformNoUnify(t *testing.T) {
	r := makeTestResolver(map[string][]specs.Platform{
		"aaa": {platforms.MustParse("linux/amd64")},
		"bbb": {platforms.MustParse("linux/amd64"), platforms.MustParse("linux/riscv64")},
	})

	// the "best" choice would be the node with both platforms, but we're using
	// a naive algorithm that doesn't try to unify the platforms
	res, perfect, err := r.resolve(context.TODO(), []specs.Platform{
		platforms.MustParse("linux/amd64"),
		platforms.MustParse("linux/riscv64"),
	}, nil, platforms.Only, nil)
	require.NoError(t, err)
	require.True(t, perfect)
	require.Len(t, res, 2)
	require.Equal(t, "aaa", res[0].Node().Builder)
	require.Equal(t, "bbb", res[1].Node().Builder)
}

func makeTestResolver(nodes map[string][]specs.Platform) *nodeResolver {
	var ns []builder.Node
	for name, platforms := range nodes {
		ns = append(ns, builder.Node{
			Builder:   name,
			Platforms: platforms,
		})
	}
	sort.Slice(ns, func(i, j int) bool {
		return ns[i].Builder < ns[j].Builder
	})
	return newDriverResolver(ns)
}
159	build/git.go	Normal file
@@ -0,0 +1,159 @@
package build

import (
	"context"
	"maps"
	"os"
	"path"
	"path/filepath"
	"strconv"
	"strings"

	"github.com/docker/buildx/util/gitutil"
	"github.com/docker/buildx/util/osutil"
	"github.com/moby/buildkit/client"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

const DockerfileLabel = "com.docker.image.source.entrypoint"

type gitAttrsAppendFunc func(so *client.SolveOpt)

func gitAppendNoneFunc(_ *client.SolveOpt) {}

func getGitAttributes(ctx context.Context, contextPath, dockerfilePath string) (f gitAttrsAppendFunc, err error) {
	defer func() {
		if f == nil {
			f = gitAppendNoneFunc
		}
	}()

	if contextPath == "" {
		return nil, nil
	}

	setGitLabels := false
	if v, ok := os.LookupEnv("BUILDX_GIT_LABELS"); ok {
		if v == "full" { // backward compatibility with old "full" mode
			setGitLabels = true
		} else if v, err := strconv.ParseBool(v); err == nil {
			setGitLabels = v
		}
	}
	setGitInfo := true
	if v, ok := os.LookupEnv("BUILDX_GIT_INFO"); ok {
		if v, err := strconv.ParseBool(v); err == nil {
			setGitInfo = v
		}
	}

	if !setGitLabels && !setGitInfo {
		return nil, nil
	}

	// figure out in which directory the git command needs to run in
	var wd string
	if filepath.IsAbs(contextPath) {
		wd = contextPath
	} else {
		wd, _ = filepath.Abs(filepath.Join(osutil.GetWd(), contextPath))
	}
	wd = osutil.SanitizePath(wd)

	gitc, err := gitutil.New(gitutil.WithContext(ctx), gitutil.WithWorkingDir(wd))
	if err != nil {
		if st, err1 := os.Stat(path.Join(wd, ".git")); err1 == nil && st.IsDir() {
			return nil, errors.Wrap(err, "git was not found in the system")
		}
		return nil, nil
	}

	if !gitc.IsInsideWorkTree() {
		if st, err := os.Stat(path.Join(wd, ".git")); err == nil && st.IsDir() {
			return nil, errors.New("failed to read current commit information with git rev-parse --is-inside-work-tree")
		}
		return nil, nil
	}

	root, err := gitc.RootDir()
	if err != nil {
		return nil, errors.Wrap(err, "failed to get git root dir")
	}

	res := make(map[string]string)

	if sha, err := gitc.FullCommit(); err != nil && !gitutil.IsUnknownRevision(err) {
		return nil, errors.Wrap(err, "failed to get git commit")
	} else if sha != "" {
		checkDirty := false
		if v, ok := os.LookupEnv("BUILDX_GIT_CHECK_DIRTY"); ok {
			if v, err := strconv.ParseBool(v); err == nil {
				checkDirty = v
			}
		}
		if checkDirty && gitc.IsDirty() {
			sha += "-dirty"
		}
		if setGitLabels {
			res["label:"+specs.AnnotationRevision] = sha
		}
		if setGitInfo {
			res["vcs:revision"] = sha
		}
	}

	if rurl, err := gitc.RemoteURL(); err == nil && rurl != "" {
		if setGitLabels {
			res["label:"+specs.AnnotationSource] = rurl
		}
		if setGitInfo {
			res["vcs:source"] = rurl
		}
	}

	if setGitLabels && root != "" {
		if dockerfilePath == "" {
			dockerfilePath = filepath.Join(wd, "Dockerfile")
		}
		if !filepath.IsAbs(dockerfilePath) {
			dockerfilePath = filepath.Join(osutil.GetWd(), dockerfilePath)
		}
		if r, err := filepath.Rel(root, dockerfilePath); err == nil && !strings.HasPrefix(r, "..") {
			res["label:"+DockerfileLabel] = r
		}
	}

	return func(so *client.SolveOpt) {
		if so.FrontendAttrs == nil {
			so.FrontendAttrs = make(map[string]string)
		}
		maps.Copy(so.FrontendAttrs, res)

		if !setGitInfo || root == "" {
			return
		}

		for key, mount := range so.LocalMounts {
			fs, ok := mount.(*fs)
			if !ok {
				continue
			}
			dir, err := filepath.EvalSymlinks(fs.dir) // keep same behavior as fsutil.NewFS
			if err != nil {
				continue
			}
			dir, err = filepath.Abs(dir)
			if err != nil {
				continue
			}
			if lp, err := osutil.GetLongPathName(dir); err == nil {
				dir = lp
			}
			dir = osutil.SanitizePath(dir)
			if r, err := filepath.Rel(root, dir); err == nil && !strings.HasPrefix(r, "..") {
				so.FrontendAttrs["vcs:localdir:"+key] = r
			}
		}
	}, nil
}
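getGitAttributes above toggles label and vcs-hint collection from the BUILDX_GIT_LABELS and BUILDX_GIT_INFO environment variables, keeping "full" as a backward-compatible alias for true and ignoring values that strconv.ParseBool cannot parse. The following is a hedged standalone sketch of that parsing rule only; envFlag is an illustrative helper name, not a buildx function.

package main

import (
	"fmt"
	"os"
	"strconv"
)

// envFlag reads a boolean environment variable, accepting the legacy "full"
// alias and falling back to the default when the value does not parse.
func envFlag(name string, def bool) bool {
	v, ok := os.LookupEnv(name)
	if !ok {
		return def
	}
	if v == "full" { // legacy alias accepted by BUILDX_GIT_LABELS
		return true
	}
	if b, err := strconv.ParseBool(v); err == nil {
		return b
	}
	return def
}

func main() {
	fmt.Println(envFlag("BUILDX_GIT_LABELS", false), envFlag("BUILDX_GIT_INFO", true))
}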
222	build/git_test.go	Normal file
@@ -0,0 +1,222 @@
package build

import (
	"context"
	"os"
	"path"
	"path/filepath"
	"strings"
	"testing"

	"github.com/docker/buildx/util/gitutil"
	"github.com/docker/buildx/util/gitutil/gittestutil"
	"github.com/moby/buildkit/client"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func setupTest(tb testing.TB) {
	gittestutil.Mktmp(tb)

	c, err := gitutil.New()
	require.NoError(tb, err)
	gittestutil.GitInit(c, tb)

	df := []byte("FROM alpine:latest\n")
	require.NoError(tb, os.WriteFile("Dockerfile", df, 0644))

	gittestutil.GitAdd(c, tb, "Dockerfile")
	gittestutil.GitCommit(c, tb, "initial commit")
	gittestutil.GitSetRemote(c, tb, "origin", "git@github.com:docker/buildx.git")
}

func TestGetGitAttributesNotGitRepo(t *testing.T) {
	_, err := getGitAttributes(context.Background(), t.TempDir(), "Dockerfile")
	require.NoError(t, err)
}

func TestGetGitAttributesBadGitRepo(t *testing.T) {
	tmp := t.TempDir()
	require.NoError(t, os.MkdirAll(path.Join(tmp, ".git"), 0755))

	_, err := getGitAttributes(context.Background(), tmp, "Dockerfile")
	assert.Error(t, err)
}

func TestGetGitAttributesNoContext(t *testing.T) {
	setupTest(t)

	addGitAttrs, err := getGitAttributes(context.Background(), "", "Dockerfile")
	require.NoError(t, err)
	var so client.SolveOpt
	addGitAttrs(&so)
	assert.Empty(t, so.FrontendAttrs)
}

func TestGetGitAttributes(t *testing.T) {
	cases := []struct {
		name         string
		envGitLabels string
		envGitInfo   string
		expected     []string
	}{
		{
			name:         "default",
			envGitLabels: "",
			envGitInfo:   "",
			expected: []string{
				"vcs:revision",
				"vcs:source",
			},
		},
		{
			name:         "none",
			envGitLabels: "false",
			envGitInfo:   "false",
			expected:     []string{},
		},
		{
			name:         "gitinfo",
			envGitLabels: "false",
			envGitInfo:   "true",
			expected: []string{
				"vcs:revision",
				"vcs:source",
			},
		},
		{
			name:         "gitlabels",
			envGitLabels: "true",
			envGitInfo:   "false",
			expected: []string{
				"label:" + DockerfileLabel,
				"label:" + specs.AnnotationRevision,
				"label:" + specs.AnnotationSource,
			},
		},
		{
			name:         "both",
			envGitLabels: "true",
			envGitInfo:   "",
			expected: []string{
				"label:" + DockerfileLabel,
				"label:" + specs.AnnotationRevision,
				"label:" + specs.AnnotationSource,
				"vcs:revision",
				"vcs:source",
			},
		},
	}
	for _, tt := range cases {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			setupTest(t)
			if tt.envGitLabels != "" {
				t.Setenv("BUILDX_GIT_LABELS", tt.envGitLabels)
			}
			if tt.envGitInfo != "" {
				t.Setenv("BUILDX_GIT_INFO", tt.envGitInfo)
			}
			addGitAttrs, err := getGitAttributes(context.Background(), ".", "Dockerfile")
			require.NoError(t, err)
			var so client.SolveOpt
			addGitAttrs(&so)
			for _, e := range tt.expected {
				assert.Contains(t, so.FrontendAttrs, e)
				assert.NotEmpty(t, so.FrontendAttrs[e])
				if e == "label:"+DockerfileLabel {
					assert.Equal(t, "Dockerfile", so.FrontendAttrs[e])
				} else if e == "label:"+specs.AnnotationSource || e == "vcs:source" {
					assert.Equal(t, "git@github.com:docker/buildx.git", so.FrontendAttrs[e])
				}
			}
		})
	}
}

func TestGetGitAttributesDirty(t *testing.T) {
	setupTest(t)
	t.Setenv("BUILDX_GIT_CHECK_DIRTY", "true")

	// make a change to test dirty flag
	df := []byte("FROM alpine:edge\n")
	require.NoError(t, os.Mkdir("dir", 0755))
	require.NoError(t, os.WriteFile(filepath.Join("dir", "Dockerfile"), df, 0644))

	t.Setenv("BUILDX_GIT_LABELS", "true")
	addGitAttrs, err := getGitAttributes(context.Background(), ".", "Dockerfile")
	require.NoError(t, err)

	var so client.SolveOpt
	addGitAttrs(&so)

	assert.Equal(t, 5, len(so.FrontendAttrs))

	assert.Contains(t, so.FrontendAttrs, "label:"+DockerfileLabel)
	assert.Equal(t, "Dockerfile", so.FrontendAttrs["label:"+DockerfileLabel])
	assert.Contains(t, so.FrontendAttrs, "label:"+specs.AnnotationSource)
	assert.Equal(t, "git@github.com:docker/buildx.git", so.FrontendAttrs["label:"+specs.AnnotationSource])
	assert.Contains(t, so.FrontendAttrs, "label:"+specs.AnnotationRevision)
	assert.True(t, strings.HasSuffix(so.FrontendAttrs["label:"+specs.AnnotationRevision], "-dirty"))

	assert.Contains(t, so.FrontendAttrs, "vcs:source")
	assert.Equal(t, "git@github.com:docker/buildx.git", so.FrontendAttrs["vcs:source"])
	assert.Contains(t, so.FrontendAttrs, "vcs:revision")
	assert.True(t, strings.HasSuffix(so.FrontendAttrs["vcs:revision"], "-dirty"))
}

func TestLocalDirs(t *testing.T) {
	setupTest(t)

	so := &client.SolveOpt{
		FrontendAttrs: map[string]string{},
	}

	addGitAttrs, err := getGitAttributes(context.Background(), ".", "Dockerfile")
	require.NoError(t, err)

	require.NoError(t, setLocalMount("context", ".", so))
	require.NoError(t, setLocalMount("dockerfile", ".", so))

	addGitAttrs(so)

	require.Contains(t, so.FrontendAttrs, "vcs:localdir:context")
	assert.Equal(t, ".", so.FrontendAttrs["vcs:localdir:context"])

	require.Contains(t, so.FrontendAttrs, "vcs:localdir:dockerfile")
	assert.Equal(t, ".", so.FrontendAttrs["vcs:localdir:dockerfile"])
}

func TestLocalDirsSub(t *testing.T) {
	gittestutil.Mktmp(t)

	c, err := gitutil.New()
	require.NoError(t, err)
	gittestutil.GitInit(c, t)

	df := []byte("FROM alpine:latest\n")
	require.NoError(t, os.MkdirAll("app", 0755))
	require.NoError(t, os.WriteFile("app/Dockerfile", df, 0644))

	gittestutil.GitAdd(c, t, "app/Dockerfile")
	gittestutil.GitCommit(c, t, "initial commit")
	gittestutil.GitSetRemote(c, t, "origin", "git@github.com:docker/buildx.git")

	so := &client.SolveOpt{
		FrontendAttrs: map[string]string{},
	}
	require.NoError(t, setLocalMount("context", ".", so))
	require.NoError(t, setLocalMount("dockerfile", "app", so))

	addGitAttrs, err := getGitAttributes(context.Background(), ".", "app/Dockerfile")
	require.NoError(t, err)

	addGitAttrs(so)

	require.Contains(t, so.FrontendAttrs, "vcs:localdir:context")
	assert.Equal(t, ".", so.FrontendAttrs["vcs:localdir:context"])

	require.Contains(t, so.FrontendAttrs, "vcs:localdir:dockerfile")
	assert.Equal(t, "app", so.FrontendAttrs["vcs:localdir:dockerfile"])
}
138	build/invoke.go	Normal file
@@ -0,0 +1,138 @@
package build

import (
	"context"
	_ "crypto/sha256" // ensure digests can be computed
	"io"
	"sync"
	"sync/atomic"
	"syscall"

	controllerapi "github.com/docker/buildx/controller/pb"
	gateway "github.com/moby/buildkit/frontend/gateway/client"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

type Container struct {
	cancelOnce      sync.Once
	containerCancel func(error)
	isUnavailable   atomic.Bool
	initStarted     atomic.Bool
	container       gateway.Container
	releaseCh       chan struct{}
	resultCtx       *ResultHandle
}

func NewContainer(ctx context.Context, resultCtx *ResultHandle, cfg *controllerapi.InvokeConfig) (*Container, error) {
	mainCtx := ctx

	ctrCh := make(chan *Container)
	errCh := make(chan error)
	go func() {
		err := resultCtx.build(func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
			ctx, cancel := context.WithCancelCause(ctx)
			go func() {
				<-mainCtx.Done()
				cancel(errors.WithStack(context.Canceled))
			}()

			containerCfg, err := resultCtx.getContainerConfig(cfg)
			if err != nil {
				return nil, err
			}
			containerCtx, containerCancel := context.WithCancelCause(ctx)
			defer containerCancel(errors.WithStack(context.Canceled))
			bkContainer, err := c.NewContainer(containerCtx, containerCfg)
			if err != nil {
				return nil, err
			}
			releaseCh := make(chan struct{})
			container := &Container{
				containerCancel: containerCancel,
				container:       bkContainer,
				releaseCh:       releaseCh,
				resultCtx:       resultCtx,
			}
			doneCh := make(chan struct{})
			defer close(doneCh)
			resultCtx.registerCleanup(func() {
				container.Cancel()
				<-doneCh
			})
			ctrCh <- container
			<-container.releaseCh

			return nil, bkContainer.Release(ctx)
		})
		if err != nil {
			errCh <- err
		}
	}()
	select {
	case ctr := <-ctrCh:
		return ctr, nil
	case err := <-errCh:
		return nil, err
	case <-mainCtx.Done():
		return nil, mainCtx.Err()
	}
}

func (c *Container) Cancel() {
	c.markUnavailable()
	c.cancelOnce.Do(func() {
		if c.containerCancel != nil {
			c.containerCancel(errors.WithStack(context.Canceled))
		}
		close(c.releaseCh)
	})
}

func (c *Container) IsUnavailable() bool {
	return c.isUnavailable.Load()
}

func (c *Container) markUnavailable() {
	c.isUnavailable.Store(true)
}

func (c *Container) Exec(ctx context.Context, cfg *controllerapi.InvokeConfig, stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) error {
	if isInit := c.initStarted.CompareAndSwap(false, true); isInit {
		defer func() {
			// container can't be used after init exits
			c.markUnavailable()
		}()
	}
	err := exec(ctx, c.resultCtx, cfg, c.container, stdin, stdout, stderr)
	if err != nil {
		// Container becomes unavailable if one of the processes fails in it.
		c.markUnavailable()
	}
	return err
}

func exec(ctx context.Context, resultCtx *ResultHandle, cfg *controllerapi.InvokeConfig, ctr gateway.Container, stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) error {
	processCfg, err := resultCtx.getProcessConfig(cfg, stdin, stdout, stderr)
	if err != nil {
		return err
	}
	proc, err := ctr.Start(ctx, processCfg)
	if err != nil {
		return errors.Errorf("failed to start container: %v", err)
	}

	doneCh := make(chan struct{})
	defer close(doneCh)
	go func() {
		select {
		case <-ctx.Done():
			if err := proc.Signal(ctx, syscall.SIGKILL); err != nil {
				logrus.Warnf("failed to kill process: %v", err)
			}
		case <-doneCh:
		}
	}()

	return proc.Wait()
}
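Container.Cancel above relies on sync.Once so that the cancel function runs and the release channel closes exactly once, no matter how many goroutines call Cancel concurrently. Below is a hedged standalone sketch of just that shutdown pattern; the handle type is illustrative and not part of the buildx API.

package main

import (
	"fmt"
	"sync"
)

type handle struct {
	once      sync.Once
	cancel    func()
	releaseCh chan struct{}
}

// Cancel is safe to call from multiple goroutines; the body runs once.
func (h *handle) Cancel() {
	h.once.Do(func() {
		if h.cancel != nil {
			h.cancel()
		}
		close(h.releaseCh)
	})
}

func main() {
	h := &handle{cancel: func() { fmt.Println("cancelled") }, releaseCh: make(chan struct{})}
	h.Cancel()
	h.Cancel() // no double-close: the release channel was closed exactly once
	<-h.releaseCh
}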
44	build/localstate.go	Normal file
@@ -0,0 +1,44 @@
package build

import (
	"path/filepath"

	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/localstate"
	"github.com/docker/buildx/util/confutil"
	"github.com/moby/buildkit/client"
)

func saveLocalState(so *client.SolveOpt, target string, opts Options, node builder.Node, cfg *confutil.Config) error {
	var err error
	if so.Ref == "" || opts.CallFunc != nil {
		return nil
	}
	lp := opts.Inputs.ContextPath
	dp := opts.Inputs.DockerfilePath
	if dp != "" && !IsRemoteURL(lp) && lp != "-" && dp != "-" {
		dp, err = filepath.Abs(dp)
		if err != nil {
			return err
		}
	}
	if lp != "" && !IsRemoteURL(lp) && lp != "-" {
		lp, err = filepath.Abs(lp)
		if err != nil {
			return err
		}
	}
	if lp == "" && dp == "" {
		return nil
	}
	l, err := localstate.New(cfg)
	if err != nil {
		return err
	}
	return l.SaveRef(node.Builder, node.Name, so.Ref, localstate.State{
		Target:         target,
		LocalPath:      lp,
		DockerfilePath: dp,
		GroupRef:       opts.GroupRef,
	})
}
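saveLocalState above only makes the context and Dockerfile paths absolute when they are real local paths, leaving "-" (stdin) and remote URLs untouched. The snippet below is a hedged, trimmed-down sketch of that normalization; isRemote is a simplified stand-in for build.IsRemoteURL, which in buildx also recognizes git and other remote context prefixes.

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// isRemote is an illustrative stand-in for build.IsRemoteURL.
func isRemote(s string) bool {
	return strings.HasPrefix(s, "http://") || strings.HasPrefix(s, "https://") || strings.HasPrefix(s, "git@")
}

// normalize keeps stdin markers and remote URLs as-is and resolves
// everything else to an absolute local path.
func normalize(p string) (string, error) {
	if p == "" || p == "-" || isRemote(p) {
		return p, nil
	}
	return filepath.Abs(p)
}

func main() {
	for _, p := range []string{".", "-", "https://github.com/docker/buildx.git"} {
		n, _ := normalize(p)
		fmt.Println(n)
	}
}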
657	build/opt.go	Normal file
@@ -0,0 +1,657 @@
package build

import (
	"bytes"
	"context"
	"io"
	"os"
	"path/filepath"
	"slices"
	"strconv"
	"strings"
	"syscall"

	"github.com/containerd/containerd/v2/core/content"
	"github.com/containerd/containerd/v2/plugins/content/local"
	"github.com/containerd/platforms"
	"github.com/distribution/reference"
	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/driver"
	"github.com/docker/buildx/util/confutil"
	"github.com/docker/buildx/util/dockerutil"
	"github.com/docker/buildx/util/osutil"
	"github.com/docker/buildx/util/progress"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/client/llb"
	"github.com/moby/buildkit/client/ociindex"
	gateway "github.com/moby/buildkit/frontend/gateway/client"
	"github.com/moby/buildkit/identity"
	"github.com/moby/buildkit/session/upload/uploadprovider"
	"github.com/moby/buildkit/solver/pb"
	"github.com/moby/buildkit/util/apicaps"
	"github.com/moby/buildkit/util/entitlements"
	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/tonistiigi/fsutil"
)

func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt *Options, bopts gateway.BuildOpts, cfg *confutil.Config, pw progress.Writer, docker *dockerutil.Client) (_ *client.SolveOpt, release func(), err error) {
	nodeDriver := node.Driver
	defers := make([]func(), 0, 2)
	releaseF := func() {
		for _, f := range defers {
			f()
		}
	}

	defer func() {
		if err != nil {
			releaseF()
		}
	}()

	// inline cache from build arg
	if v, ok := opt.BuildArgs["BUILDKIT_INLINE_CACHE"]; ok {
		if v, _ := strconv.ParseBool(v); v {
			opt.CacheTo = append(opt.CacheTo, client.CacheOptionsEntry{
				Type:  "inline",
				Attrs: map[string]string{},
			})
		}
	}

	for _, e := range opt.CacheTo {
		if e.Type != "inline" && !nodeDriver.Features(ctx)[driver.CacheExport] {
			return nil, nil, notSupported(driver.CacheExport, nodeDriver, "https://docs.docker.com/go/build-cache-backends/")
		}
	}

	cacheTo := make([]client.CacheOptionsEntry, 0, len(opt.CacheTo))
	for _, e := range opt.CacheTo {
		if e.Type == "gha" {
			if !bopts.LLBCaps.Contains(apicaps.CapID("cache.gha")) {
				continue
			}
		} else if e.Type == "s3" {
			if !bopts.LLBCaps.Contains(apicaps.CapID("cache.s3")) {
				continue
			}
		}
		cacheTo = append(cacheTo, e)
	}

	cacheFrom := make([]client.CacheOptionsEntry, 0, len(opt.CacheFrom))
	for _, e := range opt.CacheFrom {
		if e.Type == "gha" {
			if !bopts.LLBCaps.Contains(apicaps.CapID("cache.gha")) {
				continue
			}
		} else if e.Type == "s3" {
			if !bopts.LLBCaps.Contains(apicaps.CapID("cache.s3")) {
				continue
			}
		}
		cacheFrom = append(cacheFrom, e)
	}

	so := client.SolveOpt{
		Ref:                 opt.Ref,
		Frontend:            "dockerfile.v0",
		FrontendAttrs:       map[string]string{},
		LocalMounts:         map[string]fsutil.FS{},
		CacheExports:        cacheTo,
		CacheImports:        cacheFrom,
		AllowedEntitlements: opt.Allow,
		SourcePolicy:        opt.SourcePolicy,
	}

	if opt.CgroupParent != "" {
		so.FrontendAttrs["cgroup-parent"] = opt.CgroupParent
	}

	if v, ok := opt.BuildArgs["BUILDKIT_MULTI_PLATFORM"]; ok {
		if v, _ := strconv.ParseBool(v); v {
			so.FrontendAttrs["multi-platform"] = "true"
		}
	}

	if multiDriver {
		// force creation of manifest list
		so.FrontendAttrs["multi-platform"] = "true"
	}

	attests := make(map[string]string)
	for k, v := range opt.Attests {
		if v != nil {
			attests[k] = *v
		}
	}

	supportAttestations := bopts.LLBCaps.Contains(apicaps.CapID("exporter.image.attestations")) && nodeDriver.Features(ctx)[driver.MultiPlatform]
	if len(attests) > 0 {
		if !supportAttestations {
			if !nodeDriver.Features(ctx)[driver.MultiPlatform] {
				return nil, nil, notSupported("Attestation", nodeDriver, "https://docs.docker.com/go/attestations/")
			}
			return nil, nil, errors.Errorf("Attestations are not supported by the current BuildKit daemon")
		}
		for k, v := range attests {
			so.FrontendAttrs["attest:"+k] = v
		}
	}

	if _, ok := opt.Attests["provenance"]; !ok && supportAttestations {
		const noAttestEnv = "BUILDX_NO_DEFAULT_ATTESTATIONS"
		var noProv bool
		if v, ok := os.LookupEnv(noAttestEnv); ok {
			noProv, err = strconv.ParseBool(v)
			if err != nil {
				return nil, nil, errors.Wrap(err, "invalid "+noAttestEnv)
			}
		}
		if !noProv {
			so.FrontendAttrs["attest:provenance"] = "mode=min,inline-only=true"
		}
	}

	switch len(opt.Exports) {
	case 1:
		// valid
	case 0:
		if !noDefaultLoad() && opt.CallFunc == nil {
			if nodeDriver.IsMobyDriver() {
				// backwards compat for docker driver only:
				// this ensures the build results in a docker image.
				opt.Exports = []client.ExportEntry{{Type: "image", Attrs: map[string]string{}}}
			} else if nodeDriver.Features(ctx)[driver.DefaultLoad] {
				opt.Exports = []client.ExportEntry{{Type: "docker", Attrs: map[string]string{}}}
			}
		}
	default:
		if err := bopts.LLBCaps.Supports(pb.CapMultipleExporters); err != nil {
			return nil, nil, errors.Errorf("multiple outputs currently unsupported by the current BuildKit daemon, please upgrade to version v0.13+ or use a single output")
		}
	}

	// fill in image exporter names from tags
	if len(opt.Tags) > 0 {
		tags := make([]string, len(opt.Tags))
		for i, tag := range opt.Tags {
			ref, err := reference.Parse(tag)
			if err != nil {
				return nil, nil, errors.Wrapf(err, "invalid tag %q", tag)
			}
			tags[i] = ref.String()
		}
		for i, e := range opt.Exports {
			switch e.Type {
			case "image", "oci", "docker":
				opt.Exports[i].Attrs["name"] = strings.Join(tags, ",")
			}
		}
	} else {
		for _, e := range opt.Exports {
			if e.Type == "image" && e.Attrs["name"] == "" && e.Attrs["push"] != "" {
				if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok {
					return nil, nil, errors.Errorf("tag is needed when pushing to registry")
				}
			}
		}
	}

	// cacheonly is a fake exporter to opt out of default behaviors
	exports := make([]client.ExportEntry, 0, len(opt.Exports))
	for _, e := range opt.Exports {
		if e.Type != "cacheonly" {
			exports = append(exports, e)
		}
	}
	opt.Exports = exports

	// set up exporters
	for i, e := range opt.Exports {
		if e.Type == "oci" && !nodeDriver.Features(ctx)[driver.OCIExporter] {
			return nil, nil, notSupported(driver.OCIExporter, nodeDriver, "https://docs.docker.com/go/build-exporters/")
		}
		if e.Type == "docker" {
			features := docker.Features(ctx, e.Attrs["context"])
			if features[dockerutil.OCIImporter] && e.Output == nil {
				// rely on oci importer if available (which supports
				// multi-platform images), otherwise fall back to docker
				opt.Exports[i].Type = "oci"
			} else if len(opt.Platforms) > 1 || len(attests) > 0 {
				if e.Output != nil {
					return nil, nil, errors.Errorf("docker exporter does not support exporting manifest lists, use the oci exporter instead")
				}
				return nil, nil, errors.Errorf("docker exporter does not currently support exporting manifest lists")
			}
			if e.Output == nil {
				if nodeDriver.IsMobyDriver() {
					e.Type = "image"
				} else {
					w, cancel, err := docker.LoadImage(ctx, e.Attrs["context"], pw)
					if err != nil {
						return nil, nil, err
					}
					defers = append(defers, cancel)
					opt.Exports[i].Output = func(_ map[string]string) (io.WriteCloser, error) {
						return w, nil
					}
				}
			} else if !nodeDriver.Features(ctx)[driver.DockerExporter] {
				return nil, nil, notSupported(driver.DockerExporter, nodeDriver, "https://docs.docker.com/go/build-exporters/")
			}
		}
		if e.Type == "image" && nodeDriver.IsMobyDriver() {
			opt.Exports[i].Type = "moby"
			if e.Attrs["push"] != "" {
				if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok {
					if ok, _ := strconv.ParseBool(e.Attrs["push-by-digest"]); ok {
						return nil, nil, errors.Errorf("push-by-digest is currently not implemented for docker driver, please create a new builder instance")
					}
				}
			}
		}
		if e.Type == "docker" || e.Type == "image" || e.Type == "oci" {
			// inline buildinfo attrs from build arg
			if v, ok := opt.BuildArgs["BUILDKIT_INLINE_BUILDINFO_ATTRS"]; ok {
				opt.Exports[i].Attrs["buildinfo-attrs"] = v
			}
		}
	}

	so.Exports = opt.Exports
	so.Session = slices.Clone(opt.Session)

	releaseLoad, err := loadInputs(ctx, nodeDriver, &opt.Inputs, pw, &so)
	if err != nil {
		return nil, nil, err
	}
	defers = append(defers, releaseLoad)

	// add node identifier to shared key if one was specified
	if so.SharedKey != "" {
		so.SharedKey += ":" + cfg.TryNodeIdentifier()
	}

	if opt.Pull {
		so.FrontendAttrs["image-resolve-mode"] = pb.AttrImageResolveModeForcePull
	} else if nodeDriver.IsMobyDriver() {
		// moby driver always resolves local images by default
		so.FrontendAttrs["image-resolve-mode"] = pb.AttrImageResolveModePreferLocal
	}
	if opt.Target != "" {
		so.FrontendAttrs["target"] = opt.Target
	}
	if len(opt.NoCacheFilter) > 0 {
		so.FrontendAttrs["no-cache"] = strings.Join(opt.NoCacheFilter, ",")
	}
	if opt.NoCache {
		so.FrontendAttrs["no-cache"] = ""
	}
	for k, v := range opt.BuildArgs {
		so.FrontendAttrs["build-arg:"+k] = v
	}
	for k, v := range opt.Labels {
		so.FrontendAttrs["label:"+k] = v
	}

	for k, v := range node.ProxyConfig {
		if _, ok := opt.BuildArgs[k]; !ok {
			so.FrontendAttrs["build-arg:"+k] = v
		}
	}

	// set platforms
	if len(opt.Platforms) != 0 {
		pp := make([]string, len(opt.Platforms))
		for i, p := range opt.Platforms {
			pp[i] = platforms.Format(p)
		}
		if len(pp) > 1 && !nodeDriver.Features(ctx)[driver.MultiPlatform] {
			return nil, nil, notSupported(driver.MultiPlatform, nodeDriver, "https://docs.docker.com/go/build-multi-platform/")
|
}
|
||||||
|
so.FrontendAttrs["platform"] = strings.Join(pp, ",")
|
||||||
|
}
|
||||||
|
|
||||||
|
// setup networkmode
|
||||||
|
switch opt.NetworkMode {
|
||||||
|
case "host":
|
||||||
|
so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
|
||||||
|
so.AllowedEntitlements = append(so.AllowedEntitlements, entitlements.EntitlementNetworkHost.String())
|
||||||
|
case "none":
|
||||||
|
so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
|
||||||
|
case "", "default":
|
||||||
|
default:
|
||||||
|
return nil, nil, errors.Errorf("network mode %q not supported by buildkit - you can define a custom network for your builder using the network driver-opt in buildx create", opt.NetworkMode)
|
||||||
|
}
|
||||||
|
|
||||||
|
// setup extrahosts
|
||||||
|
extraHosts, err := toBuildkitExtraHosts(ctx, opt.ExtraHosts, nodeDriver)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if len(extraHosts) > 0 {
|
||||||
|
so.FrontendAttrs["add-hosts"] = extraHosts
|
||||||
|
}
|
||||||
|
|
||||||
|
// setup shm size
|
||||||
|
if opt.ShmSize.Value() > 0 {
|
||||||
|
so.FrontendAttrs["shm-size"] = strconv.FormatInt(opt.ShmSize.Value(), 10)
|
||||||
|
}
|
||||||
|
|
||||||
|
// setup ulimits
|
||||||
|
ulimits, err := toBuildkitUlimits(opt.Ulimits)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
} else if len(ulimits) > 0 {
|
||||||
|
so.FrontendAttrs["ulimit"] = ulimits
|
||||||
|
}
|
||||||
|
|
||||||
|
// mark call request as internal
|
||||||
|
if opt.CallFunc != nil {
|
||||||
|
so.Internal = true
|
||||||
|
}
|
||||||
|
|
||||||
|
return &so, releaseF, nil
|
||||||
|
}
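
The default provenance attestation applied above is gated by the BUILDX_NO_DEFAULT_ATTESTATIONS environment variable. The following is a minimal, standalone sketch of that gating logic rather than buildx code itself; the attrs map stands in for so.FrontendAttrs:

package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	// attrs stands in for so.FrontendAttrs; this is a sketch, not buildx code.
	attrs := map[string]string{}
	noProv := false
	// Any value accepted by strconv.ParseBool ("1", "true", "t", ...) disables the
	// default provenance attestation; buildx itself returns an error on bad values.
	if v, ok := os.LookupEnv("BUILDX_NO_DEFAULT_ATTESTATIONS"); ok {
		if b, err := strconv.ParseBool(v); err == nil {
			noProv = b
		}
	}
	if !noProv {
		attrs["attest:provenance"] = "mode=min,inline-only=true"
	}
	fmt.Println(attrs)
}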
|
||||||
|
|
||||||
|
func loadInputs(ctx context.Context, d *driver.DriverHandle, inp *Inputs, pw progress.Writer, target *client.SolveOpt) (func(), error) {
|
||||||
|
if inp.ContextPath == "" {
|
||||||
|
return nil, errors.New("please specify build context (e.g. \".\" for the current directory)")
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: handle stdin, symlinks, remote contexts, check files exist
|
||||||
|
|
||||||
|
var (
|
||||||
|
err error
|
||||||
|
dockerfileReader io.ReadCloser
|
||||||
|
dockerfileDir string
|
||||||
|
dockerfileName = inp.DockerfilePath
|
||||||
|
dockerfileSrcName = inp.DockerfilePath
|
||||||
|
toRemove []string
|
||||||
|
)
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case inp.ContextState != nil:
|
||||||
|
if target.FrontendInputs == nil {
|
||||||
|
target.FrontendInputs = make(map[string]llb.State)
|
||||||
|
}
|
||||||
|
target.FrontendInputs["context"] = *inp.ContextState
|
||||||
|
target.FrontendInputs["dockerfile"] = *inp.ContextState
|
||||||
|
case inp.ContextPath == "-":
|
||||||
|
if inp.DockerfilePath == "-" {
|
||||||
|
return nil, errors.Errorf("invalid argument: can't use stdin for both build context and dockerfile")
|
||||||
|
}
|
||||||
|
|
||||||
|
rc := inp.InStream.NewReadCloser()
|
||||||
|
magic, err := inp.InStream.Peek(archiveHeaderSize * 2)
|
||||||
|
if err != nil && err != io.EOF {
|
||||||
|
return nil, errors.Wrap(err, "failed to peek context header from STDIN")
|
||||||
|
}
|
||||||
|
if !(err == io.EOF && len(magic) == 0) {
|
||||||
|
if isArchive(magic) {
|
||||||
|
// stdin is context
|
||||||
|
up := uploadprovider.New()
|
||||||
|
target.FrontendAttrs["context"] = up.Add(rc)
|
||||||
|
target.Session = append(target.Session, up)
|
||||||
|
} else {
|
||||||
|
if inp.DockerfilePath != "" {
|
||||||
|
return nil, errors.Errorf("ambiguous Dockerfile source: both stdin and flag correspond to Dockerfiles")
|
||||||
|
}
|
||||||
|
// stdin is dockerfile
|
||||||
|
dockerfileReader = rc
|
||||||
|
inp.ContextPath, _ = os.MkdirTemp("", "empty-dir")
|
||||||
|
toRemove = append(toRemove, inp.ContextPath)
|
||||||
|
if err := setLocalMount("context", inp.ContextPath, target); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case osutil.IsLocalDir(inp.ContextPath):
|
||||||
|
if err := setLocalMount("context", inp.ContextPath, target); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
sharedKey := inp.ContextPath
|
||||||
|
if p, err := filepath.Abs(sharedKey); err == nil {
|
||||||
|
sharedKey = filepath.Base(p)
|
||||||
|
}
|
||||||
|
target.SharedKey = sharedKey
|
||||||
|
switch inp.DockerfilePath {
|
||||||
|
case "-":
|
||||||
|
dockerfileReader = inp.InStream.NewReadCloser()
|
||||||
|
case "":
|
||||||
|
dockerfileDir = inp.ContextPath
|
||||||
|
default:
|
||||||
|
dockerfileDir = filepath.Dir(inp.DockerfilePath)
|
||||||
|
dockerfileName = filepath.Base(inp.DockerfilePath)
|
||||||
|
}
|
||||||
|
case IsRemoteURL(inp.ContextPath):
|
||||||
|
if inp.DockerfilePath == "-" {
|
||||||
|
dockerfileReader = inp.InStream.NewReadCloser()
|
||||||
|
} else if filepath.IsAbs(inp.DockerfilePath) {
|
||||||
|
dockerfileDir = filepath.Dir(inp.DockerfilePath)
|
||||||
|
dockerfileName = filepath.Base(inp.DockerfilePath)
|
||||||
|
target.FrontendAttrs["dockerfilekey"] = "dockerfile"
|
||||||
|
}
|
||||||
|
target.FrontendAttrs["context"] = inp.ContextPath
|
||||||
|
default:
|
||||||
|
return nil, errors.Errorf("unable to prepare context: path %q not found", inp.ContextPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
if inp.DockerfileInline != "" {
|
||||||
|
dockerfileReader = io.NopCloser(strings.NewReader(inp.DockerfileInline))
|
||||||
|
dockerfileSrcName = "inline"
|
||||||
|
} else if inp.DockerfilePath == "-" {
|
||||||
|
dockerfileSrcName = "stdin"
|
||||||
|
} else if inp.DockerfilePath == "" {
|
||||||
|
dockerfileSrcName = filepath.Join(inp.ContextPath, "Dockerfile")
|
||||||
|
}
|
||||||
|
|
||||||
|
if dockerfileReader != nil {
|
||||||
|
dockerfileDir, err = createTempDockerfile(dockerfileReader, inp.InStream)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
toRemove = append(toRemove, dockerfileDir)
|
||||||
|
dockerfileName = "Dockerfile"
|
||||||
|
target.FrontendAttrs["dockerfilekey"] = "dockerfile"
|
||||||
|
}
|
||||||
|
if isHTTPURL(inp.DockerfilePath) {
|
||||||
|
dockerfileDir, err = createTempDockerfileFromURL(ctx, d, inp.DockerfilePath, pw)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
toRemove = append(toRemove, dockerfileDir)
|
||||||
|
dockerfileName = "Dockerfile"
|
||||||
|
target.FrontendAttrs["dockerfilekey"] = "dockerfile"
|
||||||
|
delete(target.FrontendInputs, "dockerfile")
|
||||||
|
}
|
||||||
|
|
||||||
|
if dockerfileName == "" {
|
||||||
|
dockerfileName = "Dockerfile"
|
||||||
|
}
|
||||||
|
|
||||||
|
if dockerfileDir != "" {
|
||||||
|
if err := setLocalMount("dockerfile", dockerfileDir, target); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
dockerfileName = handleLowercaseDockerfile(dockerfileDir, dockerfileName)
|
||||||
|
}
|
||||||
|
|
||||||
|
target.FrontendAttrs["filename"] = dockerfileName
|
||||||
|
|
||||||
|
for k, v := range inp.NamedContexts {
|
||||||
|
target.FrontendAttrs["frontend.caps"] = "moby.buildkit.frontend.contexts+forward"
|
||||||
|
if v.State != nil {
|
||||||
|
target.FrontendAttrs["context:"+k] = "input:" + k
|
||||||
|
if target.FrontendInputs == nil {
|
||||||
|
target.FrontendInputs = make(map[string]llb.State)
|
||||||
|
}
|
||||||
|
target.FrontendInputs[k] = *v.State
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if IsRemoteURL(v.Path) || strings.HasPrefix(v.Path, "docker-image://") || strings.HasPrefix(v.Path, "target:") {
|
||||||
|
target.FrontendAttrs["context:"+k] = v.Path
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// handle OCI layout
|
||||||
|
if strings.HasPrefix(v.Path, "oci-layout://") {
|
||||||
|
localPath := strings.TrimPrefix(v.Path, "oci-layout://")
|
||||||
|
localPath, dig, hasDigest := strings.Cut(localPath, "@")
|
||||||
|
localPath, tag, hasTag := strings.Cut(localPath, ":")
|
||||||
|
if !hasTag {
|
||||||
|
tag = "latest"
|
||||||
|
}
|
||||||
|
if !hasDigest {
|
||||||
|
dig, err = resolveDigest(localPath, tag)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "oci-layout reference %q could not be resolved", v.Path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
store, err := local.NewStore(localPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "invalid store at %s", localPath)
|
||||||
|
}
|
||||||
|
storeName := identity.NewID()
|
||||||
|
if target.OCIStores == nil {
|
||||||
|
target.OCIStores = map[string]content.Store{}
|
||||||
|
}
|
||||||
|
target.OCIStores[storeName] = store
|
||||||
|
|
||||||
|
target.FrontendAttrs["context:"+k] = "oci-layout://" + storeName + ":" + tag + "@" + dig
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
st, err := os.Stat(v.Path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "failed to get build context %v", k)
|
||||||
|
}
|
||||||
|
if !st.IsDir() {
|
||||||
|
return nil, errors.Wrapf(syscall.ENOTDIR, "failed to get build context path %v", v)
|
||||||
|
}
|
||||||
|
localName := k
|
||||||
|
if k == "context" || k == "dockerfile" {
|
||||||
|
localName = "_" + k // underscore to avoid collisions
|
||||||
|
}
|
||||||
|
if err := setLocalMount(localName, v.Path, target); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
target.FrontendAttrs["context:"+k] = "local:" + localName
|
||||||
|
}
|
||||||
|
|
||||||
|
release := func() {
|
||||||
|
for _, dir := range toRemove {
|
||||||
|
_ = os.RemoveAll(dir)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
inp.DockerfileMappingSrc = dockerfileSrcName
|
||||||
|
inp.DockerfileMappingDst = dockerfileName
|
||||||
|
return release, nil
|
||||||
|
}
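
loadInputs above disambiguates stdin by peeking at its first bytes: a tar header means stdin is the build context, anything else is treated as a Dockerfile. A simplified, self-contained sketch of that check follows; isTar is a stand-in for the isArchive helper in this package, and the sample inputs are made up:

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
)

// isTar is a simplified stand-in for isArchive: it asks whether the peeked
// bytes begin with a readable tar header.
func isTar(header []byte) bool {
	tr := tar.NewReader(bytes.NewReader(header))
	_, err := tr.Next()
	return err == nil
}

func main() {
	// Build a tiny in-memory tar archive to stand in for a context on stdin.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	_ = tw.WriteHeader(&tar.Header{Name: "Dockerfile", Mode: 0o644})
	_ = tw.Close()

	fmt.Println(isTar(buf.Bytes()))                  // true: stdin would be treated as a context archive
	fmt.Println(isTar([]byte("FROM alpine:3.20\n"))) // false: stdin would be treated as a Dockerfile
}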
|
||||||
|
|
||||||
|
func resolveDigest(localPath, tag string) (dig string, _ error) {
|
||||||
|
idx := ociindex.NewStoreIndex(localPath)
|
||||||
|
|
||||||
|
// lookup by name
|
||||||
|
desc, err := idx.Get(tag)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if desc == nil {
|
||||||
|
// lookup single
|
||||||
|
desc, err = idx.GetSingle()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if desc == nil {
|
||||||
|
return "", errors.New("failed to resolve digest")
|
||||||
|
}
|
||||||
|
|
||||||
|
dig = string(desc.Digest)
|
||||||
|
_, err = digest.Parse(dig)
|
||||||
|
if err != nil {
|
||||||
|
return "", errors.Wrapf(err, "invalid digest %s", dig)
|
||||||
|
}
|
||||||
|
|
||||||
|
return dig, nil
|
||||||
|
}
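
resolveDigest backs the oci-layout:// named-context handling in loadInputs. A small sketch of how such a reference is split into store path, tag and digest with strings.Cut, mirroring the logic above; the reference string below is a made-up example:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Made-up reference; mirrors the parsing of oci-layout:// named contexts above.
	ref := "oci-layout://./layout:v1@sha256:1111111111111111111111111111111111111111111111111111111111111111"

	localPath := strings.TrimPrefix(ref, "oci-layout://")
	localPath, dig, hasDigest := strings.Cut(localPath, "@")
	localPath, tag, hasTag := strings.Cut(localPath, ":")
	if !hasTag {
		tag = "latest" // same default as loadInputs
	}
	// Without a digest, buildx would resolve it from the layout's index via resolveDigest.
	fmt.Println(localPath, tag, dig, hasDigest)
}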
|
||||||
|
|
||||||
|
func setLocalMount(name, dir string, so *client.SolveOpt) error {
|
||||||
|
lm, err := fsutil.NewFS(dir)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if so.LocalMounts == nil {
|
||||||
|
so.LocalMounts = map[string]fsutil.FS{}
|
||||||
|
}
|
||||||
|
so.LocalMounts[name] = &fs{FS: lm, dir: dir}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func createTempDockerfile(r io.Reader, multiReader *SyncMultiReader) (string, error) {
|
||||||
|
dir, err := os.MkdirTemp("", "dockerfile")
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
f, err := os.Create(filepath.Join(dir, "Dockerfile"))
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
if multiReader != nil {
|
||||||
|
dt, err := io.ReadAll(r)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
multiReader.Reset(dt)
|
||||||
|
r = bytes.NewReader(dt)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := io.Copy(f, r); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return dir, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// handle https://github.com/moby/moby/pull/10858
|
||||||
|
func handleLowercaseDockerfile(dir, p string) string {
|
||||||
|
if filepath.Base(p) != "Dockerfile" {
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err := os.Open(filepath.Dir(filepath.Join(dir, p)))
|
||||||
|
if err != nil {
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
names, err := f.Readdirnames(-1)
|
||||||
|
if err != nil {
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
foundLowerCase := false
|
||||||
|
for _, n := range names {
|
||||||
|
if n == "Dockerfile" {
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
if n == "dockerfile" {
|
||||||
|
foundLowerCase = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if foundLowerCase {
|
||||||
|
return filepath.Join(filepath.Dir(p), "dockerfile")
|
||||||
|
}
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
type fs struct {
|
||||||
|
fsutil.FS
|
||||||
|
dir string
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ fsutil.FS = &fs{}
|
||||||
build/provenance.go
@@ -0,0 +1,156 @@
package build
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
"maps"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/containerd/containerd/v2/core/content"
|
||||||
|
"github.com/containerd/containerd/v2/core/content/proxy"
|
||||||
|
"github.com/docker/buildx/util/confutil"
|
||||||
|
"github.com/docker/buildx/util/progress"
|
||||||
|
controlapi "github.com/moby/buildkit/api/services/control"
|
||||||
|
"github.com/moby/buildkit/client"
|
||||||
|
provenancetypes "github.com/moby/buildkit/solver/llbsolver/provenance/types"
|
||||||
|
digest "github.com/opencontainers/go-digest"
|
||||||
|
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"golang.org/x/sync/errgroup"
|
||||||
|
)
|
||||||
|
|
||||||
|
type provenancePredicate struct {
|
||||||
|
Builder *provenanceBuilder `json:"builder,omitempty"`
|
||||||
|
provenancetypes.ProvenancePredicate
|
||||||
|
}
|
||||||
|
|
||||||
|
type provenanceBuilder struct {
|
||||||
|
ID string `json:"id,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func setRecordProvenance(ctx context.Context, c *client.Client, sr *client.SolveResponse, ref string, mode confutil.MetadataProvenanceMode, pw progress.Writer) error {
|
||||||
|
if mode == confutil.MetadataProvenanceModeDisabled {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
pw = progress.ResetTime(pw)
|
||||||
|
return progress.Wrap("resolving provenance for metadata file", pw.Write, func(l progress.SubLogger) error {
|
||||||
|
res, err := fetchProvenance(ctx, c, ref, mode)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
maps.Copy(sr.ExporterResponse, res)
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func fetchProvenance(ctx context.Context, c *client.Client, ref string, mode confutil.MetadataProvenanceMode) (out map[string]string, err error) {
|
||||||
|
cl, err := c.ControlClient().ListenBuildHistory(ctx, &controlapi.BuildHistoryRequest{
|
||||||
|
Ref: ref,
|
||||||
|
EarlyExit: true,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var mu sync.Mutex
|
||||||
|
eg, ctx := errgroup.WithContext(ctx)
|
||||||
|
store := proxy.NewContentStore(c.ContentClient())
|
||||||
|
for {
|
||||||
|
ev, err := cl.Recv()
|
||||||
|
if errors.Is(err, io.EOF) {
|
||||||
|
break
|
||||||
|
} else if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if ev.Record == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if ev.Record.Result != nil {
|
||||||
|
desc := lookupProvenance(ev.Record.Result)
|
||||||
|
if desc == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
eg.Go(func() error {
|
||||||
|
dt, err := content.ReadBlob(ctx, store, *desc)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to load provenance blob from build record")
|
||||||
|
}
|
||||||
|
prv, err := encodeProvenance(dt, mode)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
mu.Lock()
|
||||||
|
if out == nil {
|
||||||
|
out = make(map[string]string)
|
||||||
|
}
|
||||||
|
out["buildx.build.provenance"] = prv
|
||||||
|
mu.Unlock()
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
} else if ev.Record.Results != nil {
|
||||||
|
for platform, res := range ev.Record.Results {
|
||||||
|
platform := platform
|
||||||
|
desc := lookupProvenance(res)
|
||||||
|
if desc == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
eg.Go(func() error {
|
||||||
|
dt, err := content.ReadBlob(ctx, store, *desc)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to load provenance blob from build record")
|
||||||
|
}
|
||||||
|
prv, err := encodeProvenance(dt, mode)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
mu.Lock()
|
||||||
|
if out == nil {
|
||||||
|
out = make(map[string]string)
|
||||||
|
}
|
||||||
|
out["buildx.build.provenance/"+platform] = prv
|
||||||
|
mu.Unlock()
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out, eg.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
func lookupProvenance(res *controlapi.BuildResultInfo) *ocispecs.Descriptor {
|
||||||
|
for _, a := range res.Attestations {
|
||||||
|
if a.MediaType == "application/vnd.in-toto+json" && strings.HasPrefix(a.Annotations["in-toto.io/predicate-type"], "https://slsa.dev/provenance/") {
|
||||||
|
return &ocispecs.Descriptor{
|
||||||
|
Digest: digest.Digest(a.Digest),
|
||||||
|
Size: a.Size,
|
||||||
|
MediaType: a.MediaType,
|
||||||
|
Annotations: a.Annotations,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
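
lookupProvenance keeps only in-toto attestations whose predicate type falls under the SLSA provenance namespace. A standalone sketch of that filter; isProvenance is a hypothetical helper and the annotation values are examples:

package main

import (
	"fmt"
	"strings"
)

// isProvenance is a hypothetical helper reproducing the filter in
// lookupProvenance: only in-toto attestations whose predicate type is under
// the SLSA provenance namespace are treated as provenance.
func isProvenance(mediaType string, annotations map[string]string) bool {
	return mediaType == "application/vnd.in-toto+json" &&
		strings.HasPrefix(annotations["in-toto.io/predicate-type"], "https://slsa.dev/provenance/")
}

func main() {
	fmt.Println(isProvenance("application/vnd.in-toto+json",
		map[string]string{"in-toto.io/predicate-type": "https://slsa.dev/provenance/v0.2"})) // true
	fmt.Println(isProvenance("application/vnd.in-toto+json",
		map[string]string{"in-toto.io/predicate-type": "https://spdx.dev/Document"})) // false
}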
|
||||||
|
|
||||||
|
func encodeProvenance(dt []byte, mode confutil.MetadataProvenanceMode) (string, error) {
|
||||||
|
var prv provenancePredicate
|
||||||
|
if err := json.Unmarshal(dt, &prv); err != nil {
|
||||||
|
return "", errors.Wrapf(err, "failed to unmarshal provenance")
|
||||||
|
}
|
||||||
|
if prv.Builder != nil && prv.Builder.ID == "" {
|
||||||
|
// reset builder if id is empty
|
||||||
|
prv.Builder = nil
|
||||||
|
}
|
||||||
|
if mode == confutil.MetadataProvenanceModeMin {
|
||||||
|
// reset fields for minimal provenance
|
||||||
|
prv.BuildConfig = nil
|
||||||
|
prv.Metadata = nil
|
||||||
|
}
|
||||||
|
dtprv, err := json.Marshal(prv)
|
||||||
|
if err != nil {
|
||||||
|
return "", errors.Wrapf(err, "failed to marshal provenance")
|
||||||
|
}
|
||||||
|
return base64.StdEncoding.EncodeToString(dtprv), nil
|
||||||
|
}
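
The value stored under the buildx.build.provenance key in the exporter response is a base64-encoded JSON provenance predicate. A minimal sketch of how a consumer of the metadata file could decode it; the payload here is a stand-in, not real build output:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

func main() {
	// Stand-in payload; a real value would be the base64 string found under the
	// "buildx.build.provenance" (or "buildx.build.provenance/<platform>") key.
	encoded := base64.StdEncoding.EncodeToString([]byte(`{"builder":{"id":"example"}}`))

	raw, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		panic(err)
	}
	var pred map[string]any
	if err := json.Unmarshal(raw, &pred); err != nil {
		panic(err)
	}
	fmt.Println(pred["builder"])
}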
|
||||||
build/replicatedstream.go
@@ -0,0 +1,164 @@
package build
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"io"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
type SyncMultiReader struct {
|
||||||
|
source *bufio.Reader
|
||||||
|
buffer []byte
|
||||||
|
static []byte
|
||||||
|
mu sync.Mutex
|
||||||
|
cond *sync.Cond
|
||||||
|
readers []*syncReader
|
||||||
|
err error
|
||||||
|
offset int
|
||||||
|
}
|
||||||
|
|
||||||
|
type syncReader struct {
|
||||||
|
mr *SyncMultiReader
|
||||||
|
offset int
|
||||||
|
closed bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewSyncMultiReader(source io.Reader) *SyncMultiReader {
|
||||||
|
mr := &SyncMultiReader{
|
||||||
|
source: bufio.NewReader(source),
|
||||||
|
buffer: make([]byte, 0, 32*1024),
|
||||||
|
}
|
||||||
|
mr.cond = sync.NewCond(&mr.mu)
|
||||||
|
return mr
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mr *SyncMultiReader) Peek(n int) ([]byte, error) {
|
||||||
|
mr.mu.Lock()
|
||||||
|
defer mr.mu.Unlock()
|
||||||
|
|
||||||
|
if mr.static != nil {
|
||||||
|
return mr.static[:min(n, len(mr.static))], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return mr.source.Peek(n)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mr *SyncMultiReader) Reset(dt []byte) {
|
||||||
|
mr.mu.Lock()
|
||||||
|
defer mr.mu.Unlock()
|
||||||
|
|
||||||
|
mr.static = dt
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mr *SyncMultiReader) NewReadCloser() io.ReadCloser {
|
||||||
|
mr.mu.Lock()
|
||||||
|
defer mr.mu.Unlock()
|
||||||
|
|
||||||
|
if mr.static != nil {
|
||||||
|
return io.NopCloser(bytes.NewReader(mr.static))
|
||||||
|
}
|
||||||
|
|
||||||
|
reader := &syncReader{
|
||||||
|
mr: mr,
|
||||||
|
}
|
||||||
|
mr.readers = append(mr.readers, reader)
|
||||||
|
return reader
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sr *syncReader) Read(p []byte) (int, error) {
|
||||||
|
sr.mr.mu.Lock()
|
||||||
|
defer sr.mr.mu.Unlock()
|
||||||
|
|
||||||
|
return sr.read(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sr *syncReader) read(p []byte) (int, error) {
|
||||||
|
end := sr.mr.offset + len(sr.mr.buffer)
|
||||||
|
|
||||||
|
loop0:
|
||||||
|
for {
|
||||||
|
if sr.closed {
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
end := sr.mr.offset + len(sr.mr.buffer)
|
||||||
|
|
||||||
|
if sr.mr.err != nil && sr.offset == end {
|
||||||
|
return 0, sr.mr.err
|
||||||
|
}
|
||||||
|
|
||||||
|
start := sr.offset - sr.mr.offset
|
||||||
|
|
||||||
|
dt := sr.mr.buffer[start:]
|
||||||
|
|
||||||
|
if len(dt) > 0 {
|
||||||
|
n := copy(p, dt)
|
||||||
|
sr.offset += n
|
||||||
|
sr.mr.cond.Broadcast()
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// check for readers that have not caught up
|
||||||
|
hasOpen := false
|
||||||
|
for _, r := range sr.mr.readers {
|
||||||
|
if !r.closed {
|
||||||
|
hasOpen = true
|
||||||
|
} else {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if r.offset < end {
|
||||||
|
sr.mr.cond.Wait()
|
||||||
|
continue loop0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !hasOpen {
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
last := sr.mr.offset + len(sr.mr.buffer)
|
||||||
|
// another reader has already updated the buffer
|
||||||
|
if last > end || sr.mr.err != nil {
|
||||||
|
return sr.read(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
sr.mr.offset += len(sr.mr.buffer)
|
||||||
|
|
||||||
|
sr.mr.buffer = sr.mr.buffer[:cap(sr.mr.buffer)]
|
||||||
|
n, err := sr.mr.source.Read(sr.mr.buffer)
|
||||||
|
if n >= 0 {
|
||||||
|
sr.mr.buffer = sr.mr.buffer[:n]
|
||||||
|
} else {
|
||||||
|
sr.mr.buffer = sr.mr.buffer[:0]
|
||||||
|
}
|
||||||
|
|
||||||
|
sr.mr.cond.Broadcast()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
sr.mr.err = err
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
nn := copy(p, sr.mr.buffer)
|
||||||
|
sr.offset += nn
|
||||||
|
|
||||||
|
return nn, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sr *syncReader) Close() error {
|
||||||
|
sr.mr.mu.Lock()
|
||||||
|
defer sr.mr.mu.Unlock()
|
||||||
|
|
||||||
|
if sr.closed {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
sr.closed = true
|
||||||
|
|
||||||
|
sr.mr.cond.Broadcast()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
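
A small usage sketch for SyncMultiReader, assuming it is imported from this package (github.com/docker/buildx/build): every ReadCloser sees the same byte stream, and Reset pins the source to an in-memory buffer, which is how loadInputs replays stdin once it decides the input is a Dockerfile rather than a tar context:

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/docker/buildx/build"
)

func main() {
	mr := build.NewSyncMultiReader(strings.NewReader("FROM scratch\n"))

	// Peek does not consume the stream, so the archive/Dockerfile check can run first.
	magic, _ := mr.Peek(4)
	fmt.Printf("peeked: %q\n", magic)

	// Reset pins the content, after which every new reader replays the same bytes.
	mr.Reset([]byte("FROM scratch\n"))

	rc := mr.NewReadCloser()
	defer rc.Close()
	dt, _ := io.ReadAll(rc)
	fmt.Printf("dockerfile: %q\n", dt)
}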
|
||||||
build/replicatedstream_test.go
@@ -0,0 +1,76 @@
package build
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto/rand"
|
||||||
|
"io"
|
||||||
|
mathrand "math/rand"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func generateRandomData(size int) []byte {
|
||||||
|
data := make([]byte, size)
|
||||||
|
rand.Read(data)
|
||||||
|
return data
|
||||||
|
}
|
||||||
|
func TestSyncMultiReaderParallel(t *testing.T) {
|
||||||
|
data := generateRandomData(1024 * 1024)
|
||||||
|
source := bytes.NewReader(data)
|
||||||
|
mr := NewSyncMultiReader(source)
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
numReaders := 10
|
||||||
|
bufferSize := 4096 * 4
|
||||||
|
|
||||||
|
readers := make([]io.ReadCloser, numReaders)
|
||||||
|
|
||||||
|
for i := range numReaders {
|
||||||
|
readers[i] = mr.NewReadCloser()
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := range numReaders {
|
||||||
|
wg.Add(1)
|
||||||
|
go func(readerId int) {
|
||||||
|
defer wg.Done()
|
||||||
|
reader := readers[readerId]
|
||||||
|
defer reader.Close()
|
||||||
|
|
||||||
|
totalRead := 0
|
||||||
|
buf := make([]byte, bufferSize)
|
||||||
|
for totalRead < len(data) {
|
||||||
|
// Simulate random read sizes
|
||||||
|
readSize := mathrand.Intn(bufferSize) //nolint:gosec
|
||||||
|
n, err := reader.Read(buf[:readSize])
|
||||||
|
|
||||||
|
if n > 0 {
|
||||||
|
assert.Equal(t, data[totalRead:totalRead+n], buf[:n], "Reader %d mismatch", readerId)
|
||||||
|
totalRead += n
|
||||||
|
}
|
||||||
|
|
||||||
|
if err == io.EOF {
|
||||||
|
assert.Equal(t, len(data), totalRead, "Reader %d EOF mismatch", readerId)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.NoError(t, err, "Reader %d error", readerId)
|
||||||
|
|
||||||
|
if mathrand.Intn(1000) == 0 { //nolint:gosec
|
||||||
|
t.Logf("Reader %d closing", readerId)
|
||||||
|
// Simulate random close
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Simulate random timing between reads
|
||||||
|
time.Sleep(time.Millisecond * time.Duration(mathrand.Intn(5))) //nolint:gosec
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Equal(t, len(data), totalRead, "Reader %d total read mismatch", readerId)
|
||||||
|
}(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
wg.Wait()
|
||||||
|
}
|
||||||
build/result.go
@@ -0,0 +1,495 @@
package build
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
_ "crypto/sha256" // ensure digests can be computed
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
controllerapi "github.com/docker/buildx/controller/pb"
|
||||||
|
"github.com/moby/buildkit/client"
|
||||||
|
"github.com/moby/buildkit/exporter/containerimage/exptypes"
|
||||||
|
gateway "github.com/moby/buildkit/frontend/gateway/client"
|
||||||
|
"github.com/moby/buildkit/solver/errdefs"
|
||||||
|
"github.com/moby/buildkit/solver/pb"
|
||||||
|
"github.com/moby/buildkit/solver/result"
|
||||||
|
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"golang.org/x/sync/errgroup"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewResultHandle makes a call to client.Build, additionally returning an
|
||||||
|
// opaque ResultHandle alongside the standard response and error.
|
||||||
|
//
|
||||||
|
// This ResultHandle can be used to execute additional build steps in the same
|
||||||
|
// context as the build occurred, which can allow easy debugging of build
|
||||||
|
// failures and successes.
|
||||||
|
//
|
||||||
|
// If the returned ResultHandle is not nil, the caller must call Done() on it.
|
||||||
|
func NewResultHandle(ctx context.Context, cc *client.Client, opt client.SolveOpt, product string, buildFunc gateway.BuildFunc, ch chan *client.SolveStatus) (*ResultHandle, *client.SolveResponse, error) {
|
||||||
|
// Create a new context to wrap the original, and cancel it when the
|
||||||
|
// caller-provided context is cancelled.
|
||||||
|
//
|
||||||
|
// We derive the context from the background context so that we can forbid
|
||||||
|
// cancellation of the build request after <-done is closed (which we do
|
||||||
|
// before returning the ResultHandle).
|
||||||
|
baseCtx := ctx
|
||||||
|
ctx, cancel := context.WithCancelCause(context.Background())
|
||||||
|
done := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
select {
|
||||||
|
case <-baseCtx.Done():
|
||||||
|
cancel(baseCtx.Err())
|
||||||
|
case <-done:
|
||||||
|
// Once done is closed, we've recorded a ResultHandle, so we
|
||||||
|
// shouldn't allow cancelling the underlying build request anymore.
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Create a new channel to forward status messages to the original.
|
||||||
|
//
|
||||||
|
// We do this so that we can discard status messages after the main portion
|
||||||
|
// of the build is complete. This is necessary for the solve error case,
|
||||||
|
// where the original gateway is kept open until the ResultHandle is
|
||||||
|
// closed - we don't want progress messages from operations in that
|
||||||
|
// ResultHandle to display after this function exits.
|
||||||
|
//
|
||||||
|
// Additionally, callers should wait for the progress channel to be closed.
|
||||||
|
// If we keep the session open and never close the progress channel, the
|
||||||
|
// caller will likely hang.
|
||||||
|
baseCh := ch
|
||||||
|
ch = make(chan *client.SolveStatus)
|
||||||
|
go func() {
|
||||||
|
for {
|
||||||
|
s, ok := <-ch
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case <-baseCh:
|
||||||
|
// base channel is closed, discard status messages
|
||||||
|
default:
|
||||||
|
baseCh <- s
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
defer close(baseCh)
|
||||||
|
|
||||||
|
var resp *client.SolveResponse
|
||||||
|
var respErr error
|
||||||
|
var respHandle *ResultHandle
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
defer func() { cancel(errors.WithStack(context.Canceled)) }() // ensure no dangling processes
|
||||||
|
|
||||||
|
var res *gateway.Result
|
||||||
|
var err error
|
||||||
|
resp, err = cc.Build(ctx, opt, product, func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
|
||||||
|
var err error
|
||||||
|
res, err = buildFunc(ctx, c)
|
||||||
|
|
||||||
|
if res != nil && err == nil {
|
||||||
|
// Force evaluation of the build result (otherwise, we likely
|
||||||
|
// won't get a solve error)
|
||||||
|
def, err2 := getDefinition(ctx, res)
|
||||||
|
if err2 != nil {
|
||||||
|
return nil, err2
|
||||||
|
}
|
||||||
|
res, err = evalDefinition(ctx, c, def)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
// Scenario 1: we failed to evaluate a node somewhere in the
|
||||||
|
// build graph.
|
||||||
|
//
|
||||||
|
// In this case, we construct a ResultHandle from this
|
||||||
|
// original Build session, and return it alongside the original
|
||||||
|
// build error. We then need to keep the gateway session open
|
||||||
|
// until the caller explicitly closes the ResultHandle.
|
||||||
|
|
||||||
|
var se *errdefs.SolveError
|
||||||
|
if errors.As(err, &se) {
|
||||||
|
respHandle = &ResultHandle{
|
||||||
|
done: make(chan struct{}),
|
||||||
|
solveErr: se,
|
||||||
|
gwClient: c,
|
||||||
|
gwCtx: ctx,
|
||||||
|
}
|
||||||
|
respErr = err // return original error to preserve stacktrace
|
||||||
|
close(done)
|
||||||
|
|
||||||
|
// Block until the caller closes the ResultHandle.
|
||||||
|
select {
|
||||||
|
case <-respHandle.done:
|
||||||
|
case <-ctx.Done():
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return res, err
|
||||||
|
}, ch)
|
||||||
|
if respHandle != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
// Something unexpected failed during the build, we didn't succeed,
|
||||||
|
// but we also didn't make it far enough to create a ResultHandle.
|
||||||
|
respErr = err
|
||||||
|
close(done)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scenario 2: we successfully built the image with no errors.
|
||||||
|
//
|
||||||
|
// In this case, the original gateway session has now been closed
|
||||||
|
// since the Build has been completed. So, we need to create a new
|
||||||
|
// gateway session to populate the ResultHandle. To do this, we
|
||||||
|
// need to re-evaluate the target result, in this new session. This
|
||||||
|
// should be instantaneous since the result should be cached.
|
||||||
|
|
||||||
|
def, err := getDefinition(ctx, res)
|
||||||
|
if err != nil {
|
||||||
|
respErr = err
|
||||||
|
close(done)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// NOTE: ideally this second connection should be lazily opened
|
||||||
|
opt := opt
|
||||||
|
opt.Ref = ""
|
||||||
|
opt.Exports = nil
|
||||||
|
opt.CacheExports = nil
|
||||||
|
opt.Internal = true
|
||||||
|
_, respErr = cc.Build(ctx, opt, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
|
||||||
|
res, err := evalDefinition(ctx, c, def)
|
||||||
|
if err != nil {
|
||||||
|
// This should probably not happen, since we've previously
|
||||||
|
// successfully evaluated the same result with no issues.
|
||||||
|
return nil, errors.Wrap(err, "inconsistent solve result")
|
||||||
|
}
|
||||||
|
respHandle = &ResultHandle{
|
||||||
|
done: make(chan struct{}),
|
||||||
|
res: res,
|
||||||
|
gwClient: c,
|
||||||
|
gwCtx: ctx,
|
||||||
|
}
|
||||||
|
close(done)
|
||||||
|
|
||||||
|
// Block until the caller closes the ResultHandle.
|
||||||
|
select {
|
||||||
|
case <-respHandle.done:
|
||||||
|
case <-ctx.Done():
|
||||||
|
}
|
||||||
|
return nil, context.Cause(ctx)
|
||||||
|
}, nil)
|
||||||
|
if respHandle != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
close(done)
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Block until the other thread signals that it's completed the build.
|
||||||
|
select {
|
||||||
|
case <-done:
|
||||||
|
case <-baseCtx.Done():
|
||||||
|
if respErr == nil {
|
||||||
|
respErr = baseCtx.Err()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return respHandle, resp, respErr
|
||||||
|
}
|
||||||
|
|
||||||
|
// getDefinition converts a gateway result into a collection of definitions for
|
||||||
|
// each ref in the result.
|
||||||
|
func getDefinition(ctx context.Context, res *gateway.Result) (*result.Result[*pb.Definition], error) {
|
||||||
|
return result.ConvertResult(res, func(ref gateway.Reference) (*pb.Definition, error) {
|
||||||
|
st, err := ref.ToState()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
def, err := st.Marshal(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return def.ToPB(), nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// evalDefinition performs the reverse of getDefinition, converting a
|
||||||
|
// collection of definitions into a gateway result.
|
||||||
|
func evalDefinition(ctx context.Context, c gateway.Client, defs *result.Result[*pb.Definition]) (*gateway.Result, error) {
|
||||||
|
// force evaluation of all targets in parallel
|
||||||
|
results := make(map[*pb.Definition]*gateway.Result)
|
||||||
|
resultsMu := sync.Mutex{}
|
||||||
|
eg, egCtx := errgroup.WithContext(ctx)
|
||||||
|
defs.EachRef(func(def *pb.Definition) error {
|
||||||
|
eg.Go(func() error {
|
||||||
|
res, err := c.Solve(egCtx, gateway.SolveRequest{
|
||||||
|
Evaluate: true,
|
||||||
|
Definition: def,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
resultsMu.Lock()
|
||||||
|
results[def] = res
|
||||||
|
resultsMu.Unlock()
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err := eg.Wait(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
res, _ := result.ConvertResult(defs, func(def *pb.Definition) (gateway.Reference, error) {
|
||||||
|
if res, ok := results[def]; ok {
|
||||||
|
return res.Ref, nil
|
||||||
|
}
|
||||||
|
return nil, nil
|
||||||
|
})
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResultHandle is a build result with the client that built it.
|
||||||
|
type ResultHandle struct {
|
||||||
|
res *gateway.Result
|
||||||
|
solveErr *errdefs.SolveError
|
||||||
|
|
||||||
|
done chan struct{}
|
||||||
|
doneOnce sync.Once
|
||||||
|
|
||||||
|
gwClient gateway.Client
|
||||||
|
gwCtx context.Context
|
||||||
|
|
||||||
|
cleanups []func()
|
||||||
|
cleanupsMu sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *ResultHandle) Done() {
|
||||||
|
r.doneOnce.Do(func() {
|
||||||
|
r.cleanupsMu.Lock()
|
||||||
|
cleanups := r.cleanups
|
||||||
|
r.cleanups = nil
|
||||||
|
r.cleanupsMu.Unlock()
|
||||||
|
for _, f := range cleanups {
|
||||||
|
f()
|
||||||
|
}
|
||||||
|
|
||||||
|
close(r.done)
|
||||||
|
<-r.gwCtx.Done()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *ResultHandle) registerCleanup(f func()) {
|
||||||
|
r.cleanupsMu.Lock()
|
||||||
|
r.cleanups = append(r.cleanups, f)
|
||||||
|
r.cleanupsMu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *ResultHandle) build(buildFunc gateway.BuildFunc) (err error) {
|
||||||
|
_, err = buildFunc(r.gwCtx, r.gwClient)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *ResultHandle) getContainerConfig(cfg *controllerapi.InvokeConfig) (containerCfg gateway.NewContainerRequest, _ error) {
|
||||||
|
if r.res != nil && r.solveErr == nil {
|
||||||
|
logrus.Debugf("creating container from successful build")
|
||||||
|
ccfg, err := containerConfigFromResult(r.res, cfg)
|
||||||
|
if err != nil {
|
||||||
|
return containerCfg, err
|
||||||
|
}
|
||||||
|
containerCfg = *ccfg
|
||||||
|
} else {
|
||||||
|
logrus.Debugf("creating container from failed build %+v", cfg)
|
||||||
|
ccfg, err := containerConfigFromError(r.solveErr, cfg)
|
||||||
|
if err != nil {
|
||||||
|
return containerCfg, errors.Wrapf(err, "neither a result nor an error is available")
|
||||||
|
}
|
||||||
|
containerCfg = *ccfg
|
||||||
|
}
|
||||||
|
return containerCfg, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *ResultHandle) getProcessConfig(cfg *controllerapi.InvokeConfig, stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) (_ gateway.StartRequest, err error) {
|
||||||
|
processCfg := newStartRequest(stdin, stdout, stderr)
|
||||||
|
if r.res != nil && r.solveErr == nil {
|
||||||
|
logrus.Debugf("creating container from successful build")
|
||||||
|
if err := populateProcessConfigFromResult(&processCfg, r.res, cfg); err != nil {
|
||||||
|
return processCfg, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
logrus.Debugf("creating container from failed build %+v", cfg)
|
||||||
|
if err := populateProcessConfigFromError(&processCfg, r.solveErr, cfg); err != nil {
|
||||||
|
return processCfg, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return processCfg, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func containerConfigFromResult(res *gateway.Result, cfg *controllerapi.InvokeConfig) (*gateway.NewContainerRequest, error) {
|
||||||
|
if cfg.Initial {
|
||||||
|
return nil, errors.Errorf("starting from the container from the initial state of the step is supported only on the failed steps")
|
||||||
|
}
|
||||||
|
|
||||||
|
ps, err := exptypes.ParsePlatforms(res.Metadata)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
ref, ok := res.FindRef(ps.Platforms[0].ID)
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.Errorf("no reference found")
|
||||||
|
}
|
||||||
|
|
||||||
|
return &gateway.NewContainerRequest{
|
||||||
|
Mounts: []gateway.Mount{
|
||||||
|
{
|
||||||
|
Dest: "/",
|
||||||
|
MountType: pb.MountType_BIND,
|
||||||
|
Ref: ref,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func populateProcessConfigFromResult(req *gateway.StartRequest, res *gateway.Result, cfg *controllerapi.InvokeConfig) error {
|
||||||
|
imgData := res.Metadata[exptypes.ExporterImageConfigKey]
|
||||||
|
var img *specs.Image
|
||||||
|
if len(imgData) > 0 {
|
||||||
|
img = &specs.Image{}
|
||||||
|
if err := json.Unmarshal(imgData, img); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
user := ""
|
||||||
|
if !cfg.NoUser {
|
||||||
|
user = cfg.User
|
||||||
|
} else if img != nil {
|
||||||
|
user = img.Config.User
|
||||||
|
}
|
||||||
|
|
||||||
|
cwd := ""
|
||||||
|
if !cfg.NoCwd {
|
||||||
|
cwd = cfg.Cwd
|
||||||
|
} else if img != nil {
|
||||||
|
cwd = img.Config.WorkingDir
|
||||||
|
}
|
||||||
|
|
||||||
|
env := []string{}
|
||||||
|
if img != nil {
|
||||||
|
env = append(env, img.Config.Env...)
|
||||||
|
}
|
||||||
|
env = append(env, cfg.Env...)
|
||||||
|
|
||||||
|
args := []string{}
|
||||||
|
if cfg.Entrypoint != nil {
|
||||||
|
args = append(args, cfg.Entrypoint...)
|
||||||
|
} else if img != nil {
|
||||||
|
args = append(args, img.Config.Entrypoint...)
|
||||||
|
}
|
||||||
|
if !cfg.NoCmd {
|
||||||
|
args = append(args, cfg.Cmd...)
|
||||||
|
} else if img != nil {
|
||||||
|
args = append(args, img.Config.Cmd...)
|
||||||
|
}
|
||||||
|
|
||||||
|
req.Args = args
|
||||||
|
req.Env = env
|
||||||
|
req.User = user
|
||||||
|
req.Cwd = cwd
|
||||||
|
req.Tty = cfg.Tty
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func containerConfigFromError(solveErr *errdefs.SolveError, cfg *controllerapi.InvokeConfig) (*gateway.NewContainerRequest, error) {
|
||||||
|
exec, err := execOpFromError(solveErr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var mounts []gateway.Mount
|
||||||
|
for i, mnt := range exec.Mounts {
|
||||||
|
rid := solveErr.Solve.MountIDs[i]
|
||||||
|
if cfg.Initial {
|
||||||
|
rid = solveErr.Solve.InputIDs[i]
|
||||||
|
}
|
||||||
|
mounts = append(mounts, gateway.Mount{
|
||||||
|
Selector: mnt.Selector,
|
||||||
|
Dest: mnt.Dest,
|
||||||
|
ResultID: rid,
|
||||||
|
Readonly: mnt.Readonly,
|
||||||
|
MountType: mnt.MountType,
|
||||||
|
CacheOpt: mnt.CacheOpt,
|
||||||
|
SecretOpt: mnt.SecretOpt,
|
||||||
|
SSHOpt: mnt.SSHOpt,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return &gateway.NewContainerRequest{
|
||||||
|
Mounts: mounts,
|
||||||
|
NetMode: exec.Network,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func populateProcessConfigFromError(req *gateway.StartRequest, solveErr *errdefs.SolveError, cfg *controllerapi.InvokeConfig) error {
|
||||||
|
exec, err := execOpFromError(solveErr)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
meta := exec.Meta
|
||||||
|
user := ""
|
||||||
|
if !cfg.NoUser {
|
||||||
|
user = cfg.User
|
||||||
|
} else {
|
||||||
|
user = meta.User
|
||||||
|
}
|
||||||
|
|
||||||
|
cwd := ""
|
||||||
|
if !cfg.NoCwd {
|
||||||
|
cwd = cfg.Cwd
|
||||||
|
} else {
|
||||||
|
cwd = meta.Cwd
|
||||||
|
}
|
||||||
|
|
||||||
|
env := append(meta.Env, cfg.Env...)
|
||||||
|
|
||||||
|
args := []string{}
|
||||||
|
if cfg.Entrypoint != nil {
|
||||||
|
args = append(args, cfg.Entrypoint...)
|
||||||
|
}
|
||||||
|
if cfg.Cmd != nil {
|
||||||
|
args = append(args, cfg.Cmd...)
|
||||||
|
}
|
||||||
|
if len(args) == 0 {
|
||||||
|
args = meta.Args
|
||||||
|
}
|
||||||
|
|
||||||
|
req.Args = args
|
||||||
|
req.Env = env
|
||||||
|
req.User = user
|
||||||
|
req.Cwd = cwd
|
||||||
|
req.Tty = cfg.Tty
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func execOpFromError(solveErr *errdefs.SolveError) (*pb.ExecOp, error) {
|
||||||
|
if solveErr == nil {
|
||||||
|
return nil, errors.Errorf("no error is available")
|
||||||
|
}
|
||||||
|
switch op := solveErr.Solve.Op.GetOp().(type) {
|
||||||
|
case *pb.Op_Exec:
|
||||||
|
return op.Exec, nil
|
||||||
|
default:
|
||||||
|
return nil, errors.Errorf("invoke: unsupported error type")
|
||||||
|
}
|
||||||
|
// TODO: support other ops
|
||||||
|
}
|
||||||
|
|
||||||
|
func newStartRequest(stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) gateway.StartRequest {
|
||||||
|
return gateway.StartRequest{
|
||||||
|
Stdin: stdin,
|
||||||
|
Stdout: stdout,
|
||||||
|
Stderr: stderr,
|
||||||
|
}
|
||||||
|
}
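
NewResultHandle keeps the gateway session open when the solve fails with an *errdefs.SolveError, so the failed step can be inspected later. A minimal sketch of the error detection it relies on; the wrapped message below is a hypothetical example, not real buildx output:

package main

import (
	"fmt"

	"github.com/moby/buildkit/solver/errdefs"
	"github.com/pkg/errors"
)

func main() {
	// Hypothetical wrapped error; in a real build it would come back from
	// client.Build / gateway Solve when a step fails.
	var err error = errors.Wrap(&errdefs.SolveError{}, "process did not complete successfully")

	var se *errdefs.SolveError
	if errors.As(err, &se) {
		fmt.Println("solve error: a ResultHandle can keep the session open for debugging")
	} else {
		fmt.Println("not a solve error: no ResultHandle is created")
	}
}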
|
||||||
build/url.go
@@ -7,13 +7,16 @@ import (
 	"github.com/docker/buildx/driver"
 	"github.com/docker/buildx/util/progress"
+	"github.com/docker/go-units"
 	"github.com/moby/buildkit/client"
 	"github.com/moby/buildkit/client/llb"
 	gwclient "github.com/moby/buildkit/frontend/gateway/client"
 	"github.com/pkg/errors"
 )
 
-func createTempDockerfileFromURL(ctx context.Context, d driver.Driver, url string, pw progress.Writer) (string, error) {
+const maxDockerfileSize = 2 * 1024 * 1024 // 2 MB
+
+func createTempDockerfileFromURL(ctx context.Context, d *driver.DriverHandle, url string, pw progress.Writer) (string, error) {
 	c, err := driver.Boot(ctx, ctx, d, pw)
 	if err != nil {
 		return "", err
@@ -21,7 +24,7 @@ func createTempDockerfileFromURL(ctx context.Context, d driver.Driver, url strin
 	var out string
 	ch, done := progress.NewChannel(pw)
 	defer func() { <-done }()
-	_, err = c.Build(ctx, client.SolveOpt{}, "buildx", func(ctx context.Context, c gwclient.Client) (*gwclient.Result, error) {
+	_, err = c.Build(ctx, client.SolveOpt{Internal: true}, "buildx", func(ctx context.Context, c gwclient.Client) (*gwclient.Result, error) {
 		def, err := llb.HTTP(url, llb.Filename("Dockerfile"), llb.WithCustomNamef("[internal] load %s", url)).Marshal(ctx)
 		if err != nil {
 			return nil, err
@@ -43,8 +46,8 @@ func createTempDockerfileFromURL(ctx context.Context, d driver.Driver, url strin
 		if err != nil {
 			return nil, err
 		}
-		if stat.Size() > 512*1024 {
-			return nil, errors.Errorf("Dockerfile %s bigger than allowed max size", url)
+		if stat.Size > maxDockerfileSize {
+			return nil, errors.Errorf("Dockerfile %s bigger than allowed max size (%s)", url, units.HumanSize(maxDockerfileSize))
 		}
 
 		dt, err := ref.ReadFile(ctx, gwclient.ReadRequest{
@@ -63,7 +66,6 @@ func createTempDockerfileFromURL(ctx context.Context, d driver.Driver, url strin
 		out = dir
 		return nil, nil
 	}, ch)
-
 	if err != nil {
 		return "", err
 	}
@@ -3,20 +3,43 @@ package build
 import (
 	"archive/tar"
 	"bytes"
+	"context"
 	"net"
 	"os"
+	"strconv"
 	"strings"
 
+	"github.com/docker/buildx/driver"
 	"github.com/docker/cli/opts"
+	"github.com/moby/buildkit/util/gitutil"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 )
 
-// archiveHeaderSize is the number of bytes in an archive header
-const archiveHeaderSize = 512
+const (
+	// archiveHeaderSize is the number of bytes in an archive header
+	archiveHeaderSize = 512
+	// mobyHostGatewayName defines a special string which users can append to
+	// --add-host to add an extra entry in /etc/hosts that maps
+	// host.docker.internal to the host IP
+	mobyHostGatewayName = "host-gateway"
+)
 
-func isLocalDir(c string) bool {
-	st, err := os.Stat(c)
-	return err == nil && st.IsDir()
+// isHTTPURL returns true if the provided str is an HTTP(S) URL by checking if it
+// has a http:// or https:// scheme. No validation is performed to verify if the
+// URL is well-formed.
+func isHTTPURL(str string) bool {
+	return strings.HasPrefix(str, "https://") || strings.HasPrefix(str, "http://")
+}
+
+func IsRemoteURL(c string) bool {
+	if isHTTPURL(c) {
+		return true
+	}
+	if _, err := gitutil.ParseGitRef(c); err == nil {
+		return true
+	}
+	return false
 }
 
 func isArchive(header []byte) bool {
@@ -39,18 +62,39 @@ func isArchive(header []byte) bool {
 }
 
 // toBuildkitExtraHosts converts hosts from docker key:value format to buildkit's csv format
-func toBuildkitExtraHosts(inp []string) (string, error) {
+func toBuildkitExtraHosts(ctx context.Context, inp []string, nodeDriver *driver.DriverHandle) (string, error) {
 	if len(inp) == 0 {
 		return "", nil
 	}
 	hosts := make([]string, 0, len(inp))
 	for _, h := range inp {
-		parts := strings.Split(h, ":")
-
-		if len(parts) != 2 || parts[0] == "" || net.ParseIP(parts[1]) == nil {
+		host, ip, ok := strings.Cut(h, "=")
+		if !ok {
+			host, ip, ok = strings.Cut(h, ":")
+		}
+		if !ok || host == "" || ip == "" {
 			return "", errors.Errorf("invalid host %s", h)
 		}
-		hosts = append(hosts, parts[0]+"="+parts[1])
+		// If the IP Address is a "host-gateway", replace this value with the
+		// IP address provided by the worker's label.
+		if ip == mobyHostGatewayName {
+			hgip, err := nodeDriver.HostGatewayIP(ctx)
+			if err != nil {
+				return "", errors.Wrap(err, "unable to derive the IP value for host-gateway")
+			}
+			ip = hgip.String()
+		} else {
+			// If the address is enclosed in square brackets, extract it (for IPv6, but
+			// permit it for IPv4 as well; we don't know the address family here, but it's
+			// unambiguous).
+			if len(ip) > 2 && ip[0] == '[' && ip[len(ip)-1] == ']' {
+				ip = ip[1 : len(ip)-1]
+			}
+			if net.ParseIP(ip) == nil {
+				return "", errors.Errorf("invalid host %s", h)
+			}
+		}
+		hosts = append(hosts, host+"="+ip)
 	}
 	return strings.Join(hosts, ","), nil
 }
@@ -66,3 +110,21 @@ func toBuildkitUlimits(inp *opts.UlimitOpt) (string, error) {
 	}
 	return strings.Join(ulimits, ","), nil
 }
+
+func notSupported(f driver.Feature, d *driver.DriverHandle, docs string) error {
+	return errors.Errorf(`%s is not supported for the %s driver.
+Switch to a different driver, or turn on the containerd image store, and try again.
+Learn more at %s`, f, d.Factory().Name(), docs)
+}
+
+func noDefaultLoad() bool {
+	v, ok := os.LookupEnv("BUILDX_NO_DEFAULT_LOAD")
+	if !ok {
+		return false
+	}
+	b, err := strconv.ParseBool(v)
+	if err != nil {
+		logrus.Warnf("invalid non-bool value for BUILDX_NO_DEFAULT_LOAD: %s", v)
+	}
+	return b
+}
build/utils_test.go
@@ -0,0 +1,148 @@
package build
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestToBuildkitExtraHosts(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
doc string
|
||||||
|
input []string
|
||||||
|
expectedOut string // Expect output==input if not set.
|
||||||
|
expectedErr string // Expect success if not set.
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
doc: "IPv4, colon sep",
|
||||||
|
input: []string{`myhost:192.168.0.1`},
|
||||||
|
expectedOut: `myhost=192.168.0.1`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "IPv4, eq sep",
|
||||||
|
input: []string{`myhost=192.168.0.1`},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "Weird but permitted, IPv4 with brackets",
|
||||||
|
input: []string{`myhost=[192.168.0.1]`},
|
||||||
|
expectedOut: `myhost=192.168.0.1`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "Host and domain",
|
||||||
|
input: []string{`host.and.domain.invalid:10.0.2.1`},
|
||||||
|
expectedOut: `host.and.domain.invalid=10.0.2.1`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "IPv6, colon sep",
|
||||||
|
input: []string{`anipv6host:2003:ab34:e::1`},
|
||||||
|
expectedOut: `anipv6host=2003:ab34:e::1`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "IPv6, colon sep, brackets",
|
||||||
|
input: []string{`anipv6host:[2003:ab34:e::1]`},
|
||||||
|
expectedOut: `anipv6host=2003:ab34:e::1`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "IPv6, eq sep, brackets",
|
||||||
|
input: []string{`anipv6host=[2003:ab34:e::1]`},
|
||||||
|
expectedOut: `anipv6host=2003:ab34:e::1`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "IPv6 localhost, colon sep",
|
||||||
|
input: []string{`ipv6local:::1`},
|
||||||
|
expectedOut: `ipv6local=::1`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "IPv6 localhost, eq sep",
|
||||||
|
input: []string{`ipv6local=::1`},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "IPv6 localhost, eq sep, brackets",
|
||||||
|
input: []string{`ipv6local=[::1]`},
|
||||||
|
expectedOut: `ipv6local=::1`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "IPv6 localhost, non-canonical, colon sep",
|
||||||
|
input: []string{`ipv6local:0:0:0:0:0:0:0:1`},
|
||||||
|
expectedOut: `ipv6local=0:0:0:0:0:0:0:1`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "IPv6 localhost, non-canonical, eq sep",
|
||||||
|
input: []string{`ipv6local=0:0:0:0:0:0:0:1`},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "IPv6 localhost, non-canonical, eq sep, brackets",
|
||||||
|
input: []string{`ipv6local=[0:0:0:0:0:0:0:1]`},
|
||||||
|
expectedOut: `ipv6local=0:0:0:0:0:0:0:1`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "Bad address, colon sep",
|
||||||
|
input: []string{`myhost:192.notanipaddress.1`},
|
||||||
|
expectedErr: `invalid IP address in add-host: "192.notanipaddress.1"`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "Bad address, eq sep",
|
||||||
|
input: []string{`myhost=192.notanipaddress.1`},
|
||||||
|
expectedErr: `invalid IP address in add-host: "192.notanipaddress.1"`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "No sep",
|
||||||
|
input: []string{`thathost-nosemicolon10.0.0.1`},
|
||||||
|
expectedErr: `bad format for add-host: "thathost-nosemicolon10.0.0.1"`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "Bad IPv6",
|
||||||
|
input: []string{`anipv6host:::::1`},
|
||||||
|
expectedErr: `invalid IP address in add-host: "::::1"`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "Bad IPv6, trailing colons",
|
||||||
|
input: []string{`ipv6local:::0::`},
|
||||||
|
expectedErr: `invalid IP address in add-host: "::0::"`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "Bad IPv6, missing close bracket",
|
||||||
|
input: []string{`ipv6addr=[::1`},
|
||||||
|
expectedErr: `invalid IP address in add-host: "[::1"`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "Bad IPv6, missing open bracket",
|
||||||
|
input: []string{`ipv6addr=::1]`},
|
||||||
|
expectedErr: `invalid IP address in add-host: "::1]"`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "Missing address, colon sep",
|
||||||
|
input: []string{`myhost.invalid:`},
|
||||||
|
expectedErr: `invalid IP address in add-host: ""`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "Missing address, eq sep",
|
||||||
|
input: []string{`myhost.invalid=`},
|
||||||
|
expectedErr: `invalid IP address in add-host: ""`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "No input",
|
||||||
|
input: []string{``},
|
||||||
|
expectedErr: `bad format for add-host: ""`,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
tc := tc
|
||||||
|
if tc.expectedOut == "" {
|
||||||
|
tc.expectedOut = strings.Join(tc.input, ",")
|
||||||
|
}
|
||||||
|
t.Run(tc.doc, func(t *testing.T) {
|
||||||
|
actualOut, actualErr := toBuildkitExtraHosts(context.TODO(), tc.input, nil)
|
||||||
|
if tc.expectedErr == "" {
|
||||||
|
require.Equal(t, tc.expectedOut, actualOut)
|
||||||
|
require.NoError(t, actualErr)
|
||||||
|
} else {
|
||||||
|
require.Zero(t, actualOut)
|
||||||
|
require.Error(t, actualErr, tc.expectedErr)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
686 builder/builder.go Normal file
@@ -0,0 +1,686 @@
package builder

import (
	"context"
	"encoding/json"
	"net/url"
	"os"
	"slices"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/docker/buildx/driver"
	k8sutil "github.com/docker/buildx/driver/kubernetes/util"
	remoteutil "github.com/docker/buildx/driver/remote/util"
	"github.com/docker/buildx/localstate"
	"github.com/docker/buildx/store"
	"github.com/docker/buildx/store/storeutil"
	"github.com/docker/buildx/util/confutil"
	"github.com/docker/buildx/util/dockerutil"
	"github.com/docker/buildx/util/imagetools"
	"github.com/docker/buildx/util/progress"
	"github.com/docker/cli/cli/command"
	dopts "github.com/docker/cli/opts"
	"github.com/google/shlex"
	"github.com/moby/buildkit/util/progress/progressui"
	"github.com/pkg/errors"
	"github.com/spf13/pflag"
	"github.com/tonistiigi/go-csvvalue"
	"golang.org/x/sync/errgroup"
)

// Builder represents an active builder object
type Builder struct {
	*store.NodeGroup
	driverFactory driverFactory
	nodes         []Node
	opts          builderOpts
	err           error
}

type builderOpts struct {
	dockerCli       command.Cli
	name            string
	txn             *store.Txn
	contextPathHash string
	validate        bool
}

// Option provides a variadic option for configuring the builder.
type Option func(b *Builder)

// WithName sets builder name.
func WithName(name string) Option {
	return func(b *Builder) {
		b.opts.name = name
	}
}

// WithStore sets a store instance used at init.
func WithStore(txn *store.Txn) Option {
	return func(b *Builder) {
		b.opts.txn = txn
	}
}

// WithContextPathHash is used for determining pods in k8s driver instance.
func WithContextPathHash(contextPathHash string) Option {
	return func(b *Builder) {
		b.opts.contextPathHash = contextPathHash
	}
}

// WithSkippedValidation skips builder context validation.
func WithSkippedValidation() Option {
	return func(b *Builder) {
		b.opts.validate = false
	}
}

// New initializes a new builder client
func New(dockerCli command.Cli, opts ...Option) (_ *Builder, err error) {
	b := &Builder{
		opts: builderOpts{
			dockerCli: dockerCli,
			validate:  true,
		},
	}
	for _, opt := range opts {
		opt(b)
	}

	if b.opts.txn == nil {
		// if store instance is nil we create a short-lived one using the
		// default store and ensure we release it on completion
		var release func()
		b.opts.txn, release, err = storeutil.GetStore(dockerCli)
		if err != nil {
			return nil, err
		}
		defer release()
	}

	if b.opts.name != "" {
		if b.NodeGroup, err = storeutil.GetNodeGroup(b.opts.txn, dockerCli, b.opts.name); err != nil {
			return nil, err
		}
	} else {
		if b.NodeGroup, err = storeutil.GetCurrentInstance(b.opts.txn, dockerCli); err != nil {
			return nil, err
		}
	}
	if b.opts.validate {
		if err = b.Validate(); err != nil {
			return nil, err
		}
	}

	return b, nil
}

// Validate validates builder context
func (b *Builder) Validate() error {
	if b.NodeGroup != nil && b.NodeGroup.DockerContext {
		list, err := b.opts.dockerCli.ContextStore().List()
		if err != nil {
			return err
		}
		currentContext := b.opts.dockerCli.CurrentContext()
		for _, l := range list {
			if l.Name == b.Name && l.Name != currentContext {
				return errors.Errorf("use `docker --context=%s buildx` to switch to context %q", l.Name, l.Name)
			}
		}
	}
	return nil
}

// ContextName returns builder context name if available.
func (b *Builder) ContextName() string {
	ctxbuilders, err := b.opts.dockerCli.ContextStore().List()
	if err != nil {
		return ""
	}
	for _, cb := range ctxbuilders {
		if b.NodeGroup.Driver == "docker" && len(b.NodeGroup.Nodes) == 1 && b.NodeGroup.Nodes[0].Endpoint == cb.Name {
			return cb.Name
		}
	}
	return ""
}

// ImageOpt returns registry auth configuration
func (b *Builder) ImageOpt() (imagetools.Opt, error) {
	return storeutil.GetImageConfig(b.opts.dockerCli, b.NodeGroup)
}

// Boot bootstrap a builder
func (b *Builder) Boot(ctx context.Context) (bool, error) {
	toBoot := make([]int, 0, len(b.nodes))
	for idx, d := range b.nodes {
		if d.Err != nil || d.Driver == nil || d.DriverInfo == nil {
			continue
		}
		if d.DriverInfo.Status != driver.Running {
			toBoot = append(toBoot, idx)
		}
	}
	if len(toBoot) == 0 {
		return false, nil
	}

	printer, err := progress.NewPrinter(context.TODO(), os.Stderr, progressui.AutoMode)
	if err != nil {
		return false, err
	}

	baseCtx := ctx
	eg, _ := errgroup.WithContext(ctx)
	errCh := make(chan error, len(toBoot))
	for _, idx := range toBoot {
		func(idx int) {
			eg.Go(func() error {
				pw := progress.WithPrefix(printer, b.NodeGroup.Nodes[idx].Name, len(toBoot) > 1)
				_, err := driver.Boot(ctx, baseCtx, b.nodes[idx].Driver, pw)
				if err != nil {
					b.nodes[idx].Err = err
					errCh <- err
				}
				return nil
			})
		}(idx)
	}

	err = eg.Wait()
	close(errCh)
	err1 := printer.Wait()
	if err == nil {
		err = err1
	}

	if err == nil && len(errCh) > 0 {
		return false, <-errCh
	}
	return true, err
}

// Inactive checks if all nodes are inactive for this builder.
func (b *Builder) Inactive() bool {
	for _, d := range b.nodes {
		if d.DriverInfo != nil && d.DriverInfo.Status == driver.Running {
			return false
		}
	}
	return true
}

// Err returns error if any.
func (b *Builder) Err() error {
	return b.err
}

type driverFactory struct {
	driver.Factory
	once sync.Once
}

// Factory returns the driver factory.
func (b *Builder) Factory(ctx context.Context, dialMeta map[string][]string) (_ driver.Factory, err error) {
	b.driverFactory.once.Do(func() {
		if b.Driver != "" {
			b.driverFactory.Factory, err = driver.GetFactory(b.Driver, true)
			if err != nil {
				return
			}
		} else {
			// empty driver means nodegroup was implicitly created as a default
			// driver for a docker context and allows falling back to a
			// docker-container driver for older daemon that doesn't support
			// buildkit (< 18.06).
			ep := b.NodeGroup.Nodes[0].Endpoint
			var dockerapi *dockerutil.ClientAPI
			dockerapi, err = dockerutil.NewClientAPI(b.opts.dockerCli, b.NodeGroup.Nodes[0].Endpoint)
			if err != nil {
				return
			}
			// check if endpoint is healthy is needed to determine the driver type.
			// if this fails then can't continue with driver selection.
			if _, err = dockerapi.Ping(ctx); err != nil {
				return
			}
			b.driverFactory.Factory, err = driver.GetDefaultFactory(ctx, ep, dockerapi, false, dialMeta)
			if err != nil {
				return
			}
			b.Driver = b.driverFactory.Factory.Name()
		}
	})
	return b.driverFactory.Factory, err
}

func (b *Builder) MarshalJSON() ([]byte, error) {
	var berr string
	if b.err != nil {
		berr = strings.TrimSpace(b.err.Error())
	}
	return json.Marshal(struct {
		Name         string
		Driver       string
		LastActivity time.Time `json:",omitempty"`
		Dynamic      bool
		Nodes        []Node
		Err          string `json:",omitempty"`
	}{
		Name:         b.Name,
		Driver:       b.Driver,
		LastActivity: b.LastActivity,
		Dynamic:      b.Dynamic,
		Nodes:        b.nodes,
		Err:          berr,
	})
}

// GetBuilders returns all builders
func GetBuilders(dockerCli command.Cli, txn *store.Txn) ([]*Builder, error) {
	storeng, err := txn.List()
	if err != nil {
		return nil, err
	}

	contexts, err := dockerCli.ContextStore().List()
	if err != nil {
		return nil, err
	}
	sort.Slice(contexts, func(i, j int) bool {
		return contexts[i].Name < contexts[j].Name
	})

	builders := make([]*Builder, len(storeng), len(storeng)+len(contexts))
	seen := make(map[string]struct{})
	for i, ng := range storeng {
		b, err := New(dockerCli,
			WithName(ng.Name),
			WithStore(txn),
			WithSkippedValidation(),
		)
		if err != nil {
			return nil, err
		}
		builders[i] = b
		seen[b.NodeGroup.Name] = struct{}{}
	}

	for _, c := range contexts {
		// if a context has the same name as an instance from the store, do not
		// add it to the builders list. An instance from the store takes
		// precedence over context builders.
		if _, ok := seen[c.Name]; ok {
			continue
		}
		b, err := New(dockerCli,
			WithName(c.Name),
			WithStore(txn),
			WithSkippedValidation(),
		)
		if err != nil {
			return nil, err
		}
		builders = append(builders, b)
	}

	return builders, nil
}

type CreateOpts struct {
	Name                string
	Driver              string
	NodeName            string
	Platforms           []string
	BuildkitdFlags      string
	BuildkitdConfigFile string
	DriverOpts          []string
	Use                 bool
	Endpoint            string
	Append              bool
}

func Create(ctx context.Context, txn *store.Txn, dockerCli command.Cli, opts CreateOpts) (*Builder, error) {
	var err error

	if opts.Name == "default" {
		return nil, errors.Errorf("default is a reserved name and cannot be used to identify builder instance")
	} else if opts.Append && opts.Name == "" {
		return nil, errors.Errorf("append requires a builder name")
	}

	name := opts.Name
	if name == "" {
		name, err = store.GenerateName(txn)
		if err != nil {
			return nil, err
		}
	}

	if !opts.Append {
		contexts, err := dockerCli.ContextStore().List()
		if err != nil {
			return nil, err
		}
		for _, c := range contexts {
			if c.Name == name {
				return nil, errors.Errorf("instance name %q already exists as context builder", name)
			}
		}
	}

	ng, err := txn.NodeGroupByName(name)
	if err != nil {
		if os.IsNotExist(errors.Cause(err)) {
			if opts.Append && opts.Name != "" {
				return nil, errors.Errorf("failed to find instance %q for append", opts.Name)
			}
		} else {
			return nil, err
		}
	}

	buildkitHost := os.Getenv("BUILDKIT_HOST")

	driverName := opts.Driver
	if driverName == "" {
		if ng != nil {
			driverName = ng.Driver
		} else if opts.Endpoint == "" && buildkitHost != "" {
			driverName = "remote"
		} else {
			f, err := driver.GetDefaultFactory(ctx, opts.Endpoint, dockerCli.Client(), true, nil)
			if err != nil {
				return nil, err
			}
			if f == nil {
				return nil, errors.Errorf("no valid drivers found")
			}
			driverName = f.Name()
		}
	}

	if ng != nil {
		if opts.NodeName == "" && !opts.Append {
			return nil, errors.Errorf("existing instance for %q but no append mode, specify the node name to make changes for existing instances", name)
		}
		if driverName != ng.Driver {
			return nil, errors.Errorf("existing instance for %q but has mismatched driver %q", name, ng.Driver)
		}
	}

	if _, err := driver.GetFactory(driverName, true); err != nil {
		return nil, err
	}

	ngOriginal := ng
	if ngOriginal != nil {
		ngOriginal = ngOriginal.Copy()
	}

	if ng == nil {
		ng = &store.NodeGroup{
			Name:   name,
			Driver: driverName,
		}
	}

	driverOpts, err := csvToMap(opts.DriverOpts)
	if err != nil {
		return nil, err
	}

	buildkitdConfigFile := opts.BuildkitdConfigFile
	if buildkitdConfigFile == "" {
		// if buildkit daemon config is not provided, check if the default one
		// is available and use it
		if f, ok := confutil.NewConfig(dockerCli).BuildKitConfigFile(); ok {
			buildkitdConfigFile = f
		}
	}

	buildkitdFlags, err := parseBuildkitdFlags(opts.BuildkitdFlags, driverName, driverOpts, buildkitdConfigFile)
	if err != nil {
		return nil, err
	}

	var ep string
	var setEp bool
	switch {
	case driverName == "kubernetes":
		if opts.Endpoint != "" {
			return nil, errors.Errorf("kubernetes driver does not support endpoint args %q", opts.Endpoint)
		}
		// generate node name if not provided to avoid duplicated endpoint
		// error: https://github.com/docker/setup-buildx-action/issues/215
		nodeName := opts.NodeName
		if nodeName == "" {
			nodeName, err = k8sutil.GenerateNodeName(name, txn)
			if err != nil {
				return nil, err
			}
		}
		// naming endpoint to make append works
		ep = (&url.URL{
			Scheme: driverName,
			Path:   "/" + name,
			RawQuery: (&url.Values{
				"deployment": {nodeName},
				"kubeconfig": {os.Getenv("KUBECONFIG")},
			}).Encode(),
		}).String()
		setEp = false
	case driverName == "remote":
		if opts.Endpoint != "" {
			ep = opts.Endpoint
		} else if buildkitHost != "" {
			ep = buildkitHost
		} else {
			return nil, errors.Errorf("no remote endpoint provided")
		}
		ep, err = validateBuildkitEndpoint(ep)
		if err != nil {
			return nil, err
		}
		setEp = true
	case opts.Endpoint != "":
		ep, err = validateEndpoint(dockerCli, opts.Endpoint)
		if err != nil {
			return nil, err
		}
		setEp = true
	default:
		if dockerCli.CurrentContext() == "default" && dockerCli.DockerEndpoint().TLSData != nil {
			return nil, errors.Errorf("could not create a builder instance with TLS data loaded from environment. Please use `docker context create <context-name>` to create a context for current environment and then create a builder instance with context set to <context-name>")
		}
		ep, err = dockerutil.GetCurrentEndpoint(dockerCli)
		if err != nil {
			return nil, err
		}
		setEp = false
	}

	if err := ng.Update(opts.NodeName, ep, opts.Platforms, setEp, opts.Append, buildkitdFlags, buildkitdConfigFile, driverOpts); err != nil {
		return nil, err
	}

	if err := txn.Save(ng); err != nil {
		return nil, err
	}

	b, err := New(dockerCli,
		WithName(ng.Name),
		WithStore(txn),
		WithSkippedValidation(),
	)
	if err != nil {
		return nil, err
	}

	cancelCtx, cancel := context.WithCancelCause(ctx)
	timeoutCtx, _ := context.WithTimeoutCause(cancelCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
	defer func() { cancel(errors.WithStack(context.Canceled)) }()

	nodes, err := b.LoadNodes(timeoutCtx, WithData())
	if err != nil {
		return nil, err
	}

	for _, node := range nodes {
		if err := node.Err; err != nil {
			err := errors.Errorf("failed to initialize builder %s (%s): %s", ng.Name, node.Name, err)
			var err2 error
			if ngOriginal == nil {
				err2 = txn.Remove(ng.Name)
			} else {
				err2 = txn.Save(ngOriginal)
			}
			if err2 != nil {
				return nil, errors.Errorf("could not rollback to previous state: %s", err2)
			}
			return nil, err
		}
	}

	if opts.Use && ep != "" {
		current, err := dockerutil.GetCurrentEndpoint(dockerCli)
		if err != nil {
			return nil, err
		}
		if err := txn.SetCurrent(current, ng.Name, false, false); err != nil {
			return nil, err
		}
	}

	return b, nil
}

type LeaveOpts struct {
	Name     string
	NodeName string
}

func Leave(ctx context.Context, txn *store.Txn, dockerCli command.Cli, opts LeaveOpts) error {
	if opts.Name == "" {
		return errors.Errorf("leave requires instance name")
	}
	if opts.NodeName == "" {
		return errors.Errorf("leave requires node name")
	}

	ng, err := txn.NodeGroupByName(opts.Name)
	if err != nil {
		if os.IsNotExist(errors.Cause(err)) {
			return errors.Errorf("failed to find instance %q for leave", opts.Name)
		}
		return err
	}

	if err := ng.Leave(opts.NodeName); err != nil {
		return err
	}

	ls, err := localstate.New(confutil.NewConfig(dockerCli))
	if err != nil {
		return err
	}
	if err := ls.RemoveBuilderNode(ng.Name, opts.NodeName); err != nil {
		return err
	}

	return txn.Save(ng)
}

func csvToMap(in []string) (map[string]string, error) {
	if len(in) == 0 {
		return nil, nil
	}
	m := make(map[string]string, len(in))
	for _, s := range in {
		fields, err := csvvalue.Fields(s, nil)
		if err != nil {
			return nil, err
		}
		for _, v := range fields {
			p := strings.SplitN(v, "=", 2)
			if len(p) != 2 {
				return nil, errors.Errorf("invalid value %q, expecting k=v", v)
			}
			m[p[0]] = p[1]
		}
	}
	return m, nil
}

// validateEndpoint validates that endpoint is either a context or a docker host
func validateEndpoint(dockerCli command.Cli, ep string) (string, error) {
	dem, err := dockerutil.GetDockerEndpoint(dockerCli, ep)
	if err == nil && dem != nil {
		if ep == "default" {
			return dem.Host, nil
		}
		return ep, nil
	}
	h, err := dopts.ParseHost(true, ep)
	if err != nil {
		return "", errors.Wrapf(err, "failed to parse endpoint %s", ep)
	}
	return h, nil
}

// validateBuildkitEndpoint validates that endpoint is a valid buildkit host
func validateBuildkitEndpoint(ep string) (string, error) {
	if err := remoteutil.IsValidEndpoint(ep); err != nil {
		return "", err
	}
	return ep, nil
}

// parseBuildkitdFlags parses buildkit flags
func parseBuildkitdFlags(inp string, driver string, driverOpts map[string]string, buildkitdConfigFile string) (res []string, err error) {
	if inp != "" {
		res, err = shlex.Split(inp)
		if err != nil {
			return nil, errors.Wrap(err, "failed to parse buildkit flags")
		}
	}

	var allowInsecureEntitlements []string
	flags := pflag.NewFlagSet("buildkitd", pflag.ContinueOnError)
	flags.Usage = func() {}
	flags.StringArrayVar(&allowInsecureEntitlements, "allow-insecure-entitlement", nil, "")
	_ = flags.Parse(res)

	hasNetworkHostEntitlement := slices.Contains(allowInsecureEntitlements, "network.host")

	var hasNetworkHostEntitlementInConf bool
	if buildkitdConfigFile != "" {
		btoml, err := confutil.LoadConfigTree(buildkitdConfigFile)
		if err != nil {
			return nil, err
		} else if btoml != nil {
			if ies := btoml.GetArray("insecure-entitlements"); ies != nil {
				if slices.Contains(ies.([]string), "network.host") {
					hasNetworkHostEntitlementInConf = true
				}
			}
		}
	}

	if v, ok := driverOpts["network"]; ok && v == "host" && !hasNetworkHostEntitlement && driver == "docker-container" {
		// always set network.host entitlement if user has set network=host
		res = append(res, "--allow-insecure-entitlement=network.host")
	} else if len(allowInsecureEntitlements) == 0 && !hasNetworkHostEntitlementInConf && (driver == "kubernetes" || driver == "docker-container") {
		// set network.host entitlement if user does not provide any as
		// network is isolated for container drivers.
		res = append(res, "--allow-insecure-entitlement=network.host")
	}

	return res, nil
}

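For reference, the functional options above compose at a call site roughly as follows. This is a minimal, hypothetical sketch rather than code from the diff: it assumes a docker CLI instance initialized the same way the buildx standalone entrypoint does, and it panics on error purely for brevity.

package main

import (
	"context"
	"fmt"

	"github.com/docker/buildx/builder"
	"github.com/docker/cli/cli/command"
	cliflags "github.com/docker/cli/cli/flags"
)

func main() {
	// Obtain and initialize a docker CLI, as the standalone entrypoint does.
	dockerCli, err := command.NewDockerCli()
	if err != nil {
		panic(err)
	}
	if err := dockerCli.Initialize(cliflags.NewClientOptions()); err != nil {
		panic(err)
	}

	// Resolve the current builder instance; builder.WithName("somebuilder")
	// would select a named one instead. Validation is skipped here, as in
	// GetBuilders above.
	b, err := builder.New(dockerCli, builder.WithSkippedValidation())
	if err != nil {
		panic(err)
	}

	// Load node metadata (driver status, platforms, BuildKit version).
	nodes, err := b.LoadNodes(context.Background(), builder.WithData())
	if err != nil {
		panic(err)
	}
	for _, n := range nodes {
		fmt.Println(n.Name, n.Version, len(n.Platforms))
	}
}
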
204 builder/builder_test.go Normal file
@@ -0,0 +1,204 @@
package builder

import (
	"os"
	"path"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestCsvToMap(t *testing.T) {
	d := []string{
		"\"tolerations=key=foo,value=bar;key=foo2,value=bar2\",replicas=1",
		"namespace=default",
	}
	r, err := csvToMap(d)

	require.NoError(t, err)

	require.Contains(t, r, "tolerations")
	require.Equal(t, "key=foo,value=bar;key=foo2,value=bar2", r["tolerations"])

	require.Contains(t, r, "replicas")
	require.Equal(t, "1", r["replicas"])

	require.Contains(t, r, "namespace")
	require.Equal(t, "default", r["namespace"])
}

func TestParseBuildkitdFlags(t *testing.T) {
	dirConf := t.TempDir()

	buildkitdConfPath := path.Join(dirConf, "buildkitd-conf.toml")
	require.NoError(t, os.WriteFile(buildkitdConfPath, []byte(`
# debug enables additional debug logging
debug = true
# insecure-entitlements allows insecure entitlements, disabled by default.
insecure-entitlements = [ "network.host", "security.insecure" ]
[log]
# log formatter: json or text
format = "text"
`), 0644))

	buildkitdConfBrokenPath := path.Join(dirConf, "buildkitd-conf-broken.toml")
	require.NoError(t, os.WriteFile(buildkitdConfBrokenPath, []byte(`
[worker.oci]
gc = "maybe"
`), 0644))

	buildkitdConfUnknownFieldPath := path.Join(dirConf, "buildkitd-unknown-field.toml")
	require.NoError(t, os.WriteFile(buildkitdConfUnknownFieldPath, []byte(`
foo = "bar"
`), 0644))

	testCases := []struct {
		name                string
		flags               string
		driver              string
		driverOpts          map[string]string
		buildkitdConfigFile string
		expected            []string
		wantErr             bool
	}{
		{
			"docker-container no flags",
			"",
			"docker-container",
			nil,
			"",
			[]string{
				"--allow-insecure-entitlement=network.host",
			},
			false,
		},
		{
			"kubernetes no flags",
			"",
			"kubernetes",
			nil,
			"",
			[]string{
				"--allow-insecure-entitlement=network.host",
			},
			false,
		},
		{
			"remote no flags",
			"",
			"remote",
			nil,
			"",
			nil,
			false,
		},
		{
			"docker-container with insecure flag",
			"--allow-insecure-entitlement=security.insecure",
			"docker-container",
			nil,
			"",
			[]string{
				"--allow-insecure-entitlement=security.insecure",
			},
			false,
		},
		{
			"docker-container with insecure and host flag",
			"--allow-insecure-entitlement=network.host --allow-insecure-entitlement=security.insecure",
			"docker-container",
			nil,
			"",
			[]string{
				"--allow-insecure-entitlement=network.host",
				"--allow-insecure-entitlement=security.insecure",
			},
			false,
		},
		{
			"docker-container with network host opt",
			"",
			"docker-container",
			map[string]string{"network": "host"},
			"",
			[]string{
				"--allow-insecure-entitlement=network.host",
			},
			false,
		},
		{
			"docker-container with host flag and network host opt",
			"--allow-insecure-entitlement=network.host",
			"docker-container",
			map[string]string{"network": "host"},
			"",
			[]string{
				"--allow-insecure-entitlement=network.host",
			},
			false,
		},
		{
			"docker-container with insecure, host flag and network host opt",
			"--allow-insecure-entitlement=network.host --allow-insecure-entitlement=security.insecure",
			"docker-container",
			map[string]string{"network": "host"},
			"",
			[]string{
				"--allow-insecure-entitlement=network.host",
				"--allow-insecure-entitlement=security.insecure",
			},
			false,
		},
		{
			"docker-container with buildkitd conf setting network.host entitlement",
			"",
			"docker-container",
			nil,
			buildkitdConfPath,
			nil,
			false,
		},
		{
			"error parsing flags",
			"foo'",
			"docker-container",
			nil,
			"",
			nil,
			true,
		},
		{
			"error parsing buildkit config",
			"",
			"docker-container",
			nil,
			buildkitdConfBrokenPath,
			nil,
			true,
		},
		{
			"unknown field in buildkit config",
			"",
			"docker-container",
			nil,
			buildkitdConfUnknownFieldPath,
			[]string{
				"--allow-insecure-entitlement=network.host",
			},
			false,
		},
	}
	for _, tt := range testCases {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			flags, err := parseBuildkitdFlags(tt.flags, tt.driver, tt.driverOpts, tt.buildkitdConfigFile)
			if tt.wantErr {
				require.Error(t, err)
				return
			}
			require.NoError(t, err)
			assert.Equal(t, tt.expected, flags)
		})
	}
}

280 builder/node.go Normal file
@@ -0,0 +1,280 @@
package builder

import (
	"context"
	"encoding/json"
	"sort"
	"strings"

	"github.com/containerd/platforms"
	"github.com/docker/buildx/driver"
	"github.com/docker/buildx/store"
	"github.com/docker/buildx/store/storeutil"
	"github.com/docker/buildx/util/dockerutil"
	"github.com/docker/buildx/util/imagetools"
	"github.com/docker/buildx/util/platformutil"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/util/grpcerrors"
	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"golang.org/x/sync/errgroup"
	"google.golang.org/grpc/codes"
)

type Node struct {
	store.Node
	Builder     string
	Driver      *driver.DriverHandle
	DriverInfo  *driver.Info
	ImageOpt    imagetools.Opt
	ProxyConfig map[string]string
	Version     string
	Err         error

	// worker settings
	IDs        []string
	Platforms  []ocispecs.Platform
	GCPolicy   []client.PruneInfo
	Labels     map[string]string
	CDIDevices []client.CDIDevice
}

// Nodes returns nodes for this builder.
func (b *Builder) Nodes() []Node {
	return b.nodes
}

type LoadNodesOption func(*loadNodesOptions)

type loadNodesOptions struct {
	data      bool
	dialMeta  map[string][]string
	clientOpt []client.ClientOpt
}

func WithData() LoadNodesOption {
	return func(o *loadNodesOptions) {
		o.data = true
	}
}

func WithDialMeta(dialMeta map[string][]string) LoadNodesOption {
	return func(o *loadNodesOptions) {
		o.dialMeta = dialMeta
	}
}

func WithClientOpt(clientOpt ...client.ClientOpt) LoadNodesOption {
	return func(o *loadNodesOptions) {
		o.clientOpt = clientOpt
	}
}

// LoadNodes loads and returns nodes for this builder.
// TODO: this should be a method on a Node object and lazy load data for each driver.
func (b *Builder) LoadNodes(ctx context.Context, opts ...LoadNodesOption) (_ []Node, err error) {
	lno := loadNodesOptions{
		data: false,
	}
	for _, opt := range opts {
		opt(&lno)
	}

	eg, _ := errgroup.WithContext(ctx)
	b.nodes = make([]Node, len(b.NodeGroup.Nodes))

	defer func() {
		if b.err == nil && err != nil {
			b.err = err
		}
	}()

	factory, err := b.Factory(ctx, lno.dialMeta)
	if err != nil {
		return nil, err
	}

	imageopt, err := b.ImageOpt()
	if err != nil {
		return nil, err
	}

	for i, n := range b.NodeGroup.Nodes {
		func(i int, n store.Node) {
			eg.Go(func() error {
				node := Node{
					Node:        n,
					ProxyConfig: storeutil.GetProxyConfig(b.opts.dockerCli),
					Platforms:   n.Platforms,
					Builder:     b.Name,
				}
				defer func() {
					b.nodes[i] = node
				}()

				dockerapi, err := dockerutil.NewClientAPI(b.opts.dockerCli, n.Endpoint)
				if err != nil {
					node.Err = err
					return nil
				}

				d, err := driver.GetDriver(ctx, factory, driver.InitConfig{
					Name:            driver.BuilderName(n.Name),
					EndpointAddr:    n.Endpoint,
					DockerAPI:       dockerapi,
					ContextStore:    b.opts.dockerCli.ContextStore(),
					BuildkitdFlags:  n.BuildkitdFlags,
					Files:           n.Files,
					DriverOpts:      n.DriverOpts,
					Auth:            imageopt.Auth,
					Platforms:       n.Platforms,
					ContextPathHash: b.opts.contextPathHash,
					DialMeta:        lno.dialMeta,
				})
				if err != nil {
					node.Err = err
					return nil
				}
				node.Driver = d
				node.ImageOpt = imageopt

				if lno.data {
					if err := node.loadData(ctx, lno.clientOpt...); err != nil {
						node.Err = err
					}
				}
				return nil
			})
		}(i, n)
	}

	if err := eg.Wait(); err != nil {
		return nil, err
	}

	// TODO: This should be done in the routine loading driver data
	if lno.data {
		kubernetesDriverCount := 0
		for _, d := range b.nodes {
			if d.DriverInfo != nil && len(d.DriverInfo.DynamicNodes) > 0 {
				kubernetesDriverCount++
			}
		}

		isAllKubernetesDrivers := len(b.nodes) == kubernetesDriverCount
		if isAllKubernetesDrivers {
			var nodes []Node
			var dynamicNodes []store.Node
			for _, di := range b.nodes {
				// dynamic nodes are used in Kubernetes driver.
				// Kubernetes' pods are dynamically mapped to BuildKit Nodes.
				if di.DriverInfo != nil && len(di.DriverInfo.DynamicNodes) > 0 {
					for i := range di.DriverInfo.DynamicNodes {
						diClone := di
						if pl := di.DriverInfo.DynamicNodes[i].Platforms; len(pl) > 0 {
							diClone.Platforms = pl
						}
						nodes = append(nodes, diClone)
					}
					dynamicNodes = append(dynamicNodes, di.DriverInfo.DynamicNodes...)
				}
			}

			// not append (remove the static nodes in the store)
			b.NodeGroup.Nodes = dynamicNodes
			b.nodes = nodes
			b.NodeGroup.Dynamic = true
		}
	}

	return b.nodes, nil
}

func (n *Node) MarshalJSON() ([]byte, error) {
	var status string
	if n.DriverInfo != nil {
		status = n.DriverInfo.Status.String()
	}
	var nerr string
	if n.Err != nil {
		status = "error"
		nerr = strings.TrimSpace(n.Err.Error())
	}
	var pp []string
	for _, p := range n.Platforms {
		pp = append(pp, platforms.Format(p))
	}
	return json.Marshal(struct {
		Name           string
		Endpoint       string
		BuildkitdFlags []string `json:"Flags,omitempty"`
		DriverOpts     map[string]string `json:",omitempty"`
		Files          map[string][]byte `json:",omitempty"`
		Status         string `json:",omitempty"`
		ProxyConfig    map[string]string `json:",omitempty"`
		Version        string `json:",omitempty"`
		Err            string `json:",omitempty"`
		IDs            []string `json:",omitempty"`
		Platforms      []string `json:",omitempty"`
		GCPolicy       []client.PruneInfo `json:",omitempty"`
		Labels         map[string]string `json:",omitempty"`
	}{
		Name:           n.Name,
		Endpoint:       n.Endpoint,
		BuildkitdFlags: n.BuildkitdFlags,
		DriverOpts:     n.DriverOpts,
		Files:          n.Files,
		Status:         status,
		ProxyConfig:    n.ProxyConfig,
		Version:        n.Version,
		Err:            nerr,
		IDs:            n.IDs,
		Platforms:      pp,
		GCPolicy:       n.GCPolicy,
		Labels:         n.Labels,
	})
}

func (n *Node) loadData(ctx context.Context, clientOpt ...client.ClientOpt) error {
	if n.Driver == nil {
		return nil
	}
	info, err := n.Driver.Info(ctx)
	if err != nil {
		return err
	}
	n.DriverInfo = info
	if n.DriverInfo.Status == driver.Running {
		driverClient, err := n.Driver.Client(ctx, clientOpt...)
		if err != nil {
			return err
		}
		workers, err := driverClient.ListWorkers(ctx)
		if err != nil {
			return errors.Wrap(err, "listing workers")
		}
		for idx, w := range workers {
			n.IDs = append(n.IDs, w.ID)
			n.Platforms = append(n.Platforms, w.Platforms...)
			if idx == 0 {
				n.GCPolicy = w.GCPolicy
				n.Labels = w.Labels
			}
			n.CDIDevices = w.CDIDevices
		}
		sort.Strings(n.IDs)
		n.Platforms = platformutil.Dedupe(n.Platforms)
		inf, err := driverClient.Info(ctx)
		if err != nil {
			if st, ok := grpcerrors.AsGRPCStatus(err); ok && st.Code() == codes.Unimplemented {
				n.Version, err = n.Driver.Version(ctx)
				if err != nil {
					return errors.Wrap(err, "getting version")
				}
			}
		} else {
			n.Version = inf.BuildkitVersion.Version
		}
	}
	return nil
}

75 cmd/buildx/debug.go Normal file
@@ -0,0 +1,75 @@
package main

import (
	"context"
	"os"
	"runtime"
	"runtime/pprof"

	"github.com/moby/buildkit/util/bklog"
	"github.com/sirupsen/logrus"
)

func setupDebugProfiles(ctx context.Context) (stop func()) {
	var stopFuncs []func()
	if fn := setupCPUProfile(ctx); fn != nil {
		stopFuncs = append(stopFuncs, fn)
	}
	if fn := setupHeapProfile(ctx); fn != nil {
		stopFuncs = append(stopFuncs, fn)
	}
	return func() {
		for _, fn := range stopFuncs {
			fn()
		}
	}
}

func setupCPUProfile(ctx context.Context) (stop func()) {
	if cpuProfile := os.Getenv("BUILDX_CPU_PROFILE"); cpuProfile != "" {
		f, err := os.Create(cpuProfile)
		if err != nil {
			bklog.G(ctx).Warn("could not create cpu profile", logrus.WithError(err))
			return nil
		}

		if err := pprof.StartCPUProfile(f); err != nil {
			bklog.G(ctx).Warn("could not start cpu profile", logrus.WithError(err))
			_ = f.Close()
			return nil
		}

		return func() {
			pprof.StopCPUProfile()
			if err := f.Close(); err != nil {
				bklog.G(ctx).Warn("could not close file for cpu profile", logrus.WithError(err))
			}
		}
	}
	return nil
}

func setupHeapProfile(ctx context.Context) (stop func()) {
	if heapProfile := os.Getenv("BUILDX_MEM_PROFILE"); heapProfile != "" {
		// Memory profile is only created on stop.
		return func() {
			f, err := os.Create(heapProfile)
			if err != nil {
				bklog.G(ctx).Warn("could not create memory profile", logrus.WithError(err))
				return
			}

			// get up-to-date statistics
			runtime.GC()

			if err := pprof.WriteHeapProfile(f); err != nil {
				bklog.G(ctx).Warn("could not write memory profile", logrus.WithError(err))
			}

			if err := f.Close(); err != nil {
				bklog.G(ctx).Warn("could not close file for memory profile", logrus.WithError(err))
			}
		}
	}
	return nil
}

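The profiling helpers above are opt-in through BUILDX_CPU_PROFILE and BUILDX_MEM_PROFILE, and the run() function added in the next hunk wires them around command execution. As a rough standalone illustration of the same pattern (not code from the diff; doWork is a hypothetical workload, standard library only, and the resulting file can be examined with go tool pprof):

package main

import (
	"os"
	"runtime/pprof"
	"time"
)

func doWork() { time.Sleep(200 * time.Millisecond) } // stand-in workload

func main() {
	// Profile only when the environment variable names an output file,
	// mirroring setupCPUProfile above.
	if path := os.Getenv("BUILDX_CPU_PROFILE"); path != "" {
		f, err := os.Create(path)
		if err == nil {
			defer f.Close() // runs after StopCPUProfile (deferred later, so executed first)
			if err := pprof.StartCPUProfile(f); err == nil {
				defer pprof.StopCPUProfile()
			}
		}
	}
	doWork()
}
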
@@ -1,54 +1,81 @@
 package main
 
 import (
+	"context"
 	"fmt"
 	"os"
+	"path/filepath"
 
-	"github.com/containerd/containerd/pkg/seed"
 	"github.com/docker/buildx/commands"
+	controllererrors "github.com/docker/buildx/controller/errdefs"
+	"github.com/docker/buildx/util/desktop"
 	"github.com/docker/buildx/version"
 	"github.com/docker/cli/cli"
-	"github.com/docker/cli/cli-plugins/manager"
+	"github.com/docker/cli/cli-plugins/metadata"
 	"github.com/docker/cli/cli-plugins/plugin"
 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/cli/debug"
-	cliflags "github.com/docker/cli/cli/flags"
 	"github.com/moby/buildkit/solver/errdefs"
 	"github.com/moby/buildkit/util/stack"
+	"github.com/pkg/errors"
+	"go.opentelemetry.io/otel"
 
-	_ "k8s.io/client-go/plugin/pkg/client/auth/azure"
-	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
 	_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
-	_ "k8s.io/client-go/plugin/pkg/client/auth/openstack"
 
 	_ "github.com/docker/buildx/driver/docker"
 	_ "github.com/docker/buildx/driver/docker-container"
 	_ "github.com/docker/buildx/driver/kubernetes"
 	_ "github.com/docker/buildx/driver/remote"
+
+	// Use custom grpc codec to utilize vtprotobuf
+	_ "github.com/moby/buildkit/util/grpcutil/encoding/proto"
 )
 
 func init() {
-	seed.WithTimeAndRand()
 	stack.SetVersionInfo(version.Version, version.Revision)
 }
 
 func runStandalone(cmd *command.DockerCli) error {
-	if err := cmd.Initialize(cliflags.NewClientOptions()); err != nil {
-		return err
-	}
-	rootCmd := commands.NewRootCmd(os.Args[0], false, cmd)
+	defer flushMetrics(cmd)
+	executable := os.Args[0]
+	rootCmd := commands.NewRootCmd(filepath.Base(executable), false, cmd)
 	return rootCmd.Execute()
 }
 
+// flushMetrics will manually flush metrics from the configured
+// meter provider. This is needed when running in standalone mode
+// because the meter provider is initialized by the cli library,
+// but the mechanism for forcing it to report is not presently
+// exposed and not invoked when run in standalone mode.
+// There are plans to fix that in the next release, but this is
+// needed temporarily until the API for this is more thorough.
+func flushMetrics(cmd *command.DockerCli) {
+	if mp, ok := cmd.MeterProvider().(command.MeterProvider); ok {
+		if err := mp.ForceFlush(context.Background()); err != nil {
+			otel.Handle(err)
+		}
+	}
+}
+
 func runPlugin(cmd *command.DockerCli) error {
 	rootCmd := commands.NewRootCmd("buildx", true, cmd)
-	return plugin.RunPlugin(cmd, rootCmd, manager.Metadata{
+	return plugin.RunPlugin(cmd, rootCmd, metadata.Metadata{
 		SchemaVersion: "0.1.0",
 		Vendor:        "Docker Inc.",
 		Version:       version.Version,
 	})
 }
 
+func run(cmd *command.DockerCli) error {
+	stopProfiles := setupDebugProfiles(context.TODO())
+	defer stopProfiles()
+
+	if plugin.RunningStandalone() {
+		return runStandalone(cmd)
+	}
+	return runPlugin(cmd)
+}
+
 func main() {
 	cmd, err := command.NewDockerCli()
 	if err != nil {
@@ -56,15 +83,11 @@ func main() {
 		os.Exit(1)
 	}
 
-	if plugin.RunningStandalone() {
-		err = runStandalone(cmd)
-	} else {
-		err = runPlugin(cmd)
-	}
-	if err == nil {
+	if err = run(cmd); err == nil {
 		return
 	}
 
+	// Check the error from the run function above.
 	if sterr, ok := err.(cli.StatusError); ok {
 		if sterr.Status != "" {
 			fmt.Fprintln(cmd.Err(), sterr.Status)
@@ -86,5 +109,15 @@ func main() {
 		fmt.Fprintf(cmd.Err(), "ERROR: %v\n", err)
 	}
+
+	var ebr *desktop.ErrorWithBuildRef
+	if errors.As(err, &ebr) {
+		ebr.Print(cmd.Err())
+	} else {
+		var be *controllererrors.BuildError
+		if errors.As(err, &be) {
+			be.PrintBuildDetails(cmd.Err())
+		}
+	}
 
 	os.Exit(1)
 }

@@ -4,7 +4,6 @@ import (
 	"github.com/moby/buildkit/util/tracing/detect"
 	"go.opentelemetry.io/otel"
 
-	_ "github.com/moby/buildkit/util/tracing/detect/delegated"
 	_ "github.com/moby/buildkit/util/tracing/env"
 )
 

@@ -1 +1,4 @@
 comment: false
+
+ignore:
+  - "**/*.pb.go"

790 commands/bake.go
@@ -1,34 +1,76 @@
 package commands
 
 import (
+	"bytes"
+	"cmp"
 	"context"
+	"crypto/sha256"
+	"encoding/hex"
 	"encoding/json"
 	"fmt"
+	"io"
 	"os"
+	"slices"
+	"sort"
+	"strings"
+	"sync"
+	"text/tabwriter"
 
-	"github.com/containerd/containerd/platforms"
+	"github.com/containerd/console"
+	"github.com/containerd/platforms"
 	"github.com/docker/buildx/bake"
+	"github.com/docker/buildx/bake/hclparser"
 	"github.com/docker/buildx/build"
+	"github.com/docker/buildx/builder"
+	"github.com/docker/buildx/controller/pb"
+	"github.com/docker/buildx/localstate"
+	"github.com/docker/buildx/util/buildflags"
+	"github.com/docker/buildx/util/cobrautil/completion"
 	"github.com/docker/buildx/util/confutil"
+	"github.com/docker/buildx/util/desktop"
+	"github.com/docker/buildx/util/dockerutil"
+	"github.com/docker/buildx/util/osutil"
 	"github.com/docker/buildx/util/progress"
 	"github.com/docker/buildx/util/tracing"
 	"github.com/docker/cli/cli/command"
-	"github.com/moby/buildkit/util/appcontext"
+	"github.com/moby/buildkit/identity"
+	"github.com/moby/buildkit/util/progress/progressui"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
+	"github.com/tonistiigi/go-csvvalue"
+	"go.opentelemetry.io/otel/attribute"
 )
 
 type bakeOptions struct {
 	files     []string
 	overrides []string
-	printOnly bool
-	commonOptions
+	sbom       string
+	provenance string
+	allow      []string
+
+	builder      string
+	metadataFile string
+	exportPush   bool
+	exportLoad   bool
+	callFunc     string
+
+	print bool
+	list  string
+
+	// TODO: remove deprecated flags
+	listTargets bool
+	listVars    bool
 }
 
-func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error) {
-	ctx := appcontext.Context()
+func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in bakeOptions, cFlags commonFlags) (err error) {
+	mp := dockerCli.MeterProvider()
 
-	ctx, end, err := tracing.TraceCurrentCommand(ctx, "bake")
+	ctx, end, err := tracing.TraceCurrentCommand(ctx, append([]string{"bake"}, targets...),
+		attribute.String("builder", in.builder),
+		attribute.StringSlice("targets", targets),
+		attribute.StringSlice("files", in.files),
+	)
 	if err != nil {
 		return err
 	}
@@ -36,82 +78,162 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
 		end(err)
 	}()
 
-	var url string
-	cmdContext := "cwd://"
-	if len(targets) > 0 {
-		if bake.IsRemoteURL(targets[0]) {
-			url = targets[0]
-			targets = targets[1:]
-			if len(targets) > 0 {
-				if bake.IsRemoteURL(targets[0]) {
-					cmdContext = targets[0]
-					targets = targets[1:]
-				}
-			}
-		}
-	}
+	url, cmdContext, targets := bakeArgs(targets)
 
 	if len(targets) == 0 {
 		targets = []string{"default"}
 	}
 
+	callFunc, err := buildflags.ParseCallFunc(in.callFunc)
+	if err != nil {
+		return err
+	}
+
 	overrides := in.overrides
 	if in.exportPush {
-		if in.exportLoad {
-			return errors.Errorf("push and load may not be set together at the moment")
-		}
 		overrides = append(overrides, "*.push=true")
-	} else if in.exportLoad {
-		overrides = append(overrides, "*.output=type=docker")
 	}
-	if in.noCache != nil {
-		overrides = append(overrides, fmt.Sprintf("*.no-cache=%t", *in.noCache))
+	if in.exportLoad {
+		overrides = append(overrides, "*.load=true")
 	}
-	if in.pull != nil {
-		overrides = append(overrides, fmt.Sprintf("*.pull=%t", *in.pull))
+	if callFunc != nil {
+		overrides = append(overrides, fmt.Sprintf("*.call=%s", callFunc.Name))
+	}
+	if cFlags.noCache != nil {
+		overrides = append(overrides, fmt.Sprintf("*.no-cache=%t", *cFlags.noCache))
+	}
+	if cFlags.pull != nil {
+		overrides = append(overrides, fmt.Sprintf("*.pull=%t", *cFlags.pull))
+	}
+	if in.sbom != "" {
+		overrides = append(overrides, fmt.Sprintf("*.attest=%s", buildflags.CanonicalizeAttest("sbom", in.sbom)))
+	}
+	if in.provenance != "" {
+		overrides = append(overrides, fmt.Sprintf("*.attest=%s", buildflags.CanonicalizeAttest("provenance", in.provenance)))
 	}
 	contextPathHash, _ := os.Getwd()
 
-	ctx2, cancel := context.WithCancel(context.TODO())
-	defer cancel()
-	printer := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, in.progress)
-
-	defer func() {
-		if printer != nil {
-			err1 := printer.Wait()
-			if err == nil {
-				err = err1
-			}
-		}
-	}()
-
-	dis, err := getInstanceOrDefault(ctx, dockerCli, in.builder, contextPathHash)
+	ent, err := bake.ParseEntitlements(in.allow)
+	if err != nil {
+		return err
+	}
+	wd, err := os.Getwd()
+	if err != nil {
+		return errors.Wrapf(err, "failed to get current working directory")
+	}
+	// filesystem access under the current working directory is allowed by default
+	ent.FSRead = append(ent.FSRead, wd)
+	ent.FSWrite = append(ent.FSWrite, wd)
+
+	ctx2, cancel := context.WithCancelCause(context.TODO())
+	defer cancel(errors.WithStack(context.Canceled))
+
+	var nodes []builder.Node
+	var progressConsoleDesc, progressTextDesc string
+
+	if in.print && in.list != "" {
+		return errors.New("--print and --list are mutually exclusive")
+	}
+
+	// instance only needed for reading remote bake files or building
+	var driverType string
+	if url != "" || !(in.print || in.list != "") {
+		b, err := builder.New(dockerCli,
+			builder.WithName(in.builder),
+			builder.WithContextPathHash(contextPathHash),
+		)
+		if err != nil {
+			return err
+		}
+		if err = updateLastActivity(dockerCli, b.NodeGroup); err != nil {
+			return errors.Wrapf(err, "failed to update builder last activity time")
+		}
+		nodes, err = b.LoadNodes(ctx)
+		if err != nil {
+			return err
+		}
+		progressConsoleDesc = fmt.Sprintf("%s:%s", b.Driver, b.Name)
+		progressTextDesc = fmt.Sprintf("building with %q instance using %s driver", b.Name, b.Driver)
+		driverType = b.Driver
+	}
+
+	var term bool
+	if _, err := console.ConsoleFromFile(os.Stderr); err == nil {
+		term = true
+	}
+	attributes := bakeMetricAttributes(dockerCli, driverType, url, cmdContext, targets, &in)
+
+	progressMode := progressui.DisplayMode(cFlags.progress)
+	var printer *progress.Printer
+
+	makePrinter := func() error {
+		var err error
+		printer, err = progress.NewPrinter(ctx2, os.Stderr, progressMode,
+			progress.WithDesc(progressTextDesc, progressConsoleDesc),
+			progress.WithMetrics(mp, attributes),
+			progress.WithOnClose(func() {
+				printWarnings(os.Stderr, printer.Warnings(), progressMode)
+			}),
+		)
+		return err
+	}
+
+	if err := makePrinter(); err != nil {
+		return err
+	}
+
+	files, inp, err := readBakeFiles(ctx, nodes, url, in.files, dockerCli.In(), printer)
 	if err != nil {
 		return err
 	}
 
-	var files []bake.File
+	if len(files) == 0 {
|
||||||
var inp *bake.Input
|
return errors.New("couldn't find a bake definition")
|
||||||
|
|
||||||
if url != "" {
|
|
||||||
files, inp, err = bake.ReadRemoteFiles(ctx, dis, url, in.files, printer)
|
|
||||||
} else {
|
|
||||||
files, err = bake.ReadLocalFiles(in.files)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
tgts, grps, err := bake.ReadTargets(ctx, files, targets, overrides, map[string]string{
|
defaults := map[string]string{
|
||||||
// don't forget to update documentation if you add a new
|
// don't forget to update documentation if you add a new
|
||||||
// built-in variable: docs/guides/bake/file-definition.md#built-in-variables
|
// built-in variable: docs/bake-reference.md#built-in-variables
|
||||||
"BAKE_CMD_CONTEXT": cmdContext,
|
"BAKE_CMD_CONTEXT": cmdContext,
|
||||||
"BAKE_LOCAL_PLATFORM": platforms.DefaultString(),
|
"BAKE_LOCAL_PLATFORM": platforms.Format(platforms.DefaultSpec()),
|
||||||
})
|
}
|
||||||
|
|
||||||
|
if in.list != "" {
|
||||||
|
cfg, pm, err := bake.ParseFiles(files, defaults)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
if err = printer.Wait(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
list, err := parseList(in.list)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
switch list.Type {
|
||||||
|
case "targets":
|
||||||
|
return printTargetList(dockerCli.Out(), list.Format, cfg)
|
||||||
|
case "variables":
|
||||||
|
return printVars(dockerCli.Out(), list.Format, pm.AllVariables)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
tgts, grps, err := bake.ReadTargets(ctx, files, targets, overrides, defaults, &ent)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if v := os.Getenv("SOURCE_DATE_EPOCH"); v != "" {
|
||||||
|
// TODO: extract env var parsing to a method easily usable by library consumers
|
||||||
|
for _, t := range tgts {
|
||||||
|
if _, ok := t.Args["SOURCE_DATE_EPOCH"]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if t.Args == nil {
|
||||||
|
t.Args = map[string]*string{}
|
||||||
|
}
|
||||||
|
t.Args["SOURCE_DATE_EPOCH"] = &v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// this function can update target context string from the input so call before printOnly check
|
// this function can update target context string from the input so call before printOnly check
|
||||||
bo, err := bake.TargetsToBuildOpt(tgts, inp)
|
bo, err := bake.TargetsToBuildOpt(tgts, inp)
|
||||||
@@ -119,52 +241,207 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if in.printOnly {
|
def := struct {
|
||||||
var defg map[string]*bake.Group
|
|
||||||
if len(grps) == 1 {
|
|
||||||
defg = map[string]*bake.Group{
|
|
||||||
"default": grps[0],
|
|
||||||
}
|
|
||||||
}
|
|
||||||
dt, err := json.MarshalIndent(struct {
|
|
||||||
Group map[string]*bake.Group `json:"group,omitempty"`
|
Group map[string]*bake.Group `json:"group,omitempty"`
|
||||||
Target map[string]*bake.Target `json:"target"`
|
Target map[string]*bake.Target `json:"target"`
|
||||||
}{
|
}{
|
||||||
defg,
|
Group: grps,
|
||||||
tgts,
|
Target: tgts,
|
||||||
}, "", " ")
|
}
|
||||||
|
|
||||||
|
if in.print {
|
||||||
|
if err = printer.Wait(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
dtdef, err := json.MarshalIndent(def, "", " ")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
err = printer.Wait()
|
_, err = fmt.Fprintln(dockerCli.Out(), string(dtdef))
|
||||||
printer = nil
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, opt := range bo {
|
||||||
|
if opt.CallFunc != nil {
|
||||||
|
cf, err := buildflags.ParseCallFunc(opt.CallFunc.Name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
fmt.Fprintln(dockerCli.Out(), string(dt))
|
opt.CallFunc.Name = cf.Name
|
||||||
return nil
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, err := build.Build(ctx, dis, bo, dockerAPI(dockerCli), confutil.ConfigDir(dockerCli), printer)
|
exp, err := ent.Validate(bo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return wrapBuildError(err, true)
|
return err
|
||||||
|
}
|
||||||
|
if progressMode != progressui.RawJSONMode {
|
||||||
|
if err := exp.Prompt(ctx, url != "", &syncWriter{w: dockerCli.Err(), wait: printer.Wait}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if printer.IsDone() {
|
||||||
|
// init new printer as old one was stopped to show the prompt
|
||||||
|
if err := makePrinter(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if err := saveLocalStateGroup(dockerCli, in, targets, bo); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
done := timeBuildCommand(mp, attributes)
|
||||||
|
resp, retErr := build.Build(ctx, nodes, bo, dockerutil.NewClient(dockerCli), confutil.NewConfig(dockerCli), printer)
|
||||||
|
if err := printer.Wait(); retErr == nil {
|
||||||
|
retErr = err
|
||||||
|
}
|
||||||
|
if retErr != nil {
|
||||||
|
err = wrapBuildError(retErr, true)
|
||||||
|
}
|
||||||
|
done(err)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if progressMode != progressui.QuietMode && progressMode != progressui.RawJSONMode {
|
||||||
|
desktop.PrintBuildDetails(os.Stderr, printer.BuildRefs(), term)
|
||||||
|
}
|
||||||
if len(in.metadataFile) > 0 {
|
if len(in.metadataFile) > 0 {
|
||||||
dt := make(map[string]interface{})
|
dt := make(map[string]any)
|
||||||
for t, r := range resp {
|
for t, r := range resp {
|
||||||
dt[t] = decodeExporterResponse(r.ExporterResponse)
|
dt[t] = decodeExporterResponse(r.ExporterResponse)
|
||||||
}
|
}
|
||||||
|
if callFunc == nil {
|
||||||
|
if warnings := printer.Warnings(); len(warnings) > 0 && confutil.MetadataWarningsEnabled() {
|
||||||
|
dt["buildx.build.warnings"] = warnings
|
||||||
|
}
|
||||||
|
}
|
||||||
if err := writeMetadataFile(in.metadataFile, dt); err != nil {
|
if err := writeMetadataFile(in.metadataFile, dt); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var callFormatJSON bool
|
||||||
|
jsonResults := map[string]map[string]any{}
|
||||||
|
if callFunc != nil {
|
||||||
|
callFormatJSON = callFunc.Format == "json"
|
||||||
|
}
|
||||||
|
var sep bool
|
||||||
|
var exitCode int
|
||||||
|
|
||||||
|
names := make([]string, 0, len(bo))
|
||||||
|
for name := range bo {
|
||||||
|
names = append(names, name)
|
||||||
|
}
|
||||||
|
slices.Sort(names)
|
||||||
|
|
||||||
|
for _, name := range names {
|
||||||
|
req := bo[name]
|
||||||
|
if req.CallFunc == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
pf := &pb.CallFunc{
|
||||||
|
Name: req.CallFunc.Name,
|
||||||
|
Format: req.CallFunc.Format,
|
||||||
|
IgnoreStatus: req.CallFunc.IgnoreStatus,
|
||||||
|
}
|
||||||
|
|
||||||
|
if callFunc != nil {
|
||||||
|
pf.Format = callFunc.Format
|
||||||
|
pf.IgnoreStatus = callFunc.IgnoreStatus
|
||||||
|
}
|
||||||
|
|
||||||
|
var res map[string]string
|
||||||
|
if sp, ok := resp[name]; ok {
|
||||||
|
res = sp.ExporterResponse
|
||||||
|
}
|
||||||
|
|
||||||
|
if callFormatJSON {
|
||||||
|
jsonResults[name] = map[string]any{}
|
||||||
|
buf := &bytes.Buffer{}
|
||||||
|
if code, err := printResult(buf, pf, res, name, &req.Inputs); err != nil {
|
||||||
|
jsonResults[name]["error"] = err.Error()
|
||||||
|
exitCode = 1
|
||||||
|
} else if code != 0 && exitCode == 0 {
|
||||||
|
exitCode = code
|
||||||
|
}
|
||||||
|
m := map[string]*json.RawMessage{}
|
||||||
|
if err := json.Unmarshal(buf.Bytes(), &m); err == nil {
|
||||||
|
for k, v := range m {
|
||||||
|
jsonResults[name][k] = v
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
jsonResults[name][pf.Name] = json.RawMessage(buf.Bytes())
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if sep {
|
||||||
|
fmt.Fprintln(dockerCli.Out())
|
||||||
|
} else {
|
||||||
|
sep = true
|
||||||
|
}
|
||||||
|
fmt.Fprintf(dockerCli.Out(), "%s\n", name)
|
||||||
|
if descr := tgts[name].Description; descr != "" {
|
||||||
|
fmt.Fprintf(dockerCli.Out(), "%s\n", descr)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprintln(dockerCli.Out())
|
||||||
|
if code, err := printResult(dockerCli.Out(), pf, res, name, &req.Inputs); err != nil {
|
||||||
|
fmt.Fprintf(dockerCli.Out(), "error: %v\n", err)
|
||||||
|
exitCode = 1
|
||||||
|
} else if code != 0 && exitCode == 0 {
|
||||||
|
exitCode = code
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if callFormatJSON {
|
||||||
|
out := struct {
|
||||||
|
Group map[string]*bake.Group `json:"group,omitempty"`
|
||||||
|
Target map[string]map[string]any `json:"target"`
|
||||||
|
}{
|
||||||
|
Group: grps,
|
||||||
|
Target: map[string]map[string]any{},
|
||||||
|
}
|
||||||
|
|
||||||
|
for name, def := range tgts {
|
||||||
|
out.Target[name] = map[string]any{
|
||||||
|
"build": def,
|
||||||
|
}
|
||||||
|
if res, ok := jsonResults[name]; ok {
|
||||||
|
printName := bo[name].CallFunc.Name
|
||||||
|
if printName == "lint" {
|
||||||
|
printName = "check"
|
||||||
|
}
|
||||||
|
out.Target[name][printName] = res
|
||||||
|
}
|
||||||
|
}
|
||||||
|
dt, err := json.MarshalIndent(out, "", " ")
|
||||||
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
}
|
||||||
|
fmt.Fprintln(dockerCli.Out(), string(dt))
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, name := range names {
|
||||||
|
if sp, ok := resp[name]; ok {
|
||||||
|
if v, ok := sp.ExporterResponse["frontend.result.inlinemessage"]; ok {
|
||||||
|
fmt.Fprintf(dockerCli.Out(), "\n# %s\n%s\n", name, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if exitCode != 0 {
|
||||||
|
os.Exit(exitCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
||||||
var options bakeOptions
|
var options bakeOptions
|
||||||
|
var cFlags commonFlags
|
||||||
|
|
||||||
cmd := &cobra.Command{
|
cmd := &cobra.Command{
|
||||||
Use: "bake [OPTIONS] [TARGET...]",
|
Use: "bake [OPTIONS] [TARGET...]",
|
||||||
@@ -173,25 +450,376 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
// reset to nil to avoid override is unset
|
// reset to nil to avoid override is unset
|
||||||
if !cmd.Flags().Lookup("no-cache").Changed {
|
if !cmd.Flags().Lookup("no-cache").Changed {
|
||||||
options.noCache = nil
|
cFlags.noCache = nil
|
||||||
}
|
}
|
||||||
if !cmd.Flags().Lookup("pull").Changed {
|
if !cmd.Flags().Lookup("pull").Changed {
|
||||||
options.pull = nil
|
cFlags.pull = nil
|
||||||
}
|
}
|
||||||
options.commonOptions.builder = rootOpts.builder
|
if options.list == "" {
|
||||||
return runBake(dockerCli, args, options)
|
if options.listTargets {
|
||||||
|
options.list = "targets"
|
||||||
|
} else if options.listVars {
|
||||||
|
options.list = "variables"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
options.builder = rootOpts.builder
|
||||||
|
options.metadataFile = cFlags.metadataFile
|
||||||
|
// Other common flags (noCache, pull and progress) are processed in runBake function.
|
||||||
|
return runBake(cmd.Context(), dockerCli, args, options, cFlags)
|
||||||
},
|
},
|
||||||
|
ValidArgsFunction: completion.BakeTargets(options.files),
|
||||||
}
|
}
|
||||||
|
|
||||||
flags := cmd.Flags()
|
flags := cmd.Flags()
|
||||||
|
|
||||||
flags.StringArrayVarP(&options.files, "file", "f", []string{}, "Build definition file")
|
flags.StringArrayVarP(&options.files, "file", "f", []string{}, "Build definition file")
|
||||||
flags.BoolVar(&options.exportLoad, "load", false, `Shorthand for "--set=*.output=type=docker"`)
|
flags.BoolVar(&options.exportLoad, "load", false, `Shorthand for "--set=*.output=type=docker"`)
|
||||||
flags.BoolVar(&options.printOnly, "print", false, "Print the options without building")
|
|
||||||
flags.BoolVar(&options.exportPush, "push", false, `Shorthand for "--set=*.output=type=registry"`)
|
flags.BoolVar(&options.exportPush, "push", false, `Shorthand for "--set=*.output=type=registry"`)
|
||||||
|
flags.StringVar(&options.sbom, "sbom", "", `Shorthand for "--set=*.attest=type=sbom"`)
|
||||||
|
flags.StringVar(&options.provenance, "provenance", "", `Shorthand for "--set=*.attest=type=provenance"`)
|
||||||
flags.StringArrayVar(&options.overrides, "set", nil, `Override target value (e.g., "targetpattern.key=value")`)
|
flags.StringArrayVar(&options.overrides, "set", nil, `Override target value (e.g., "targetpattern.key=value")`)
|
||||||
|
flags.StringVar(&options.callFunc, "call", "build", `Set method for evaluating build ("check", "outline", "targets")`)
|
||||||
|
flags.StringArrayVar(&options.allow, "allow", nil, "Allow build to access specified resources")
|
||||||
|
|
||||||
commonBuildFlags(&options.commonOptions, flags)
|
flags.VarPF(callAlias(&options.callFunc, "check"), "check", "", `Shorthand for "--call=check"`)
|
||||||
|
flags.Lookup("check").NoOptDefVal = "true"
|
||||||
|
|
||||||
|
flags.BoolVar(&options.print, "print", false, "Print the options without building")
|
||||||
|
flags.StringVar(&options.list, "list", "", "List targets or variables")
|
||||||
|
|
||||||
|
// TODO: remove deprecated flags
|
||||||
|
flags.BoolVar(&options.listTargets, "list-targets", false, "List available targets")
|
||||||
|
flags.MarkHidden("list-targets")
|
||||||
|
flags.MarkDeprecated("list-targets", "list-targets is deprecated, use list=targets instead")
|
||||||
|
flags.BoolVar(&options.listVars, "list-variables", false, "List defined variables")
|
||||||
|
flags.MarkHidden("list-variables")
|
||||||
|
flags.MarkDeprecated("list-variables", "list-variables is deprecated, use list=variables instead")
|
||||||
|
|
||||||
|
commonBuildFlags(&cFlags, flags)
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func saveLocalStateGroup(dockerCli command.Cli, in bakeOptions, targets []string, bo map[string]build.Options) error {
|
||||||
|
l, err := localstate.New(confutil.NewConfig(dockerCli))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer l.MigrateIfNeeded()
|
||||||
|
|
||||||
|
prm := confutil.MetadataProvenance()
|
||||||
|
if len(in.metadataFile) == 0 {
|
||||||
|
prm = confutil.MetadataProvenanceModeDisabled
|
||||||
|
}
|
||||||
|
groupRef := identity.NewID()
|
||||||
|
refs := make([]string, 0, len(bo))
|
||||||
|
for k, b := range bo {
|
||||||
|
if b.CallFunc != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
b.Ref = identity.NewID()
|
||||||
|
b.GroupRef = groupRef
|
||||||
|
b.ProvenanceResponseMode = prm
|
||||||
|
refs = append(refs, b.Ref)
|
||||||
|
bo[k] = b
|
||||||
|
}
|
||||||
|
if len(refs) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return l.SaveGroup(groupRef, localstate.StateGroup{
|
||||||
|
Refs: refs,
|
||||||
|
Targets: targets,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// bakeArgs will retrieve the remote url, command context, and targets
|
||||||
|
// from the command line arguments.
|
||||||
|
func bakeArgs(args []string) (url, cmdContext string, targets []string) {
|
||||||
|
cmdContext, targets = "cwd://", args
|
||||||
|
if len(targets) == 0 || !build.IsRemoteURL(targets[0]) {
|
||||||
|
return url, cmdContext, targets
|
||||||
|
}
|
||||||
|
url, targets = targets[0], targets[1:]
|
||||||
|
if len(targets) == 0 || !build.IsRemoteURL(targets[0]) {
|
||||||
|
return url, cmdContext, targets
|
||||||
|
}
|
||||||
|
cmdContext, targets = targets[0], targets[1:]
|
||||||
|
return url, cmdContext, targets
|
||||||
|
}
|
||||||
|
|
||||||
|
func readBakeFiles(ctx context.Context, nodes []builder.Node, url string, names []string, stdin io.Reader, pw progress.Writer) (files []bake.File, inp *bake.Input, err error) {
|
||||||
|
var lnames []string // local
|
||||||
|
var rnames []string // remote
|
||||||
|
var anames []string // both
|
||||||
|
for _, v := range names {
|
||||||
|
if strings.HasPrefix(v, "cwd://") {
|
||||||
|
tname := strings.TrimPrefix(v, "cwd://")
|
||||||
|
lnames = append(lnames, tname)
|
||||||
|
anames = append(anames, tname)
|
||||||
|
} else {
|
||||||
|
rnames = append(rnames, v)
|
||||||
|
anames = append(anames, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if url != "" {
|
||||||
|
var rfiles []bake.File
|
||||||
|
rfiles, inp, err = bake.ReadRemoteFiles(ctx, nodes, url, rnames, pw)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
files = append(files, rfiles...)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(lnames) > 0 || url == "" {
|
||||||
|
var lfiles []bake.File
|
||||||
|
progress.Wrap("[internal] load local bake definitions", pw.Write, func(sub progress.SubLogger) error {
|
||||||
|
if url != "" {
|
||||||
|
lfiles, err = bake.ReadLocalFiles(lnames, stdin, sub)
|
||||||
|
} else {
|
||||||
|
lfiles, err = bake.ReadLocalFiles(anames, stdin, sub)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
files = append(files, lfiles...)
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
type listEntry struct {
|
||||||
|
Type string
|
||||||
|
Format string
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseList(input string) (listEntry, error) {
|
||||||
|
res := listEntry{}
|
||||||
|
|
||||||
|
fields, err := csvvalue.Fields(input, nil)
|
||||||
|
if err != nil {
|
||||||
|
return res, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(fields) == 1 && fields[0] == input && !strings.HasPrefix(input, "type=") {
|
||||||
|
res.Type = input
|
||||||
|
}
|
||||||
|
|
||||||
|
if res.Type == "" {
|
||||||
|
for _, field := range fields {
|
||||||
|
key, value, ok := strings.Cut(field, "=")
|
||||||
|
if !ok {
|
||||||
|
return res, errors.Errorf("invalid value %s", field)
|
||||||
|
}
|
||||||
|
key = strings.TrimSpace(strings.ToLower(key))
|
||||||
|
switch key {
|
||||||
|
case "type":
|
||||||
|
res.Type = value
|
||||||
|
case "format":
|
||||||
|
res.Format = value
|
||||||
|
default:
|
||||||
|
return res, errors.Errorf("unexpected key '%s' in '%s'", key, field)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if res.Format == "" {
|
||||||
|
res.Format = "table"
|
||||||
|
}
|
||||||
|
|
||||||
|
switch res.Type {
|
||||||
|
case "targets", "variables":
|
||||||
|
default:
|
||||||
|
return res, errors.Errorf("invalid list type %q", res.Type)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch res.Format {
|
||||||
|
case "table", "json":
|
||||||
|
default:
|
||||||
|
return res, errors.Errorf("invalid list format %q", res.Format)
|
||||||
|
}
|
||||||
|
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func printVars(w io.Writer, format string, vars []*hclparser.Variable) error {
|
||||||
|
slices.SortFunc(vars, func(a, b *hclparser.Variable) int {
|
||||||
|
return cmp.Compare(a.Name, b.Name)
|
||||||
|
})
|
||||||
|
|
||||||
|
if format == "json" {
|
||||||
|
enc := json.NewEncoder(w)
|
||||||
|
enc.SetIndent("", " ")
|
||||||
|
return enc.Encode(vars)
|
||||||
|
}
|
||||||
|
|
||||||
|
tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
|
||||||
|
defer tw.Flush()
|
||||||
|
|
||||||
|
tw.Write([]byte("VARIABLE\tVALUE\tDESCRIPTION\n"))
|
||||||
|
|
||||||
|
for _, v := range vars {
|
||||||
|
var value string
|
||||||
|
if v.Value != nil {
|
||||||
|
value = *v.Value
|
||||||
|
} else {
|
||||||
|
value = "<null>"
|
||||||
|
}
|
||||||
|
fmt.Fprintf(tw, "%s\t%s\t%s\n", v.Name, value, v.Description)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func printTargetList(w io.Writer, format string, cfg *bake.Config) error {
|
||||||
|
type targetOrGroup struct {
|
||||||
|
name string
|
||||||
|
target *bake.Target
|
||||||
|
group *bake.Group
|
||||||
|
}
|
||||||
|
|
||||||
|
list := make([]targetOrGroup, 0, len(cfg.Targets)+len(cfg.Groups))
|
||||||
|
for _, tgt := range cfg.Targets {
|
||||||
|
list = append(list, targetOrGroup{name: tgt.Name, target: tgt})
|
||||||
|
}
|
||||||
|
for _, grp := range cfg.Groups {
|
||||||
|
list = append(list, targetOrGroup{name: grp.Name, group: grp})
|
||||||
|
}
|
||||||
|
|
||||||
|
slices.SortFunc(list, func(a, b targetOrGroup) int {
|
||||||
|
return cmp.Compare(a.name, b.name)
|
||||||
|
})
|
||||||
|
|
||||||
|
var tw *tabwriter.Writer
|
||||||
|
if format == "table" {
|
||||||
|
tw = tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
|
||||||
|
defer tw.Flush()
|
||||||
|
tw.Write([]byte("TARGET\tDESCRIPTION\n"))
|
||||||
|
}
|
||||||
|
|
||||||
|
type targetList struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Description string `json:"description,omitempty"`
|
||||||
|
Group bool `json:"group,omitempty"`
|
||||||
|
}
|
||||||
|
var targetsList []targetList
|
||||||
|
|
||||||
|
for _, tgt := range list {
|
||||||
|
if strings.HasPrefix(tgt.name, "_") {
|
||||||
|
// convention for a private target
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
var descr string
|
||||||
|
if tgt.target != nil {
|
||||||
|
descr = tgt.target.Description
|
||||||
|
targetsList = append(targetsList, targetList{Name: tgt.name, Description: descr})
|
||||||
|
} else if tgt.group != nil {
|
||||||
|
descr = tgt.group.Description
|
||||||
|
if len(tgt.group.Targets) > 0 {
|
||||||
|
slices.Sort(tgt.group.Targets)
|
||||||
|
names := strings.Join(tgt.group.Targets, ", ")
|
||||||
|
if descr != "" {
|
||||||
|
descr += " (" + names + ")"
|
||||||
|
} else {
|
||||||
|
descr = names
|
||||||
|
}
|
||||||
|
}
|
||||||
|
targetsList = append(targetsList, targetList{Name: tgt.name, Description: descr, Group: true})
|
||||||
|
}
|
||||||
|
if format == "table" {
|
||||||
|
fmt.Fprintf(tw, "%s\t%s\n", tgt.name, descr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if format == "json" {
|
||||||
|
enc := json.NewEncoder(w)
|
||||||
|
enc.SetIndent("", " ")
|
||||||
|
return enc.Encode(targetsList)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func bakeMetricAttributes(dockerCli command.Cli, driverType, url, cmdContext string, targets []string, options *bakeOptions) attribute.Set {
|
||||||
|
return attribute.NewSet(
|
||||||
|
commandNameAttribute.String("bake"),
|
||||||
|
attribute.Stringer(string(commandOptionsHash), &bakeOptionsHash{
|
||||||
|
bakeOptions: options,
|
||||||
|
cfg: confutil.NewConfig(dockerCli),
|
||||||
|
url: url,
|
||||||
|
cmdContext: cmdContext,
|
||||||
|
targets: targets,
|
||||||
|
}),
|
||||||
|
driverNameAttribute.String(options.builder),
|
||||||
|
driverTypeAttribute.String(driverType),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
type bakeOptionsHash struct {
|
||||||
|
*bakeOptions
|
||||||
|
cfg *confutil.Config
|
||||||
|
url string
|
||||||
|
cmdContext string
|
||||||
|
targets []string
|
||||||
|
result string
|
||||||
|
resultOnce sync.Once
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *bakeOptionsHash) String() string {
|
||||||
|
o.resultOnce.Do(func() {
|
||||||
|
url := o.url
|
||||||
|
cmdContext := o.cmdContext
|
||||||
|
if cmdContext == "cwd://" {
|
||||||
|
// Resolve the directory if the cmdContext is the current working directory.
|
||||||
|
cmdContext = osutil.GetWd()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort the inputs for files and targets since the ordering
|
||||||
|
// doesn't matter, but avoid modifying the original slice.
|
||||||
|
files := immutableSort(o.files)
|
||||||
|
targets := immutableSort(o.targets)
|
||||||
|
|
||||||
|
joinedFiles := strings.Join(files, ",")
|
||||||
|
joinedTargets := strings.Join(targets, ",")
|
||||||
|
salt := o.cfg.TryNodeIdentifier()
|
||||||
|
|
||||||
|
h := sha256.New()
|
||||||
|
for _, s := range []string{url, cmdContext, joinedFiles, joinedTargets, salt} {
|
||||||
|
_, _ = io.WriteString(h, s)
|
||||||
|
h.Write([]byte{0})
|
||||||
|
}
|
||||||
|
o.result = hex.EncodeToString(h.Sum(nil))
|
||||||
|
})
|
||||||
|
return o.result
|
||||||
|
}
|
||||||
|
|
||||||
|
// immutableSort will sort the entries in s without modifying the original slice.
|
||||||
|
func immutableSort(s []string) []string {
|
||||||
|
if !sort.StringsAreSorted(s) {
|
||||||
|
cpy := make([]string, len(s))
|
||||||
|
copy(cpy, s)
|
||||||
|
sort.Strings(cpy)
|
||||||
|
return cpy
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
type syncWriter struct {
|
||||||
|
w io.Writer
|
||||||
|
once sync.Once
|
||||||
|
wait func() error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *syncWriter) Write(p []byte) (n int, err error) {
|
||||||
|
w.once.Do(func() {
|
||||||
|
if w.wait != nil {
|
||||||
|
err = w.wait()
|
||||||
|
}
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return w.w.Write(p)
|
||||||
|
}
|
||||||
|
|||||||
1266
commands/build.go
1266
commands/build.go
File diff suppressed because it is too large
Load Diff
@@ -3,24 +3,15 @@ package commands
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"encoding/csv"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/url"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
|
"github.com/docker/buildx/builder"
|
||||||
"github.com/docker/buildx/driver"
|
"github.com/docker/buildx/driver"
|
||||||
"github.com/docker/buildx/store"
|
|
||||||
"github.com/docker/buildx/store/storeutil"
|
"github.com/docker/buildx/store/storeutil"
|
||||||
"github.com/docker/buildx/util/cobrautil"
|
"github.com/docker/buildx/util/cobrautil"
|
||||||
"github.com/docker/buildx/util/confutil"
|
"github.com/docker/buildx/util/cobrautil/completion"
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/google/shlex"
|
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -32,249 +23,60 @@ type createOptions struct {
|
|||||||
actionAppend bool
|
actionAppend bool
|
||||||
actionLeave bool
|
actionLeave bool
|
||||||
use bool
|
use bool
|
||||||
flags string
|
|
||||||
configFile string
|
|
||||||
driverOpts []string
|
driverOpts []string
|
||||||
|
buildkitdFlags string
|
||||||
|
buildkitdConfigFile string
|
||||||
bootstrap bool
|
bootstrap bool
|
||||||
// upgrade bool // perform upgrade of the driver
|
// upgrade bool // perform upgrade of the driver
|
||||||
}
|
}
|
||||||
|
|
||||||
func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, args []string) error {
|
||||||
ctx := appcontext.Context()
|
|
||||||
|
|
||||||
if in.name == "default" {
|
|
||||||
return errors.Errorf("default is a reserved name and cannot be used to identify builder instance")
|
|
||||||
}
|
|
||||||
|
|
||||||
if in.actionLeave {
|
|
||||||
if in.name == "" {
|
|
||||||
return errors.Errorf("leave requires instance name")
|
|
||||||
}
|
|
||||||
if in.nodeName == "" {
|
|
||||||
return errors.Errorf("leave requires node name but --node not set")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if in.actionAppend {
|
|
||||||
if in.name == "" {
|
|
||||||
logrus.Warnf("append used without name, creating a new instance instead")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
txn, release, err := storeutil.GetStore(dockerCli)
|
txn, release, err := storeutil.GetStore(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
// Ensure the file lock gets released no matter what happens.
|
||||||
defer release()
|
defer release()
|
||||||
|
|
||||||
name := in.name
|
|
||||||
if name == "" {
|
|
||||||
name, err = store.GenerateName(txn)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !in.actionLeave && !in.actionAppend {
|
|
||||||
contexts, err := dockerCli.ContextStore().List()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, c := range contexts {
|
|
||||||
if c.Name == name {
|
|
||||||
logrus.Warnf("instance name %q already exists as context builder", name)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ng, err := txn.NodeGroupByName(name)
|
|
||||||
if err != nil {
|
|
||||||
if os.IsNotExist(errors.Cause(err)) {
|
|
||||||
if in.actionAppend && in.name != "" {
|
|
||||||
logrus.Warnf("failed to find %q for append, creating a new instance instead", in.name)
|
|
||||||
}
|
|
||||||
if in.actionLeave {
|
if in.actionLeave {
|
||||||
return errors.Errorf("failed to find instance %q for leave", in.name)
|
return builder.Leave(ctx, txn, dockerCli, builder.LeaveOpts{
|
||||||
}
|
Name: in.name,
|
||||||
} else {
|
NodeName: in.nodeName,
|
||||||
return err
|
})
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
buildkitHost := os.Getenv("BUILDKIT_HOST")
|
|
||||||
|
|
||||||
driverName := in.driver
|
|
||||||
if driverName == "" {
|
|
||||||
if ng != nil {
|
|
||||||
driverName = ng.Driver
|
|
||||||
} else if len(args) == 0 && buildkitHost != "" {
|
|
||||||
driverName = "remote"
|
|
||||||
} else {
|
|
||||||
var arg string
|
|
||||||
if len(args) > 0 {
|
|
||||||
arg = args[0]
|
|
||||||
}
|
|
||||||
f, err := driver.GetDefaultFactory(ctx, arg, dockerCli.Client(), true)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if f == nil {
|
|
||||||
return errors.Errorf("no valid drivers found")
|
|
||||||
}
|
|
||||||
driverName = f.Name()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if ng != nil {
|
|
||||||
if in.nodeName == "" && !in.actionAppend {
|
|
||||||
return errors.Errorf("existing instance for %q but no append mode, specify --node to make changes for existing instances", name)
|
|
||||||
}
|
|
||||||
if driverName != ng.Driver {
|
|
||||||
return errors.Errorf("existing instance for %q but has mismatched driver %q", name, ng.Driver)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if driver.GetFactory(driverName, true) == nil {
|
|
||||||
return errors.Errorf("failed to find driver %q", driverName)
|
|
||||||
}
|
|
||||||
|
|
||||||
ngOriginal := ng
|
|
||||||
if ngOriginal != nil {
|
|
||||||
ngOriginal = ngOriginal.Copy()
|
|
||||||
}
|
|
||||||
|
|
||||||
if ng == nil {
|
|
||||||
ng = &store.NodeGroup{
|
|
||||||
Name: name,
|
|
||||||
Driver: driverName,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var flags []string
|
|
||||||
if in.flags != "" {
|
|
||||||
flags, err = shlex.Split(in.flags)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "failed to parse buildkit flags")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var ep string
|
var ep string
|
||||||
var setEp bool
|
|
||||||
if in.actionLeave {
|
|
||||||
if err := ng.Leave(in.nodeName); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
switch {
|
|
||||||
case driverName == "kubernetes":
|
|
||||||
if len(args) > 0 {
|
|
||||||
logrus.Warnf("kubernetes driver does not support endpoint args %q", args[0])
|
|
||||||
}
|
|
||||||
// naming endpoint to make --append works
|
|
||||||
ep = (&url.URL{
|
|
||||||
Scheme: driverName,
|
|
||||||
Path: "/" + in.name,
|
|
||||||
RawQuery: (&url.Values{
|
|
||||||
"deployment": {in.nodeName},
|
|
||||||
"kubeconfig": {os.Getenv("KUBECONFIG")},
|
|
||||||
}).Encode(),
|
|
||||||
}).String()
|
|
||||||
setEp = false
|
|
||||||
case driverName == "remote":
|
|
||||||
if len(args) > 0 {
|
if len(args) > 0 {
|
||||||
ep = args[0]
|
ep = args[0]
|
||||||
} else if buildkitHost != "" {
|
|
||||||
ep = buildkitHost
|
|
||||||
} else {
|
|
||||||
return errors.Errorf("no remote endpoint provided")
|
|
||||||
}
|
|
||||||
ep, err = validateBuildkitEndpoint(ep)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
setEp = true
|
|
||||||
case len(args) > 0:
|
|
||||||
ep, err = validateEndpoint(dockerCli, args[0])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
setEp = true
|
|
||||||
default:
|
|
||||||
if dockerCli.CurrentContext() == "default" && dockerCli.DockerEndpoint().TLSData != nil {
|
|
||||||
return errors.Errorf("could not create a builder instance with TLS data loaded from environment. Please use `docker context create <context-name>` to create a context for current environment and then create a builder instance with `docker buildx create <context-name>`")
|
|
||||||
}
|
|
||||||
ep, err = storeutil.GetCurrentEndpoint(dockerCli)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
setEp = false
|
|
||||||
}
|
}
|
||||||
|
|
||||||
m, err := csvToMap(in.driverOpts)
|
b, err := builder.Create(ctx, txn, dockerCli, builder.CreateOpts{
|
||||||
|
Name: in.name,
|
||||||
|
Driver: in.driver,
|
||||||
|
NodeName: in.nodeName,
|
||||||
|
Platforms: in.platform,
|
||||||
|
DriverOpts: in.driverOpts,
|
||||||
|
BuildkitdFlags: in.buildkitdFlags,
|
||||||
|
BuildkitdConfigFile: in.buildkitdConfigFile,
|
||||||
|
Use: in.use,
|
||||||
|
Endpoint: ep,
|
||||||
|
Append: in.actionAppend,
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if in.configFile == "" {
|
// The store is no longer used from this point.
|
||||||
// if buildkit config is not provided, check if the default one is
|
// Release it so we aren't holding the file lock during the boot.
|
||||||
// available and use it
|
release()
|
||||||
if f, ok := confutil.DefaultConfigFile(dockerCli); ok {
|
|
||||||
logrus.Warnf("Using default BuildKit config in %s", f)
|
|
||||||
in.configFile = f
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := ng.Update(in.nodeName, ep, in.platform, setEp, in.actionAppend, flags, in.configFile, m); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := txn.Save(ng); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
ngi := &nginfo{ng: ng}
|
|
||||||
|
|
||||||
timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
if err = loadNodeGroupData(timeoutCtx, dockerCli, ngi); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, info := range ngi.drivers {
|
|
||||||
if err := info.di.Err; err != nil {
|
|
||||||
err := errors.Errorf("failed to initialize builder %s (%s): %s", ng.Name, info.di.Name, err)
|
|
||||||
var err2 error
|
|
||||||
if ngOriginal == nil {
|
|
||||||
err2 = txn.Remove(ng.Name)
|
|
||||||
} else {
|
|
||||||
err2 = txn.Save(ngOriginal)
|
|
||||||
}
|
|
||||||
if err2 != nil {
|
|
||||||
logrus.Warnf("Could not rollback to previous state: %s", err2)
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if in.use && ep != "" {
|
|
||||||
current, err := storeutil.GetCurrentEndpoint(dockerCli)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := txn.SetCurrent(current, ng.Name, false, false); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if in.bootstrap {
|
if in.bootstrap {
|
||||||
if _, err = boot(ctx, ngi); err != nil {
|
if _, err = b.Boot(ctx); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("%s\n", ng.Name)
|
fmt.Printf("%s\n", b.Name)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -282,7 +84,7 @@ func createCmd(dockerCli command.Cli) *cobra.Command {
|
|||||||
var options createOptions
|
var options createOptions
|
||||||
|
|
||||||
var drivers bytes.Buffer
|
var drivers bytes.Buffer
|
||||||
for _, d := range driver.GetFactories() {
|
for _, d := range driver.GetFactories(true) {
|
||||||
if len(drivers.String()) > 0 {
|
if len(drivers.String()) > 0 {
|
||||||
drivers.WriteString(", ")
|
drivers.WriteString(", ")
|
||||||
}
|
}
|
||||||
@@ -294,8 +96,9 @@ func createCmd(dockerCli command.Cli) *cobra.Command {
|
|||||||
Short: "Create a new builder instance",
|
Short: "Create a new builder instance",
|
||||||
Args: cli.RequiresMaxArgs(1),
|
Args: cli.RequiresMaxArgs(1),
|
||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
return runCreate(dockerCli, options, args)
|
return runCreate(cmd.Context(), dockerCli, options, args)
|
||||||
},
|
},
|
||||||
|
ValidArgsFunction: completion.Disable,
|
||||||
}
|
}
|
||||||
|
|
||||||
flags := cmd.Flags()
|
flags := cmd.Flags()
|
||||||
@@ -303,12 +106,16 @@ func createCmd(dockerCli command.Cli) *cobra.Command {
|
|||||||
flags.StringVar(&options.name, "name", "", "Builder instance name")
|
flags.StringVar(&options.name, "name", "", "Builder instance name")
|
||||||
flags.StringVar(&options.driver, "driver", "", fmt.Sprintf("Driver to use (available: %s)", drivers.String()))
|
flags.StringVar(&options.driver, "driver", "", fmt.Sprintf("Driver to use (available: %s)", drivers.String()))
|
||||||
flags.StringVar(&options.nodeName, "node", "", "Create/modify node with given name")
|
flags.StringVar(&options.nodeName, "node", "", "Create/modify node with given name")
|
||||||
flags.StringVar(&options.flags, "buildkitd-flags", "", "Flags for buildkitd daemon")
|
|
||||||
flags.StringVar(&options.configFile, "config", "", "BuildKit config file")
|
|
||||||
flags.StringArrayVar(&options.platform, "platform", []string{}, "Fixed platforms for current node")
|
flags.StringArrayVar(&options.platform, "platform", []string{}, "Fixed platforms for current node")
|
||||||
flags.StringArrayVar(&options.driverOpts, "driver-opt", []string{}, "Options for the driver")
|
flags.StringArrayVar(&options.driverOpts, "driver-opt", []string{}, "Options for the driver")
|
||||||
flags.BoolVar(&options.bootstrap, "bootstrap", false, "Boot builder after creation")
|
flags.StringVar(&options.buildkitdFlags, "buildkitd-flags", "", "BuildKit daemon flags")
|
||||||
|
|
||||||
|
// we allow for both "--config" and "--buildkitd-config", although the latter is the recommended way to avoid ambiguity.
|
||||||
|
flags.StringVar(&options.buildkitdConfigFile, "buildkitd-config", "", "BuildKit daemon config file")
|
||||||
|
flags.StringVar(&options.buildkitdConfigFile, "config", "", "BuildKit daemon config file")
|
||||||
|
flags.MarkHidden("config")
|
||||||
|
|
||||||
|
flags.BoolVar(&options.bootstrap, "bootstrap", false, "Boot builder after creation")
|
||||||
flags.BoolVar(&options.actionAppend, "append", false, "Append a node to builder instead of changing it")
|
flags.BoolVar(&options.actionAppend, "append", false, "Append a node to builder instead of changing it")
|
||||||
flags.BoolVar(&options.actionLeave, "leave", false, "Remove a node from builder instead of changing it")
|
flags.BoolVar(&options.actionLeave, "leave", false, "Remove a node from builder instead of changing it")
|
||||||
flags.BoolVar(&options.use, "use", false, "Set the current builder instance")
|
flags.BoolVar(&options.use, "use", false, "Set the current builder instance")
|
||||||
@@ -318,25 +125,3 @@ func createCmd(dockerCli command.Cli) *cobra.Command {
|
|||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|
||||||
func csvToMap(in []string) (map[string]string, error) {
|
|
||||||
if len(in) == 0 {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
m := make(map[string]string, len(in))
|
|
||||||
for _, s := range in {
|
|
||||||
csvReader := csv.NewReader(strings.NewReader(s))
|
|
||||||
fields, err := csvReader.Read()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
for _, v := range fields {
|
|
||||||
p := strings.SplitN(v, "=", 2)
|
|
||||||
if len(p) != 2 {
|
|
||||||
return nil, errors.Errorf("invalid value %q, expecting k=v", v)
|
|
||||||
}
|
|
||||||
m[p[0]] = p[1]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return m, nil
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,26 +0,0 @@
|
|||||||
package commands
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestCsvToMap(t *testing.T) {
|
|
||||||
d := []string{
|
|
||||||
"\"tolerations=key=foo,value=bar;key=foo2,value=bar2\",replicas=1",
|
|
||||||
"namespace=default",
|
|
||||||
}
|
|
||||||
r, err := csvToMap(d)
|
|
||||||
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
require.Contains(t, r, "tolerations")
|
|
||||||
require.Equal(t, r["tolerations"], "key=foo,value=bar;key=foo2,value=bar2")
|
|
||||||
|
|
||||||
require.Contains(t, r, "replicas")
|
|
||||||
require.Equal(t, r["replicas"], "1")
|
|
||||||
|
|
||||||
require.Contains(t, r, "namespace")
|
|
||||||
require.Equal(t, r["namespace"], "default")
|
|
||||||
}
|
|
||||||
92
commands/debug/root.go
Normal file
92
commands/debug/root.go
Normal file
@@ -0,0 +1,92 @@
|
|||||||
|
package debug
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"os"
|
||||||
|
"runtime"
|
||||||
|
|
||||||
|
"github.com/containerd/console"
|
||||||
|
"github.com/docker/buildx/controller"
|
||||||
|
"github.com/docker/buildx/controller/control"
|
||||||
|
controllerapi "github.com/docker/buildx/controller/pb"
|
||||||
|
"github.com/docker/buildx/monitor"
|
||||||
|
"github.com/docker/buildx/util/cobrautil"
|
||||||
|
"github.com/docker/buildx/util/progress"
|
||||||
|
"github.com/docker/cli/cli/command"
|
||||||
|
"github.com/moby/buildkit/util/progress/progressui"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DebugConfig is a user-specified configuration for the debugger.
|
||||||
|
type DebugConfig struct {
|
||||||
|
// InvokeFlag is a flag to configure the launched debugger and the commaned executed on the debugger.
|
||||||
|
InvokeFlag string
|
||||||
|
|
||||||
|
// OnFlag is a flag to configure the timing of launching the debugger.
|
||||||
|
OnFlag string
|
||||||
|
}
|
||||||
|
|
||||||
|
// DebuggableCmd is a command that supports debugger with recognizing the user-specified DebugConfig.
|
||||||
|
type DebuggableCmd interface {
|
||||||
|
// NewDebugger returns the new *cobra.Command with support for the debugger with recognizing DebugConfig.
|
||||||
|
NewDebugger(*DebugConfig) *cobra.Command
|
||||||
|
}
|
||||||
|
|
||||||
|
func RootCmd(dockerCli command.Cli, children ...DebuggableCmd) *cobra.Command {
|
||||||
|
var controlOptions control.ControlOptions
|
||||||
|
var progressMode string
|
||||||
|
var options DebugConfig
|
||||||
|
|
||||||
|
cmd := &cobra.Command{
|
||||||
|
Use: "debug",
|
||||||
|
Short: "Start debugger",
|
||||||
|
Args: cobra.NoArgs,
|
||||||
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
printer, err := progress.NewPrinter(context.TODO(), os.Stderr, progressui.DisplayMode(progressMode))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.TODO()
|
||||||
|
c, err := controller.NewController(ctx, controlOptions, dockerCli, printer)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if err := c.Close(); err != nil {
|
||||||
|
logrus.Warnf("failed to close server connection %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
con := console.Current()
|
||||||
|
if err := con.SetRaw(); err != nil {
|
||||||
|
return errors.Errorf("failed to configure terminal: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = monitor.RunMonitor(ctx, "", nil, &controllerapi.InvokeConfig{
|
||||||
|
Tty: true,
|
||||||
|
}, c, dockerCli.In(), os.Stdout, os.Stderr, printer)
|
||||||
|
con.Reset()
|
||||||
|
return err
|
||||||
|
},
|
||||||
|
}
|
||||||
|
cobrautil.MarkCommandExperimental(cmd)
|
||||||
|
|
||||||
|
flags := cmd.Flags()
|
||||||
|
flags.StringVar(&options.InvokeFlag, "invoke", "", "Launch a monitor with executing specified command")
|
||||||
|
flags.StringVar(&options.OnFlag, "on", "error", "When to launch the monitor ([always, error])")
|
||||||
|
|
||||||
|
flags.StringVar(&controlOptions.Root, "root", "", "Specify root directory of server to connect for the monitor")
|
||||||
|
flags.BoolVar(&controlOptions.Detach, "detach", runtime.GOOS == "linux", "Detach buildx server for the monitor (supported only on linux)")
|
||||||
|
flags.StringVar(&controlOptions.ServerConfig, "server-config", "", "Specify buildx server config file for the monitor (used only when launching new server)")
|
||||||
|
flags.StringVar(&progressMode, "progress", "auto", `Set type of progress output ("auto", "plain", "tty", "rawjson") for the monitor. Use plain to show container output`)
|
||||||
|
|
||||||
|
cobrautil.MarkFlagsExperimental(flags, "invoke", "on", "root", "detach", "server-config")
|
||||||
|
|
||||||
|
for _, c := range children {
|
||||||
|
cmd.AddCommand(c.NewDebugger(&options))
|
||||||
|
}
|
||||||
|
|
||||||
|
return cmd
|
||||||
|
}
|
||||||
131
commands/dial_stdio.go
Normal file
131
commands/dial_stdio.go
Normal file
@@ -0,0 +1,131 @@
|
|||||||
|
package commands
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/containerd/platforms"
|
||||||
|
"github.com/docker/buildx/build"
|
||||||
|
"github.com/docker/buildx/builder"
|
||||||
|
"github.com/docker/buildx/util/progress"
|
||||||
|
"github.com/docker/cli/cli/command"
|
||||||
|
"github.com/moby/buildkit/util/appcontext"
|
||||||
|
"github.com/moby/buildkit/util/progress/progressui"
|
||||||
|
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
"golang.org/x/sync/errgroup"
|
||||||
|
)
|
||||||
|
|
||||||
|
type stdioOptions struct {
|
||||||
|
builder string
|
||||||
|
platform string
|
||||||
|
progress string
|
||||||
|
}
|
||||||
|
|
||||||
|
func runDialStdio(dockerCli command.Cli, opts stdioOptions) error {
|
||||||
|
ctx := appcontext.Context()
|
||||||
|
|
||||||
|
contextPathHash, _ := os.Getwd()
|
||||||
|
b, err := builder.New(dockerCli,
|
||||||
|
builder.WithName(opts.builder),
|
||||||
|
builder.WithContextPathHash(contextPathHash),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = updateLastActivity(dockerCli, b.NodeGroup); err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to update builder last activity time")
|
||||||
|
}
|
||||||
|
nodes, err := b.LoadNodes(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
printer, err := progress.NewPrinter(ctx, os.Stderr, progressui.DisplayMode(opts.progress), progress.WithPhase("dial-stdio"), progress.WithDesc("builder: "+b.Name, "builder:"+b.Name))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var p *v1.Platform
|
||||||
|
if opts.platform != "" {
|
||||||
|
pp, err := platforms.Parse(opts.platform)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "invalid platform %q", opts.platform)
|
||||||
|
}
|
||||||
|
p = &pp
|
||||||
|
}
|
||||||
|
|
||||||
|
defer printer.Wait()
|
||||||
|
|
||||||
|
return progress.Wrap("Proxying to builder", printer.Write, func(sub progress.SubLogger) error {
|
||||||
|
var conn net.Conn
|
||||||
|
|
||||||
|
err := sub.Wrap("Dialing builder", func() error {
|
||||||
|
conn, err = build.Dial(ctx, nodes, printer, p)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer conn.Close()
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
<-ctx.Done()
|
||||||
|
closeWrite(conn)
|
||||||
|
}()
|
||||||
|
|
||||||
|
var eg errgroup.Group
|
||||||
|
|
||||||
|
eg.Go(func() error {
|
||||||
|
_, err := io.Copy(conn, os.Stdin)
|
||||||
|
closeWrite(conn)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
eg.Go(func() error {
|
||||||
|
_, err := io.Copy(os.Stdout, conn)
|
||||||
|
closeRead(conn)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
return eg.Wait()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func closeRead(conn net.Conn) error {
|
||||||
|
if c, ok := conn.(interface{ CloseRead() error }); ok {
|
||||||
|
return c.CloseRead()
|
||||||
|
}
|
||||||
|
return conn.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func closeWrite(conn net.Conn) error {
|
||||||
|
if c, ok := conn.(interface{ CloseWrite() error }); ok {
|
||||||
|
return c.CloseWrite()
|
||||||
|
}
|
||||||
|
return conn.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func dialStdioCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
||||||
|
opts := stdioOptions{}
|
||||||
|
|
||||||
|
cmd := &cobra.Command{
|
||||||
|
Use: "dial-stdio",
|
||||||
|
Short: "Proxy current stdio streams to builder instance",
|
||||||
|
Args: cobra.NoArgs,
|
||||||
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
opts.builder = rootOpts.builder
|
||||||
|
return runDialStdio(dockerCli, opts)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
flags := cmd.Flags()
|
||||||
|
flags.StringVar(&opts.platform, "platform", os.Getenv("DOCKER_DEFAULT_PLATFORM"), "Target platform: this is used for node selection")
|
||||||
|
flags.StringVar(&opts.progress, "progress", "quiet", `Set type of progress output ("auto", "plain", "tty", "rawjson"). Use plain to show container output`)
|
||||||
|
return cmd
|
||||||
|
}
|
||||||
@@ -1,6 +1,7 @@
|
|||||||
package commands
|
package commands
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
@@ -8,13 +9,13 @@ import (
|
|||||||
"text/tabwriter"
|
"text/tabwriter"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/docker/buildx/build"
|
"github.com/docker/buildx/builder"
|
||||||
|
"github.com/docker/buildx/util/cobrautil/completion"
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/docker/cli/opts"
|
"github.com/docker/cli/opts"
|
||||||
"github.com/docker/go-units"
|
"github.com/docker/go-units"
|
||||||
"github.com/moby/buildkit/client"
|
"github.com/moby/buildkit/client"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
)
|
)
|
||||||
@@ -25,33 +26,35 @@ type duOptions struct {
|
|||||||
verbose bool
|
verbose bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func runDiskUsage(dockerCli command.Cli, opts duOptions) error {
|
func runDiskUsage(ctx context.Context, dockerCli command.Cli, opts duOptions) error {
|
||||||
ctx := appcontext.Context()
|
|
||||||
|
|
||||||
pi, err := toBuildkitPruneInfo(opts.filter.Value())
|
pi, err := toBuildkitPruneInfo(opts.filter.Value())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
dis, err := getInstanceOrDefault(ctx, dockerCli, opts.builder, "")
|
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, di := range dis {
|
nodes, err := b.LoadNodes(ctx)
|
||||||
if di.Err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
for _, node := range nodes {
|
||||||
|
if node.Err != nil {
|
||||||
|
return node.Err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
out := make([][]*client.UsageInfo, len(dis))
|
out := make([][]*client.UsageInfo, len(nodes))
|
||||||
|
|
||||||
eg, ctx := errgroup.WithContext(ctx)
|
eg, ctx := errgroup.WithContext(ctx)
|
||||||
for i, di := range dis {
|
for i, node := range nodes {
|
||||||
func(i int, di build.DriverInfo) {
|
func(i int, node builder.Node) {
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
if di.Driver != nil {
|
if node.Driver != nil {
|
||||||
c, err := di.Driver.Client(ctx)
|
c, err := node.Driver.Client(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -64,7 +67,7 @@ func runDiskUsage(dockerCli command.Cli, opts duOptions) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
}(i, di)
|
}(i, node)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := eg.Wait(); err != nil {
|
if err := eg.Wait(); err != nil {
|
||||||
@@ -109,8 +112,9 @@ func duCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
Args: cli.NoArgs,
|
Args: cli.NoArgs,
|
||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
options.builder = rootOpts.builder
|
options.builder = rootOpts.builder
|
||||||
return runDiskUsage(dockerCli, options)
|
return runDiskUsage(cmd.Context(), dockerCli, options)
|
||||||
},
|
},
|
||||||
|
ValidArgsFunction: completion.Disable,
|
||||||
}
|
}
|
||||||
|
|
||||||
flags := cmd.Flags()
|
flags := cmd.Flags()
|
||||||
@@ -120,7 +124,7 @@ func duCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|
||||||
func printKV(w io.Writer, k string, v interface{}) {
|
func printKV(w io.Writer, k string, v any) {
|
||||||
fmt.Fprintf(w, "%s:\t%v\n", k, v)
|
fmt.Fprintf(w, "%s:\t%v\n", k, v)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
160
commands/history/export.go
Normal file
160
commands/history/export.go
Normal file
@@ -0,0 +1,160 @@
|
|||||||
|
package history
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"slices"
|
||||||
|
|
||||||
|
"github.com/containerd/console"
|
||||||
|
"github.com/containerd/platforms"
|
||||||
|
"github.com/docker/buildx/builder"
|
||||||
|
"github.com/docker/buildx/localstate"
|
||||||
|
"github.com/docker/buildx/util/cobrautil/completion"
|
||||||
|
"github.com/docker/buildx/util/confutil"
|
||||||
|
"github.com/docker/buildx/util/desktop/bundle"
|
||||||
|
"github.com/docker/cli/cli/command"
|
||||||
|
"github.com/moby/buildkit/client"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
type exportOptions struct {
|
||||||
|
builder string
|
||||||
|
refs []string
|
||||||
|
output string
|
||||||
|
all bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func runExport(ctx context.Context, dockerCli command.Cli, opts exportOptions) error {
|
||||||
|
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
nodes, err := b.LoadNodes(ctx, builder.WithData())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, node := range nodes {
|
||||||
|
if node.Err != nil {
|
||||||
|
return node.Err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(opts.refs) == 0 {
|
||||||
|
opts.refs = []string{""}
|
||||||
|
}
|
||||||
|
|
||||||
|
var res []historyRecord
|
||||||
|
for _, ref := range opts.refs {
|
||||||
|
recs, err := queryRecords(ctx, ref, nodes, &queryOptions{
|
||||||
|
CompletedOnly: true,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(recs) == 0 {
|
||||||
|
if ref == "" {
|
||||||
|
return errors.New("no records found")
|
||||||
|
}
|
||||||
|
return errors.Errorf("no record found for ref %q", ref)
|
||||||
|
}
|
||||||
|
|
||||||
|
if ref == "" {
|
||||||
|
slices.SortFunc(recs, func(a, b historyRecord) int {
|
||||||
|
return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.all {
|
||||||
|
res = append(res, recs...)
|
||||||
|
break
|
||||||
|
} else {
|
||||||
|
res = append(res, recs[0])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ls, err := localstate.New(confutil.NewConfig(dockerCli))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
visited := map[*builder.Node]struct{}{}
|
||||||
|
var clients []*client.Client
|
||||||
|
for _, rec := range res {
|
||||||
|
if _, ok := visited[rec.node]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
c, err := rec.node.Driver.Client(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
clients = append(clients, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
toExport := make([]*bundle.Record, 0, len(res))
|
||||||
|
for _, rec := range res {
|
||||||
|
var defaultPlatform string
|
||||||
|
if p := rec.node.Platforms; len(p) > 0 {
|
||||||
|
defaultPlatform = platforms.FormatAll(platforms.Normalize(p[0]))
|
||||||
|
}
|
||||||
|
|
||||||
|
var stg *localstate.StateGroup
|
||||||
|
st, _ := ls.ReadRef(rec.node.Builder, rec.node.Name, rec.Ref)
|
||||||
|
if st != nil && st.GroupRef != "" {
|
||||||
|
stg, err = ls.ReadGroup(st.GroupRef)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
toExport = append(toExport, &bundle.Record{
|
||||||
|
BuildHistoryRecord: rec.BuildHistoryRecord,
|
||||||
|
DefaultPlatform: defaultPlatform,
|
||||||
|
LocalState: st,
|
||||||
|
StateGroup: stg,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
var w io.Writer = os.Stdout
|
||||||
|
if opts.output != "" {
|
||||||
|
f, err := os.Create(opts.output)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to create output file %q", opts.output)
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
w = f
|
||||||
|
} else {
|
||||||
|
if _, err := console.ConsoleFromFile(os.Stdout); err == nil {
|
||||||
|
return errors.Errorf("refusing to write to console, use --output to specify a file")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return bundle.Export(ctx, clients, w, toExport)
|
||||||
|
}
|
||||||
|
|
||||||
|
func exportCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
|
||||||
|
var options exportOptions
|
||||||
|
|
||||||
|
cmd := &cobra.Command{
|
||||||
|
Use: "export [OPTIONS] [REF]",
|
||||||
|
Short: "Export a build into Docker Desktop bundle",
|
||||||
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
if options.all && len(args) > 0 {
|
||||||
|
return errors.New("cannot specify refs when using --all")
|
||||||
|
}
|
||||||
|
options.refs = args
|
||||||
|
options.builder = *rootOpts.Builder
|
||||||
|
return runExport(cmd.Context(), dockerCli, options)
|
||||||
|
},
|
||||||
|
ValidArgsFunction: completion.Disable,
|
||||||
|
}
|
||||||
|
|
||||||
|
flags := cmd.Flags()
|
||||||
|
flags.StringVarP(&options.output, "output", "o", "", "Output file path")
|
||||||
|
flags.BoolVar(&options.all, "all", false, "Export all records for the builder")
|
||||||
|
|
||||||
|
return cmd
|
||||||
|
}
|
||||||
135
commands/history/import.go
Normal file
135
commands/history/import.go
Normal file
@@ -0,0 +1,135 @@
|
|||||||
|
package history
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
remoteutil "github.com/docker/buildx/driver/remote/util"
|
||||||
|
"github.com/docker/buildx/util/cobrautil/completion"
|
||||||
|
"github.com/docker/buildx/util/desktop"
|
||||||
|
"github.com/docker/cli/cli/command"
|
||||||
|
"github.com/pkg/browser"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
type importOptions struct {
|
||||||
|
file []string
|
||||||
|
}
|
||||||
|
|
||||||
|
func runImport(ctx context.Context, dockerCli command.Cli, opts importOptions) error {
|
||||||
|
sock, err := desktop.BuildServerAddr()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
tr := http.DefaultTransport.(*http.Transport).Clone()
|
||||||
|
tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) {
|
||||||
|
network, addr, ok := strings.Cut(sock, "://")
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.Errorf("invalid endpoint address: %s", sock)
|
||||||
|
}
|
||||||
|
return remoteutil.DialContext(ctx, network, addr)
|
||||||
|
}
|
||||||
|
|
||||||
|
client := &http.Client{
|
||||||
|
Transport: tr,
|
||||||
|
}
|
||||||
|
|
||||||
|
var urls []string
|
||||||
|
|
||||||
|
if len(opts.file) == 0 {
|
||||||
|
u, err := importFrom(ctx, client, os.Stdin)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
urls = append(urls, u...)
|
||||||
|
} else {
|
||||||
|
for _, fn := range opts.file {
|
||||||
|
var f *os.File
|
||||||
|
var rdr io.Reader = os.Stdin
|
||||||
|
if fn != "-" {
|
||||||
|
f, err = os.Open(fn)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to open file %s", fn)
|
||||||
|
}
|
||||||
|
rdr = f
|
||||||
|
}
|
||||||
|
u, err := importFrom(ctx, client, rdr)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
urls = append(urls, u...)
|
||||||
|
if f != nil {
|
||||||
|
f.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(urls) == 0 {
|
||||||
|
return errors.New("no build records found in the bundle")
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, url := range urls {
|
||||||
|
fmt.Fprintln(dockerCli.Err(), url)
|
||||||
|
if i == 0 {
|
||||||
|
err = browser.OpenURL(url)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func importFrom(ctx context.Context, c *http.Client, rdr io.Reader) ([]string, error) {
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodPost, "http://docker-desktop/upload", rdr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "failed to create request")
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := c.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "failed to send request, check if Docker Desktop is running")
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
body, _ := io.ReadAll(resp.Body)
|
||||||
|
return nil, errors.Errorf("failed to import build: %s", string(body))
|
||||||
|
}
|
||||||
|
|
||||||
|
var refs []string
|
||||||
|
dec := json.NewDecoder(resp.Body)
|
||||||
|
if err := dec.Decode(&refs); err != nil {
|
||||||
|
return nil, errors.Wrap(err, "failed to decode response")
|
||||||
|
}
|
||||||
|
|
||||||
|
var urls []string
|
||||||
|
for _, ref := range refs {
|
||||||
|
urls = append(urls, desktop.BuildURL(fmt.Sprintf(".imported/_/%s", ref)))
|
||||||
|
}
|
||||||
|
return urls, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func importCmd(dockerCli command.Cli, _ RootOptions) *cobra.Command {
|
||||||
|
var options importOptions
|
||||||
|
|
||||||
|
cmd := &cobra.Command{
|
||||||
|
Use: "import [OPTIONS] < bundle.dockerbuild",
|
||||||
|
Short: "Import a build into Docker Desktop",
|
||||||
|
Args: cobra.NoArgs,
|
||||||
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
return runImport(cmd.Context(), dockerCli, options)
|
||||||
|
},
|
||||||
|
ValidArgsFunction: completion.Disable,
|
||||||
|
}
|
||||||
|
|
||||||
|
flags := cmd.Flags()
|
||||||
|
flags.StringArrayVarP(&options.file, "file", "f", nil, "Import from a file path")
|
||||||
|
|
||||||
|
return cmd
|
||||||
|
}
|
||||||
893
commands/history/inspect.go
Normal file
893
commands/history/inspect.go
Normal file
@@ -0,0 +1,893 @@
|
|||||||
|
package history
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"cmp"
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"slices"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"text/tabwriter"
|
||||||
|
"text/template"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/containerd/containerd/v2/core/content"
|
||||||
|
"github.com/containerd/containerd/v2/core/content/proxy"
|
||||||
|
"github.com/containerd/containerd/v2/core/images"
|
||||||
|
"github.com/containerd/platforms"
|
||||||
|
"github.com/docker/buildx/builder"
|
||||||
|
"github.com/docker/buildx/localstate"
|
||||||
|
"github.com/docker/buildx/util/cobrautil/completion"
|
||||||
|
"github.com/docker/buildx/util/confutil"
|
||||||
|
"github.com/docker/buildx/util/desktop"
|
||||||
|
"github.com/docker/cli/cli/command"
|
||||||
|
"github.com/docker/cli/cli/command/formatter"
|
||||||
|
"github.com/docker/cli/cli/debug"
|
||||||
|
slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common"
|
||||||
|
slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2"
|
||||||
|
controlapi "github.com/moby/buildkit/api/services/control"
|
||||||
|
"github.com/moby/buildkit/client"
|
||||||
|
"github.com/moby/buildkit/solver/errdefs"
|
||||||
|
provenancetypes "github.com/moby/buildkit/solver/llbsolver/provenance/types"
|
||||||
|
"github.com/moby/buildkit/util/grpcerrors"
|
||||||
|
"github.com/moby/buildkit/util/stack"
|
||||||
|
"github.com/opencontainers/go-digest"
|
||||||
|
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
"github.com/tonistiigi/go-csvvalue"
|
||||||
|
spb "google.golang.org/genproto/googleapis/rpc/status"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
proto "google.golang.org/protobuf/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
type statusT string
|
||||||
|
|
||||||
|
const (
|
||||||
|
statusComplete statusT = "completed"
|
||||||
|
statusRunning statusT = "running"
|
||||||
|
statusError statusT = "failed"
|
||||||
|
statusCanceled statusT = "canceled"
|
||||||
|
)
|
||||||
|
|
||||||
|
type inspectOptions struct {
|
||||||
|
builder string
|
||||||
|
ref string
|
||||||
|
format string
|
||||||
|
}
|
||||||
|
|
||||||
|
type inspectOutput struct {
|
||||||
|
Name string `json:",omitempty"`
|
||||||
|
Ref string
|
||||||
|
|
||||||
|
Context string `json:",omitempty"`
|
||||||
|
Dockerfile string `json:",omitempty"`
|
||||||
|
VCSRepository string `json:",omitempty"`
|
||||||
|
VCSRevision string `json:",omitempty"`
|
||||||
|
Target string `json:",omitempty"`
|
||||||
|
Platform []string `json:",omitempty"`
|
||||||
|
KeepGitDir bool `json:",omitempty"`
|
||||||
|
|
||||||
|
NamedContexts []keyValueOutput `json:",omitempty"`
|
||||||
|
|
||||||
|
StartedAt *time.Time `json:",omitempty"`
|
||||||
|
CompletedAt *time.Time `json:",omitempty"`
|
||||||
|
Duration time.Duration `json:",omitempty"`
|
||||||
|
Status statusT `json:",omitempty"`
|
||||||
|
Error *errorOutput `json:",omitempty"`
|
||||||
|
|
||||||
|
NumCompletedSteps int32
|
||||||
|
NumTotalSteps int32
|
||||||
|
NumCachedSteps int32
|
||||||
|
|
||||||
|
BuildArgs []keyValueOutput `json:",omitempty"`
|
||||||
|
Labels []keyValueOutput `json:",omitempty"`
|
||||||
|
|
||||||
|
Config configOutput `json:",omitempty"`
|
||||||
|
|
||||||
|
Materials []materialOutput `json:",omitempty"`
|
||||||
|
Attachments []attachmentOutput `json:",omitempty"`
|
||||||
|
|
||||||
|
Errors []string `json:",omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type configOutput struct {
|
||||||
|
Network string `json:",omitempty"`
|
||||||
|
ExtraHosts []string `json:",omitempty"`
|
||||||
|
Hostname string `json:",omitempty"`
|
||||||
|
CgroupParent string `json:",omitempty"`
|
||||||
|
ImageResolveMode string `json:",omitempty"`
|
||||||
|
MultiPlatform bool `json:",omitempty"`
|
||||||
|
NoCache bool `json:",omitempty"`
|
||||||
|
NoCacheFilter []string `json:",omitempty"`
|
||||||
|
|
||||||
|
ShmSize string `json:",omitempty"`
|
||||||
|
Ulimit string `json:",omitempty"`
|
||||||
|
CacheMountNS string `json:",omitempty"`
|
||||||
|
DockerfileCheckConfig string `json:",omitempty"`
|
||||||
|
SourceDateEpoch string `json:",omitempty"`
|
||||||
|
SandboxHostname string `json:",omitempty"`
|
||||||
|
|
||||||
|
RestRaw []keyValueOutput `json:",omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type materialOutput struct {
|
||||||
|
URI string `json:",omitempty"`
|
||||||
|
Digests []string `json:",omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type attachmentOutput struct {
|
||||||
|
Digest string `json:",omitempty"`
|
||||||
|
Platform string `json:",omitempty"`
|
||||||
|
Type string `json:",omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type errorOutput struct {
|
||||||
|
Code int `json:",omitempty"`
|
||||||
|
Message string `json:",omitempty"`
|
||||||
|
Name string `json:",omitempty"`
|
||||||
|
Logs []string `json:",omitempty"`
|
||||||
|
Sources []byte `json:",omitempty"`
|
||||||
|
Stack []byte `json:",omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type keyValueOutput struct {
|
||||||
|
Name string `json:",omitempty"`
|
||||||
|
Value string `json:",omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func readAttr[T any](attrs map[string]string, k string, dest *T, f func(v string) (T, bool)) {
|
||||||
|
if sv, ok := attrs[k]; ok {
|
||||||
|
if f != nil {
|
||||||
|
v, ok := f(sv)
|
||||||
|
if ok {
|
||||||
|
*dest = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if d, ok := any(dest).(*string); ok {
|
||||||
|
*d = sv
|
||||||
|
}
|
||||||
|
}
|
||||||
|
delete(attrs, k)
|
||||||
|
}
|
||||||
|
|
||||||
|
func runInspect(ctx context.Context, dockerCli command.Cli, opts inspectOptions) error {
|
||||||
|
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
nodes, err := b.LoadNodes(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, node := range nodes {
|
||||||
|
if node.Err != nil {
|
||||||
|
return node.Err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
recs, err := queryRecords(ctx, opts.ref, nodes, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(recs) == 0 {
|
||||||
|
if opts.ref == "" {
|
||||||
|
return errors.New("no records found")
|
||||||
|
}
|
||||||
|
return errors.Errorf("no record found for ref %q", opts.ref)
|
||||||
|
}
|
||||||
|
|
||||||
|
rec := &recs[0]
|
||||||
|
c, err := rec.node.Driver.Client(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
store := proxy.NewContentStore(c.ContentClient())
|
||||||
|
|
||||||
|
var defaultPlatform string
|
||||||
|
workers, err := c.ListWorkers(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "failed to list workers")
|
||||||
|
}
|
||||||
|
workers0:
|
||||||
|
for _, w := range workers {
|
||||||
|
for _, p := range w.Platforms {
|
||||||
|
defaultPlatform = platforms.FormatAll(platforms.Normalize(p))
|
||||||
|
break workers0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ls, err := localstate.New(confutil.NewConfig(dockerCli))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
st, _ := ls.ReadRef(rec.node.Builder, rec.node.Name, rec.Ref)
|
||||||
|
|
||||||
|
attrs := rec.FrontendAttrs
|
||||||
|
delete(attrs, "frontend.caps")
|
||||||
|
|
||||||
|
var out inspectOutput
|
||||||
|
|
||||||
|
var context string
|
||||||
|
var dockerfile string
|
||||||
|
if st != nil {
|
||||||
|
context = st.LocalPath
|
||||||
|
dockerfile = st.DockerfilePath
|
||||||
|
wd, _ := os.Getwd()
|
||||||
|
|
||||||
|
if dockerfile != "" && dockerfile != "-" {
|
||||||
|
if rel, err := filepath.Rel(context, dockerfile); err == nil {
|
||||||
|
if !strings.HasPrefix(rel, ".."+string(filepath.Separator)) {
|
||||||
|
dockerfile = rel
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if context != "" {
|
||||||
|
if rel, err := filepath.Rel(wd, context); err == nil {
|
||||||
|
if !strings.HasPrefix(rel, ".."+string(filepath.Separator)) {
|
||||||
|
context = rel
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := attrs["context"]; ok && context == "" {
|
||||||
|
delete(attrs, "context")
|
||||||
|
context = v
|
||||||
|
}
|
||||||
|
if dockerfile == "" {
|
||||||
|
if v, ok := attrs["filename"]; ok {
|
||||||
|
dockerfile = v
|
||||||
|
if dfdir, ok := attrs["vcs:localdir:dockerfile"]; ok {
|
||||||
|
dockerfile = filepath.Join(dfdir, dockerfile)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
delete(attrs, "filename")
|
||||||
|
|
||||||
|
out.Name = buildName(rec.FrontendAttrs, st)
|
||||||
|
out.Ref = rec.Ref
|
||||||
|
|
||||||
|
out.Context = context
|
||||||
|
out.Dockerfile = dockerfile
|
||||||
|
|
||||||
|
if _, ok := attrs["context"]; !ok {
|
||||||
|
if src, ok := attrs["vcs:source"]; ok {
|
||||||
|
out.VCSRepository = src
|
||||||
|
}
|
||||||
|
if rev, ok := attrs["vcs:revision"]; ok {
|
||||||
|
out.VCSRevision = rev
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
readAttr(attrs, "target", &out.Target, nil)
|
||||||
|
|
||||||
|
readAttr(attrs, "platform", &out.Platform, func(v string) ([]string, bool) {
|
||||||
|
return tryParseValue(v, &out.Errors, func(v string) ([]string, error) {
|
||||||
|
var pp []string
|
||||||
|
for _, v := range strings.Split(v, ",") {
|
||||||
|
p, err := platforms.Parse(v)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
pp = append(pp, platforms.FormatAll(platforms.Normalize(p)))
|
||||||
|
}
|
||||||
|
if len(pp) == 0 {
|
||||||
|
pp = append(pp, defaultPlatform)
|
||||||
|
}
|
||||||
|
return pp, nil
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
readAttr(attrs, "build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR", &out.KeepGitDir, func(v string) (bool, bool) {
|
||||||
|
return tryParseValue(v, &out.Errors, strconv.ParseBool)
|
||||||
|
})
|
||||||
|
|
||||||
|
out.NamedContexts = readKeyValues(attrs, "context:")
|
||||||
|
|
||||||
|
if rec.CreatedAt != nil {
|
||||||
|
tm := rec.CreatedAt.AsTime().Local()
|
||||||
|
out.StartedAt = &tm
|
||||||
|
}
|
||||||
|
out.Status = statusRunning
|
||||||
|
|
||||||
|
if rec.CompletedAt != nil {
|
||||||
|
tm := rec.CompletedAt.AsTime().Local()
|
||||||
|
out.CompletedAt = &tm
|
||||||
|
out.Status = statusComplete
|
||||||
|
}
|
||||||
|
|
||||||
|
if rec.Error != nil || rec.ExternalError != nil {
|
||||||
|
out.Error = &errorOutput{}
|
||||||
|
if rec.Error != nil {
|
||||||
|
if codes.Code(rec.Error.Code) == codes.Canceled {
|
||||||
|
out.Status = statusCanceled
|
||||||
|
} else {
|
||||||
|
out.Status = statusError
|
||||||
|
}
|
||||||
|
out.Error.Code = int(codes.Code(rec.Error.Code))
|
||||||
|
out.Error.Message = rec.Error.Message
|
||||||
|
}
|
||||||
|
if rec.ExternalError != nil {
|
||||||
|
dt, err := content.ReadBlob(ctx, store, ociDesc(rec.ExternalError))
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to read external error %s", rec.ExternalError.Digest)
|
||||||
|
}
|
||||||
|
var st spb.Status
|
||||||
|
if err := proto.Unmarshal(dt, &st); err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to unmarshal external error %s", rec.ExternalError.Digest)
|
||||||
|
}
|
||||||
|
retErr := grpcerrors.FromGRPC(status.ErrorProto(&st))
|
||||||
|
var errsources bytes.Buffer
|
||||||
|
for _, s := range errdefs.Sources(retErr) {
|
||||||
|
s.Print(&errsources)
|
||||||
|
errsources.WriteString("\n")
|
||||||
|
}
|
||||||
|
out.Error.Sources = errsources.Bytes()
|
||||||
|
var ve *errdefs.VertexError
|
||||||
|
if errors.As(retErr, &ve) {
|
||||||
|
dgst, err := digest.Parse(ve.Vertex.Digest)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to parse vertex digest %s", ve.Vertex.Digest)
|
||||||
|
}
|
||||||
|
name, logs, err := loadVertexLogs(ctx, c, rec.Ref, dgst, 16)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to load vertex logs %s", dgst)
|
||||||
|
}
|
||||||
|
out.Error.Name = name
|
||||||
|
out.Error.Logs = logs
|
||||||
|
}
|
||||||
|
out.Error.Stack = fmt.Appendf(nil, "%+v", stack.Formatter(retErr))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if out.StartedAt != nil {
|
||||||
|
if out.CompletedAt != nil {
|
||||||
|
out.Duration = out.CompletedAt.Sub(*out.StartedAt)
|
||||||
|
} else {
|
||||||
|
out.Duration = rec.currentTimestamp.Sub(*out.StartedAt)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
out.NumCompletedSteps = rec.NumCompletedSteps
|
||||||
|
out.NumTotalSteps = rec.NumTotalSteps
|
||||||
|
out.NumCachedSteps = rec.NumCachedSteps
|
||||||
|
|
||||||
|
out.BuildArgs = readKeyValues(attrs, "build-arg:")
|
||||||
|
out.Labels = readKeyValues(attrs, "label:")
|
||||||
|
|
||||||
|
readAttr(attrs, "force-network-mode", &out.Config.Network, nil)
|
||||||
|
readAttr(attrs, "hostname", &out.Config.Hostname, nil)
|
||||||
|
readAttr(attrs, "cgroup-parent", &out.Config.CgroupParent, nil)
|
||||||
|
readAttr(attrs, "image-resolve-mode", &out.Config.ImageResolveMode, nil)
|
||||||
|
readAttr(attrs, "build-arg:BUILDKIT_MULTI_PLATFORM", &out.Config.MultiPlatform, func(v string) (bool, bool) {
|
||||||
|
return tryParseValue(v, &out.Errors, strconv.ParseBool)
|
||||||
|
})
|
||||||
|
readAttr(attrs, "multi-platform", &out.Config.MultiPlatform, func(v string) (bool, bool) {
|
||||||
|
return tryParseValue(v, &out.Errors, strconv.ParseBool)
|
||||||
|
})
|
||||||
|
readAttr(attrs, "no-cache", &out.Config.NoCache, func(v string) (bool, bool) {
|
||||||
|
if v == "" {
|
||||||
|
return true, true
|
||||||
|
}
|
||||||
|
return false, false
|
||||||
|
})
|
||||||
|
readAttr(attrs, "no-cache", &out.Config.NoCacheFilter, func(v string) ([]string, bool) {
|
||||||
|
if v == "" {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
return strings.Split(v, ","), true
|
||||||
|
})
|
||||||
|
|
||||||
|
readAttr(attrs, "add-hosts", &out.Config.ExtraHosts, func(v string) ([]string, bool) {
|
||||||
|
return tryParseValue(v, &out.Errors, func(v string) ([]string, error) {
|
||||||
|
fields, err := csvvalue.Fields(v, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return fields, nil
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
readAttr(attrs, "shm-size", &out.Config.ShmSize, nil)
|
||||||
|
readAttr(attrs, "ulimit", &out.Config.Ulimit, nil)
|
||||||
|
readAttr(attrs, "build-arg:BUILDKIT_CACHE_MOUNT_NS", &out.Config.CacheMountNS, nil)
|
||||||
|
readAttr(attrs, "build-arg:BUILDKIT_DOCKERFILE_CHECK", &out.Config.DockerfileCheckConfig, nil)
|
||||||
|
readAttr(attrs, "build-arg:SOURCE_DATE_EPOCH", &out.Config.SourceDateEpoch, nil)
|
||||||
|
readAttr(attrs, "build-arg:SANDBOX_HOSTNAME", &out.Config.SandboxHostname, nil)
|
||||||
|
|
||||||
|
var unusedAttrs []keyValueOutput
|
||||||
|
for k := range attrs {
|
||||||
|
if strings.HasPrefix(k, "vcs:") || strings.HasPrefix(k, "build-arg:") || strings.HasPrefix(k, "label:") || strings.HasPrefix(k, "context:") || strings.HasPrefix(k, "attest:") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
unusedAttrs = append(unusedAttrs, keyValueOutput{
|
||||||
|
Name: k,
|
||||||
|
Value: attrs[k],
|
||||||
|
})
|
||||||
|
}
|
||||||
|
slices.SortFunc(unusedAttrs, func(a, b keyValueOutput) int {
|
||||||
|
return cmp.Compare(a.Name, b.Name)
|
||||||
|
})
|
||||||
|
out.Config.RestRaw = unusedAttrs
|
||||||
|
|
||||||
|
attachments, err := allAttachments(ctx, store, *rec)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
provIndex := slices.IndexFunc(attachments, func(a attachment) bool {
|
||||||
|
return descrType(a.descr) == slsa02.PredicateSLSAProvenance
|
||||||
|
})
|
||||||
|
if provIndex != -1 {
|
||||||
|
prov := attachments[provIndex]
|
||||||
|
dt, err := content.ReadBlob(ctx, store, prov.descr)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Errorf("failed to read provenance %s: %v", prov.descr.Digest, err)
|
||||||
|
}
|
||||||
|
var pred provenancetypes.ProvenancePredicate
|
||||||
|
if err := json.Unmarshal(dt, &pred); err != nil {
|
||||||
|
return errors.Errorf("failed to unmarshal provenance %s: %v", prov.descr.Digest, err)
|
||||||
|
}
|
||||||
|
for _, m := range pred.Materials {
|
||||||
|
out.Materials = append(out.Materials, materialOutput{
|
||||||
|
URI: m.URI,
|
||||||
|
Digests: digestSetToDigests(m.Digest),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(attachments) > 0 {
|
||||||
|
for _, a := range attachments {
|
||||||
|
p := ""
|
||||||
|
if a.platform != nil {
|
||||||
|
p = platforms.FormatAll(*a.platform)
|
||||||
|
}
|
||||||
|
out.Attachments = append(out.Attachments, attachmentOutput{
|
||||||
|
Digest: a.descr.Digest.String(),
|
||||||
|
Platform: p,
|
||||||
|
Type: descrType(a.descr),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.format == formatter.JSONFormatKey {
|
||||||
|
enc := json.NewEncoder(dockerCli.Out())
|
||||||
|
enc.SetIndent("", " ")
|
||||||
|
return enc.Encode(out)
|
||||||
|
} else if opts.format != formatter.PrettyFormatKey {
|
||||||
|
tmpl, err := template.New("inspect").Parse(opts.format)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to parse format template")
|
||||||
|
}
|
||||||
|
var buf bytes.Buffer
|
||||||
|
if err := tmpl.Execute(&buf, out); err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to execute format template")
|
||||||
|
}
|
||||||
|
fmt.Fprintln(dockerCli.Out(), buf.String())
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
tw := tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
|
||||||
|
|
||||||
|
if out.Name != "" {
|
||||||
|
fmt.Fprintf(tw, "Name:\t%s\n", out.Name)
|
||||||
|
}
|
||||||
|
if opts.ref == "" && out.Ref != "" {
|
||||||
|
fmt.Fprintf(tw, "Ref:\t%s\n", out.Ref)
|
||||||
|
}
|
||||||
|
if out.Context != "" {
|
||||||
|
fmt.Fprintf(tw, "Context:\t%s\n", out.Context)
|
||||||
|
}
|
||||||
|
if out.Dockerfile != "" {
|
||||||
|
fmt.Fprintf(tw, "Dockerfile:\t%s\n", out.Dockerfile)
|
||||||
|
}
|
||||||
|
if out.VCSRepository != "" {
|
||||||
|
fmt.Fprintf(tw, "VCS Repository:\t%s\n", out.VCSRepository)
|
||||||
|
}
|
||||||
|
if out.VCSRevision != "" {
|
||||||
|
fmt.Fprintf(tw, "VCS Revision:\t%s\n", out.VCSRevision)
|
||||||
|
}
|
||||||
|
|
||||||
|
if out.Target != "" {
|
||||||
|
fmt.Fprintf(tw, "Target:\t%s\n", out.Target)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(out.Platform) > 0 {
|
||||||
|
fmt.Fprintf(tw, "Platforms:\t%s\n", strings.Join(out.Platform, ", "))
|
||||||
|
}
|
||||||
|
|
||||||
|
if out.KeepGitDir {
|
||||||
|
fmt.Fprintf(tw, "Keep Git Dir:\t%s\n", strconv.FormatBool(out.KeepGitDir))
|
||||||
|
}
|
||||||
|
|
||||||
|
tw.Flush()
|
||||||
|
|
||||||
|
fmt.Fprintln(dockerCli.Out())
|
||||||
|
|
||||||
|
printTable(dockerCli.Out(), out.NamedContexts, "Named Context")
|
||||||
|
|
||||||
|
tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
|
||||||
|
|
||||||
|
fmt.Fprintf(tw, "Started:\t%s\n", out.StartedAt.Format("2006-01-02 15:04:05"))
|
||||||
|
var statusStr string
|
||||||
|
if out.Status == statusRunning {
|
||||||
|
statusStr = " (running)"
|
||||||
|
}
|
||||||
|
fmt.Fprintf(tw, "Duration:\t%s%s\n", formatDuration(out.Duration), statusStr)
|
||||||
|
|
||||||
|
if out.Status == statusError {
|
||||||
|
fmt.Fprintf(tw, "Error:\t%s %s\n", codes.Code(rec.Error.Code).String(), rec.Error.Message)
|
||||||
|
} else if out.Status == statusCanceled {
|
||||||
|
fmt.Fprintf(tw, "Status:\tCanceled\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprintf(tw, "Build Steps:\t%d/%d (%.0f%% cached)\n", out.NumCompletedSteps, out.NumTotalSteps, float64(out.NumCachedSteps)/float64(out.NumTotalSteps)*100)
|
||||||
|
tw.Flush()
|
||||||
|
|
||||||
|
fmt.Fprintln(dockerCli.Out())
|
||||||
|
|
||||||
|
tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
|
||||||
|
|
||||||
|
if out.Config.Network != "" {
|
||||||
|
fmt.Fprintf(tw, "Network:\t%s\n", out.Config.Network)
|
||||||
|
}
|
||||||
|
if out.Config.Hostname != "" {
|
||||||
|
fmt.Fprintf(tw, "Hostname:\t%s\n", out.Config.Hostname)
|
||||||
|
}
|
||||||
|
if len(out.Config.ExtraHosts) > 0 {
|
||||||
|
fmt.Fprintf(tw, "Extra Hosts:\t%s\n", strings.Join(out.Config.ExtraHosts, ", "))
|
||||||
|
}
|
||||||
|
if out.Config.CgroupParent != "" {
|
||||||
|
fmt.Fprintf(tw, "Cgroup Parent:\t%s\n", out.Config.CgroupParent)
|
||||||
|
}
|
||||||
|
if out.Config.ImageResolveMode != "" {
|
||||||
|
fmt.Fprintf(tw, "Image Resolve Mode:\t%s\n", out.Config.ImageResolveMode)
|
||||||
|
}
|
||||||
|
if out.Config.MultiPlatform {
|
||||||
|
fmt.Fprintf(tw, "Multi-Platform:\t%s\n", strconv.FormatBool(out.Config.MultiPlatform))
|
||||||
|
}
|
||||||
|
if out.Config.NoCache {
|
||||||
|
fmt.Fprintf(tw, "No Cache:\t%s\n", strconv.FormatBool(out.Config.NoCache))
|
||||||
|
}
|
||||||
|
if len(out.Config.NoCacheFilter) > 0 {
|
||||||
|
fmt.Fprintf(tw, "No Cache Filter:\t%s\n", strings.Join(out.Config.NoCacheFilter, ", "))
|
||||||
|
}
|
||||||
|
|
||||||
|
if out.Config.ShmSize != "" {
|
||||||
|
fmt.Fprintf(tw, "Shm Size:\t%s\n", out.Config.ShmSize)
|
||||||
|
}
|
||||||
|
if out.Config.Ulimit != "" {
|
||||||
|
fmt.Fprintf(tw, "Resource Limits:\t%s\n", out.Config.Ulimit)
|
||||||
|
}
|
||||||
|
if out.Config.CacheMountNS != "" {
|
||||||
|
fmt.Fprintf(tw, "Cache Mount Namespace:\t%s\n", out.Config.CacheMountNS)
|
||||||
|
}
|
||||||
|
if out.Config.DockerfileCheckConfig != "" {
|
||||||
|
fmt.Fprintf(tw, "Dockerfile Check Config:\t%s\n", out.Config.DockerfileCheckConfig)
|
||||||
|
}
|
||||||
|
if out.Config.SourceDateEpoch != "" {
|
||||||
|
fmt.Fprintf(tw, "Source Date Epoch:\t%s\n", out.Config.SourceDateEpoch)
|
||||||
|
}
|
||||||
|
if out.Config.SandboxHostname != "" {
|
||||||
|
fmt.Fprintf(tw, "Sandbox Hostname:\t%s\n", out.Config.SandboxHostname)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, kv := range out.Config.RestRaw {
|
||||||
|
fmt.Fprintf(tw, "%s:\t%s\n", kv.Name, kv.Value)
|
||||||
|
}
|
||||||
|
|
||||||
|
tw.Flush()
|
||||||
|
|
||||||
|
fmt.Fprintln(dockerCli.Out())
|
||||||
|
|
||||||
|
printTable(dockerCli.Out(), out.BuildArgs, "Build Arg")
|
||||||
|
printTable(dockerCli.Out(), out.Labels, "Label")
|
||||||
|
|
||||||
|
if len(out.Materials) > 0 {
|
||||||
|
fmt.Fprintln(dockerCli.Out(), "Materials:")
|
||||||
|
tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
|
||||||
|
fmt.Fprintf(tw, "URI\tDIGEST\n")
|
||||||
|
for _, m := range out.Materials {
|
||||||
|
fmt.Fprintf(tw, "%s\t%s\n", m.URI, strings.Join(m.Digests, ", "))
|
||||||
|
}
|
||||||
|
tw.Flush()
|
||||||
|
fmt.Fprintln(dockerCli.Out())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(out.Attachments) > 0 {
|
||||||
|
fmt.Fprintf(tw, "Attachments:\n")
|
||||||
|
tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
|
||||||
|
fmt.Fprintf(tw, "DIGEST\tPLATFORM\tTYPE\n")
|
||||||
|
for _, a := range out.Attachments {
|
||||||
|
fmt.Fprintf(tw, "%s\t%s\t%s\n", a.Digest, a.Platform, a.Type)
|
||||||
|
}
|
||||||
|
tw.Flush()
|
||||||
|
fmt.Fprintln(dockerCli.Out())
|
||||||
|
}
|
||||||
|
|
||||||
|
if out.Error != nil {
|
||||||
|
if out.Error.Sources != nil {
|
||||||
|
fmt.Fprint(dockerCli.Out(), string(out.Error.Sources))
|
||||||
|
}
|
||||||
|
if len(out.Error.Logs) > 0 {
|
||||||
|
fmt.Fprintln(dockerCli.Out(), "Logs:")
|
||||||
|
fmt.Fprintf(dockerCli.Out(), "> => %s:\n", out.Error.Name)
|
||||||
|
for _, l := range out.Error.Logs {
|
||||||
|
fmt.Fprintln(dockerCli.Out(), "> "+l)
|
||||||
|
}
|
||||||
|
fmt.Fprintln(dockerCli.Out())
|
||||||
|
}
|
||||||
|
if len(out.Error.Stack) > 0 {
|
||||||
|
if debug.IsEnabled() {
|
||||||
|
fmt.Fprintf(dockerCli.Out(), "\n%s\n", out.Error.Stack)
|
||||||
|
} else {
|
||||||
|
fmt.Fprintf(dockerCli.Out(), "Enable --debug to see stack traces for error\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprintf(dockerCli.Out(), "Print build logs: docker buildx history logs %s\n", rec.Ref)
|
||||||
|
|
||||||
|
fmt.Fprintf(dockerCli.Out(), "View build in Docker Desktop: %s\n", desktop.BuildURL(fmt.Sprintf("%s/%s/%s", rec.node.Builder, rec.node.Name, rec.Ref)))
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func inspectCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
|
||||||
|
var options inspectOptions
|
||||||
|
|
||||||
|
cmd := &cobra.Command{
|
||||||
|
Use: "inspect [OPTIONS] [REF]",
|
||||||
|
Short: "Inspect a build",
|
||||||
|
Args: cobra.MaximumNArgs(1),
|
||||||
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
if len(args) > 0 {
|
||||||
|
options.ref = args[0]
|
||||||
|
}
|
||||||
|
options.builder = *rootOpts.Builder
|
||||||
|
return runInspect(cmd.Context(), dockerCli, options)
|
||||||
|
},
|
||||||
|
ValidArgsFunction: completion.Disable,
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd.AddCommand(
|
||||||
|
attachmentCmd(dockerCli, rootOpts),
|
||||||
|
)
|
||||||
|
|
||||||
|
flags := cmd.Flags()
|
||||||
|
flags.StringVar(&options.format, "format", formatter.PrettyFormatKey, "Format the output")
|
||||||
|
|
||||||
|
return cmd
|
||||||
|
}
|
||||||
|
|
||||||
|
func loadVertexLogs(ctx context.Context, c *client.Client, ref string, dgst digest.Digest, limit int) (string, []string, error) {
|
||||||
|
st, err := c.ControlClient().Status(ctx, &controlapi.StatusRequest{
|
||||||
|
Ref: ref,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return "", nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var name string
|
||||||
|
var logs []string
|
||||||
|
lastState := map[int]int{}
|
||||||
|
|
||||||
|
loop0:
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
st.CloseSend()
|
||||||
|
return "", nil, context.Cause(ctx)
|
||||||
|
default:
|
||||||
|
ev, err := st.Recv()
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, io.EOF) {
|
||||||
|
break loop0
|
||||||
|
}
|
||||||
|
return "", nil, err
|
||||||
|
}
|
||||||
|
ss := client.NewSolveStatus(ev)
|
||||||
|
for _, v := range ss.Vertexes {
|
||||||
|
if v.Digest == dgst {
|
||||||
|
name = v.Name
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, l := range ss.Logs {
|
||||||
|
if l.Vertex == dgst {
|
||||||
|
parts := bytes.Split(l.Data, []byte("\n"))
|
||||||
|
for i, p := range parts {
|
||||||
|
var wrote bool
|
||||||
|
if i == 0 {
|
||||||
|
idx, ok := lastState[l.Stream]
|
||||||
|
if ok && idx != -1 {
|
||||||
|
logs[idx] = logs[idx] + string(p)
|
||||||
|
wrote = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !wrote {
|
||||||
|
if len(p) > 0 {
|
||||||
|
logs = append(logs, string(p))
|
||||||
|
}
|
||||||
|
lastState[l.Stream] = len(logs) - 1
|
||||||
|
}
|
||||||
|
if i == len(parts)-1 && len(p) == 0 {
|
||||||
|
lastState[l.Stream] = -1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if limit > 0 && len(logs) > limit {
|
||||||
|
logs = logs[len(logs)-limit:]
|
||||||
|
}
|
||||||
|
|
||||||
|
return name, logs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type attachment struct {
|
||||||
|
platform *ocispecs.Platform
|
||||||
|
descr ocispecs.Descriptor
|
||||||
|
}
|
||||||
|
|
||||||
|
func allAttachments(ctx context.Context, store content.Store, rec historyRecord) ([]attachment, error) {
|
||||||
|
var attachments []attachment
|
||||||
|
|
||||||
|
if rec.Result != nil {
|
||||||
|
for _, a := range rec.Result.Attestations {
|
||||||
|
attachments = append(attachments, attachment{
|
||||||
|
descr: ociDesc(a),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
for _, r := range rec.Result.Results {
|
||||||
|
attachments = append(attachments, walkAttachments(ctx, store, ociDesc(r), nil)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for key, ri := range rec.Results {
|
||||||
|
p, err := platforms.Parse(key)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
for _, a := range ri.Attestations {
|
||||||
|
attachments = append(attachments, attachment{
|
||||||
|
platform: &p,
|
||||||
|
descr: ociDesc(a),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
for _, r := range ri.Results {
|
||||||
|
attachments = append(attachments, walkAttachments(ctx, store, ociDesc(r), &p)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
slices.SortFunc(attachments, func(a, b attachment) int {
|
||||||
|
pCmp := 0
|
||||||
|
if a.platform == nil && b.platform != nil {
|
||||||
|
return -1
|
||||||
|
} else if a.platform != nil && b.platform == nil {
|
||||||
|
return 1
|
||||||
|
} else if a.platform != nil && b.platform != nil {
|
||||||
|
pCmp = cmp.Compare(platforms.FormatAll(*a.platform), platforms.FormatAll(*b.platform))
|
||||||
|
}
|
||||||
|
return cmp.Or(
|
||||||
|
pCmp,
|
||||||
|
cmp.Compare(descrType(a.descr), descrType(b.descr)),
|
||||||
|
)
|
||||||
|
})
|
||||||
|
|
||||||
|
return attachments, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func walkAttachments(ctx context.Context, store content.Store, desc ocispecs.Descriptor, platform *ocispecs.Platform) []attachment {
|
||||||
|
_, err := store.Info(ctx, desc.Digest)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var out []attachment
|
||||||
|
|
||||||
|
if desc.Annotations["vnd.docker.reference.type"] != "attestation-manifest" {
|
||||||
|
out = append(out, attachment{platform: platform, descr: desc})
|
||||||
|
}
|
||||||
|
|
||||||
|
if desc.MediaType != ocispecs.MediaTypeImageIndex && desc.MediaType != images.MediaTypeDockerSchema2ManifestList {
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
dt, err := content.ReadBlob(ctx, store, desc)
|
||||||
|
if err != nil {
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
var idx ocispecs.Index
|
||||||
|
if err := json.Unmarshal(dt, &idx); err != nil {
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, d := range idx.Manifests {
|
||||||
|
p := platform
|
||||||
|
if d.Platform != nil {
|
||||||
|
p = d.Platform
|
||||||
|
}
|
||||||
|
out = append(out, walkAttachments(ctx, store, d, p)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func ociDesc(in *controlapi.Descriptor) ocispecs.Descriptor {
|
||||||
|
return ocispecs.Descriptor{
|
||||||
|
MediaType: in.MediaType,
|
||||||
|
Digest: digest.Digest(in.Digest),
|
||||||
|
Size: in.Size,
|
||||||
|
Annotations: in.Annotations,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func descrType(desc ocispecs.Descriptor) string {
|
||||||
|
if typ, ok := desc.Annotations["in-toto.io/predicate-type"]; ok {
|
||||||
|
return typ
|
||||||
|
}
|
||||||
|
return desc.MediaType
|
||||||
|
}
|
||||||
|
|
||||||
|
func tryParseValue[T any](s string, errs *[]string, f func(string) (T, error)) (T, bool) {
|
||||||
|
v, err := f(s)
|
||||||
|
if err != nil {
|
||||||
|
errStr := fmt.Sprintf("failed to parse %s: (%v)", s, err)
|
||||||
|
*errs = append(*errs, errStr)
|
||||||
|
}
|
||||||
|
return v, true
|
||||||
|
}
|
||||||
|
|
||||||
|
func printTable(w io.Writer, kvs []keyValueOutput, title string) {
|
||||||
|
if len(kvs) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
|
||||||
|
fmt.Fprintf(tw, "%s\tVALUE\n", strings.ToUpper(title))
|
||||||
|
for _, k := range kvs {
|
||||||
|
fmt.Fprintf(tw, "%s\t%s\n", k.Name, k.Value)
|
||||||
|
}
|
||||||
|
tw.Flush()
|
||||||
|
fmt.Fprintln(w)
|
||||||
|
}
|
||||||
|
|
||||||
|
func readKeyValues(attrs map[string]string, prefix string) []keyValueOutput {
|
||||||
|
var out []keyValueOutput
|
||||||
|
for k, v := range attrs {
|
||||||
|
if strings.HasPrefix(k, prefix) {
|
||||||
|
out = append(out, keyValueOutput{
|
||||||
|
Name: strings.TrimPrefix(k, prefix),
|
||||||
|
Value: v,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(out) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
slices.SortFunc(out, func(a, b keyValueOutput) int {
|
||||||
|
return cmp.Compare(a.Name, b.Name)
|
||||||
|
})
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func digestSetToDigests(ds slsa.DigestSet) []string {
|
||||||
|
var out []string
|
||||||
|
for k, v := range ds {
|
||||||
|
out = append(out, fmt.Sprintf("%s:%s", k, v))
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
145
commands/history/inspect_attachment.go
Normal file
145
commands/history/inspect_attachment.go
Normal file
@@ -0,0 +1,145 @@
|
|||||||
|
package history
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
|
||||||
|
"github.com/containerd/containerd/v2/core/content/proxy"
|
||||||
|
"github.com/containerd/platforms"
|
||||||
|
"github.com/docker/buildx/builder"
|
||||||
|
"github.com/docker/buildx/util/cobrautil/completion"
|
||||||
|
"github.com/docker/cli/cli/command"
|
||||||
|
intoto "github.com/in-toto/in-toto-golang/in_toto"
|
||||||
|
slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2"
|
||||||
|
"github.com/opencontainers/go-digest"
|
||||||
|
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
type attachmentOptions struct {
|
||||||
|
builder string
|
||||||
|
typ string
|
||||||
|
platform string
|
||||||
|
ref string
|
||||||
|
digest digest.Digest
|
||||||
|
}
|
||||||
|
|
||||||
|
func runAttachment(ctx context.Context, dockerCli command.Cli, opts attachmentOptions) error {
|
||||||
|
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
nodes, err := b.LoadNodes(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, node := range nodes {
|
||||||
|
if node.Err != nil {
|
||||||
|
return node.Err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
recs, err := queryRecords(ctx, opts.ref, nodes, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(recs) == 0 {
|
||||||
|
if opts.ref == "" {
|
||||||
|
return errors.New("no records found")
|
||||||
|
}
|
||||||
|
return errors.Errorf("no record found for ref %q", opts.ref)
|
||||||
|
}
|
||||||
|
|
||||||
|
rec := &recs[0]
|
||||||
|
|
||||||
|
c, err := rec.node.Driver.Client(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
store := proxy.NewContentStore(c.ContentClient())
|
||||||
|
|
||||||
|
if opts.digest != "" {
|
||||||
|
ra, err := store.ReaderAt(ctx, ocispecs.Descriptor{Digest: opts.digest})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_, err = io.Copy(dockerCli.Out(), io.NewSectionReader(ra, 0, ra.Size()))
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
attachments, err := allAttachments(ctx, store, *rec)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
typ := opts.typ
|
||||||
|
switch typ {
|
||||||
|
case "index":
|
||||||
|
typ = ocispecs.MediaTypeImageIndex
|
||||||
|
case "manifest":
|
||||||
|
typ = ocispecs.MediaTypeImageManifest
|
||||||
|
case "image":
|
||||||
|
typ = ocispecs.MediaTypeImageConfig
|
||||||
|
case "provenance":
|
||||||
|
typ = slsa02.PredicateSLSAProvenance
|
||||||
|
case "sbom":
|
||||||
|
typ = intoto.PredicateSPDX
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, a := range attachments {
|
||||||
|
if opts.platform != "" && (a.platform == nil || platforms.FormatAll(*a.platform) != opts.platform) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if typ != "" && descrType(a.descr) != typ {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
ra, err := store.ReaderAt(ctx, a.descr)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_, err = io.Copy(dockerCli.Out(), io.NewSectionReader(ra, 0, ra.Size()))
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return errors.Errorf("no matching attachment found for ref %q", opts.ref)
|
||||||
|
}
|
||||||
|
|
||||||
|
func attachmentCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
|
||||||
|
var options attachmentOptions
|
||||||
|
|
||||||
|
cmd := &cobra.Command{
|
||||||
|
Use: "attachment [OPTIONS] REF [DIGEST]",
|
||||||
|
Short: "Inspect a build attachment",
|
||||||
|
Args: cobra.RangeArgs(1, 2),
|
||||||
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
if len(args) > 0 {
|
||||||
|
options.ref = args[0]
|
||||||
|
}
|
||||||
|
if len(args) > 1 {
|
||||||
|
dgst, err := digest.Parse(args[1])
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "invalid digest %q", args[1])
|
||||||
|
}
|
||||||
|
options.digest = dgst
|
||||||
|
}
|
||||||
|
|
||||||
|
if options.digest == "" && options.platform == "" && options.typ == "" {
|
||||||
|
return errors.New("at least one of --type, --platform or DIGEST must be specified")
|
||||||
|
}
|
||||||
|
|
||||||
|
options.builder = *rootOpts.Builder
|
||||||
|
return runAttachment(cmd.Context(), dockerCli, options)
|
||||||
|
},
|
||||||
|
ValidArgsFunction: completion.Disable,
|
||||||
|
}
|
||||||
|
|
||||||
|
flags := cmd.Flags()
|
||||||
|
flags.StringVar(&options.typ, "type", "", "Type of attachment")
|
||||||
|
flags.StringVar(&options.platform, "platform", "", "Platform of attachment")
|
||||||
|
|
||||||
|
return cmd
|
||||||
|
}
|
||||||
117
commands/history/logs.go
Normal file
117
commands/history/logs.go
Normal file
@@ -0,0 +1,117 @@
|
|||||||
|
package history
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/docker/buildx/builder"
|
||||||
|
"github.com/docker/buildx/util/cobrautil/completion"
|
||||||
|
"github.com/docker/buildx/util/progress"
|
||||||
|
"github.com/docker/cli/cli/command"
|
||||||
|
controlapi "github.com/moby/buildkit/api/services/control"
|
||||||
|
"github.com/moby/buildkit/client"
|
||||||
|
"github.com/moby/buildkit/util/progress/progressui"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
type logsOptions struct {
|
||||||
|
builder string
|
||||||
|
ref string
|
||||||
|
progress string
|
||||||
|
}
|
||||||
|
|
||||||
|
func runLogs(ctx context.Context, dockerCli command.Cli, opts logsOptions) error {
|
||||||
|
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
nodes, err := b.LoadNodes(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, node := range nodes {
|
||||||
|
if node.Err != nil {
|
||||||
|
return node.Err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
recs, err := queryRecords(ctx, opts.ref, nodes, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(recs) == 0 {
|
||||||
|
if opts.ref == "" {
|
||||||
|
return errors.New("no records found")
|
||||||
|
}
|
||||||
|
return errors.Errorf("no record found for ref %q", opts.ref)
|
||||||
|
}
|
||||||
|
|
||||||
|
rec := &recs[0]
|
||||||
|
c, err := rec.node.Driver.Client(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
cl, err := c.ControlClient().Status(ctx, &controlapi.StatusRequest{
|
||||||
|
Ref: rec.Ref,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var mode progressui.DisplayMode = progressui.DisplayMode(opts.progress)
|
||||||
|
if mode == progressui.AutoMode {
|
||||||
|
mode = progressui.PlainMode
|
||||||
|
}
|
||||||
|
printer, err := progress.NewPrinter(context.TODO(), os.Stderr, mode)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
loop0:
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
cl.CloseSend()
|
||||||
|
return context.Cause(ctx)
|
||||||
|
default:
|
||||||
|
ev, err := cl.Recv()
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, io.EOF) {
|
||||||
|
break loop0
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
printer.Write(client.NewSolveStatus(ev))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return printer.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
func logsCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
|
||||||
|
var options logsOptions
|
||||||
|
|
||||||
|
cmd := &cobra.Command{
|
||||||
|
Use: "logs [OPTIONS] [REF]",
|
||||||
|
Short: "Print the logs of a build",
|
||||||
|
Args: cobra.MaximumNArgs(1),
|
||||||
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
if len(args) > 0 {
|
||||||
|
options.ref = args[0]
|
||||||
|
}
|
||||||
|
options.builder = *rootOpts.Builder
|
||||||
|
return runLogs(cmd.Context(), dockerCli, options)
|
||||||
|
},
|
||||||
|
ValidArgsFunction: completion.Disable,
|
||||||
|
}
|
||||||
|
|
||||||
|
flags := cmd.Flags()
|
||||||
|
flags.StringVar(&options.progress, "progress", "plain", "Set type of progress output (plain, rawjson, tty)")
|
||||||
|
|
||||||
|
return cmd
|
||||||
|
}
|
||||||
264
commands/history/ls.go
Normal file
264
commands/history/ls.go
Normal file
@@ -0,0 +1,264 @@
|
|||||||
|
package history
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"slices"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/containerd/console"
|
||||||
|
"github.com/docker/buildx/builder"
|
||||||
|
"github.com/docker/buildx/localstate"
|
||||||
|
"github.com/docker/buildx/util/cobrautil/completion"
|
||||||
|
"github.com/docker/buildx/util/confutil"
|
||||||
|
"github.com/docker/buildx/util/desktop"
|
||||||
|
"github.com/docker/buildx/util/gitutil"
|
||||||
|
"github.com/docker/cli/cli"
|
||||||
|
"github.com/docker/cli/cli/command"
|
||||||
|
"github.com/docker/cli/cli/command/formatter"
|
||||||
|
"github.com/docker/go-units"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
lsHeaderBuildID = "BUILD ID"
|
||||||
|
lsHeaderName = "NAME"
|
||||||
|
lsHeaderStatus = "STATUS"
|
||||||
|
lsHeaderCreated = "CREATED AT"
|
||||||
|
lsHeaderDuration = "DURATION"
|
||||||
|
lsHeaderLink = ""
|
||||||
|
|
||||||
|
lsDefaultTableFormat = "table {{.Ref}}\t{{.Name}}\t{{.Status}}\t{{.CreatedAt}}\t{{.Duration}}\t{{.Link}}"
|
||||||
|
|
||||||
|
headerKeyTimestamp = "buildkit-current-timestamp"
|
||||||
|
)
|
||||||
|
|
||||||
|
type lsOptions struct {
|
||||||
|
builder string
|
||||||
|
format string
|
||||||
|
noTrunc bool
|
||||||
|
|
||||||
|
filters []string
|
||||||
|
local bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func runLs(ctx context.Context, dockerCli command.Cli, opts lsOptions) error {
|
||||||
|
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
nodes, err := b.LoadNodes(ctx)
|
||||||
|
	if err != nil {
		return err
	}
	for _, node := range nodes {
		if node.Err != nil {
			return node.Err
		}
	}

	queryOptions := &queryOptions{}

	if opts.local {
		wd, err := os.Getwd()
		if err != nil {
			return err
		}
		gitc, err := gitutil.New(gitutil.WithContext(ctx), gitutil.WithWorkingDir(wd))
		if err != nil {
			if st, err1 := os.Stat(path.Join(wd, ".git")); err1 == nil && st.IsDir() {
				return errors.Wrap(err, "git was not found in the system")
			}
			return errors.Wrapf(err, "could not find git repository for local filter")
		}
		remote, err := gitc.RemoteURL()
		if err != nil {
			return errors.Wrapf(err, "could not get remote URL for local filter")
		}
		queryOptions.Filters = append(queryOptions.Filters, fmt.Sprintf("repository=%s", remote))
	}
	queryOptions.Filters = append(queryOptions.Filters, opts.filters...)

	out, err := queryRecords(ctx, "", nodes, queryOptions)
	if err != nil {
		return err
	}

	ls, err := localstate.New(confutil.NewConfig(dockerCli))
	if err != nil {
		return err
	}

	for i, rec := range out {
		st, _ := ls.ReadRef(rec.node.Builder, rec.node.Name, rec.Ref)
		rec.name = buildName(rec.FrontendAttrs, st)
		out[i] = rec
	}

	return lsPrint(dockerCli, out, opts)
}

func lsCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
	var options lsOptions

	cmd := &cobra.Command{
		Use:   "ls",
		Short: "List build records",
		Args:  cli.NoArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			options.builder = *rootOpts.Builder
			return runLs(cmd.Context(), dockerCli, options)
		},
		ValidArgsFunction: completion.Disable,
	}

	flags := cmd.Flags()
	flags.StringVar(&options.format, "format", formatter.TableFormatKey, "Format the output")
	flags.BoolVar(&options.noTrunc, "no-trunc", false, "Don't truncate output")
	flags.StringArrayVar(&options.filters, "filter", nil, `Provide filter values (e.g., "status=error")`)
	flags.BoolVar(&options.local, "local", false, "List records for current repository only")

	return cmd
}

func lsPrint(dockerCli command.Cli, records []historyRecord, in lsOptions) error {
	if in.format == formatter.TableFormatKey {
		in.format = lsDefaultTableFormat
	}

	ctx := formatter.Context{
		Output: dockerCli.Out(),
		Format: formatter.Format(in.format),
		Trunc:  !in.noTrunc,
	}

	slices.SortFunc(records, func(a, b historyRecord) int {
		if a.CompletedAt == nil && b.CompletedAt != nil {
			return -1
		}
		if a.CompletedAt != nil && b.CompletedAt == nil {
			return 1
		}
		return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
	})

	var term bool
	if _, err := console.ConsoleFromFile(os.Stdout); err == nil {
		term = true
	}
	render := func(format func(subContext formatter.SubContext) error) error {
		for _, r := range records {
			if err := format(&lsContext{
				format: formatter.Format(in.format),
				isTerm: term,
				trunc:  !in.noTrunc,
				record: &r,
			}); err != nil {
				return err
			}
		}
		return nil
	}

	lsCtx := lsContext{
		isTerm: term,
		trunc:  !in.noTrunc,
	}
	lsCtx.Header = formatter.SubHeaderContext{
		"Ref":       lsHeaderBuildID,
		"Name":      lsHeaderName,
		"Status":    lsHeaderStatus,
		"CreatedAt": lsHeaderCreated,
		"Duration":  lsHeaderDuration,
		"Link":      lsHeaderLink,
	}

	return ctx.Write(&lsCtx, render)
}

type lsContext struct {
	formatter.HeaderContext

	isTerm bool
	trunc  bool
	format formatter.Format
	record *historyRecord
}

func (c *lsContext) MarshalJSON() ([]byte, error) {
	m := map[string]any{
		"ref":             c.FullRef(),
		"name":            c.Name(),
		"status":          c.Status(),
		"created_at":      c.record.CreatedAt.AsTime().Format(time.RFC3339Nano),
		"total_steps":     c.record.NumTotalSteps,
		"completed_steps": c.record.NumCompletedSteps,
		"cached_steps":    c.record.NumCachedSteps,
	}
	if c.record.CompletedAt != nil {
		m["completed_at"] = c.record.CompletedAt.AsTime().Format(time.RFC3339Nano)
	}
	return json.Marshal(m)
}

func (c *lsContext) Ref() string {
	return c.record.Ref
}

func (c *lsContext) FullRef() string {
	return fmt.Sprintf("%s/%s/%s", c.record.node.Builder, c.record.node.Name, c.record.Ref)
}

func (c *lsContext) Name() string {
	name := c.record.name
	if c.trunc && c.format.IsTable() {
		return trimBeginning(name, 36)
	}
	return name
}

func (c *lsContext) Status() string {
	if c.record.CompletedAt != nil {
		if c.record.Error != nil {
			return "Error"
		}
		return "Completed"
	}
	return "Running"
}

func (c *lsContext) CreatedAt() string {
	return units.HumanDuration(time.Since(c.record.CreatedAt.AsTime())) + " ago"
}

func (c *lsContext) Duration() string {
	lastTime := c.record.currentTimestamp
	if c.record.CompletedAt != nil {
		tm := c.record.CompletedAt.AsTime()
		lastTime = &tm
	}
	if lastTime == nil {
		return ""
	}
	v := formatDuration(lastTime.Sub(c.record.CreatedAt.AsTime()))
	if c.record.CompletedAt == nil {
		v += "+"
	}
	return v
}

func (c *lsContext) Link() string {
	url := desktop.BuildURL(c.FullRef())
	if c.format.IsTable() {
		if c.isTerm {
			return desktop.ANSIHyperlink(url, "Open")
		}
		return ""
	}
	return url
}
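For reference, the sort in lsPrint keeps in-progress records (nil CompletedAt) ahead of completed ones, newest-first within each group. A minimal standalone sketch of that ordering rule (the rec type and names are illustrative, not part of the package):

package main

import (
	"fmt"
	"slices"
	"time"
)

type rec struct {
	name      string
	created   time.Time
	completed *time.Time // nil while the build is still running
}

func main() {
	t0 := time.Now()
	done := t0.Add(-time.Hour)
	recs := []rec{
		{"old-done", t0.Add(-2 * time.Hour), &done},
		{"running", t0.Add(-3 * time.Hour), nil},
		{"new-done", t0.Add(-30 * time.Minute), &done},
	}
	slices.SortFunc(recs, func(a, b rec) int {
		// running builds sort before completed ones ...
		if a.completed == nil && b.completed != nil {
			return -1
		}
		if a.completed != nil && b.completed == nil {
			return 1
		}
		// ... and within each group, newest creation time first
		return b.created.Compare(a.created)
	})
	for _, r := range recs {
		fmt.Println(r.name) // running, new-done, old-done
	}
}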
73 commands/history/open.go Normal file
@@ -0,0 +1,73 @@
package history

import (
	"context"
	"fmt"

	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/util/cobrautil/completion"
	"github.com/docker/buildx/util/desktop"
	"github.com/docker/cli/cli/command"
	"github.com/pkg/browser"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

type openOptions struct {
	builder string
	ref     string
}

func runOpen(ctx context.Context, dockerCli command.Cli, opts openOptions) error {
	b, err := builder.New(dockerCli, builder.WithName(opts.builder))
	if err != nil {
		return err
	}

	nodes, err := b.LoadNodes(ctx)
	if err != nil {
		return err
	}
	for _, node := range nodes {
		if node.Err != nil {
			return node.Err
		}
	}

	recs, err := queryRecords(ctx, opts.ref, nodes, nil)
	if err != nil {
		return err
	}

	if len(recs) == 0 {
		if opts.ref == "" {
			return errors.New("no records found")
		}
		return errors.Errorf("no record found for ref %q", opts.ref)
	}

	rec := &recs[0]

	url := desktop.BuildURL(fmt.Sprintf("%s/%s/%s", rec.node.Builder, rec.node.Name, rec.Ref))
	return browser.OpenURL(url)
}

func openCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
	var options openOptions

	cmd := &cobra.Command{
		Use:   "open [OPTIONS] [REF]",
		Short: "Open a build in Docker Desktop",
		Args:  cobra.MaximumNArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) > 0 {
				options.ref = args[0]
			}
			options.builder = *rootOpts.Builder
			return runOpen(cmd.Context(), dockerCli, options)
		},
		ValidArgsFunction: completion.Disable,
	}

	return cmd
}
151 commands/history/rm.go Normal file
@@ -0,0 +1,151 @@
package history

import (
	"context"
	"io"

	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/util/cobrautil/completion"
	"github.com/docker/cli/cli/command"
	"github.com/hashicorp/go-multierror"
	controlapi "github.com/moby/buildkit/api/services/control"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"golang.org/x/sync/errgroup"
)

type rmOptions struct {
	builder string
	refs    []string
	all     bool
}

func runRm(ctx context.Context, dockerCli command.Cli, opts rmOptions) error {
	b, err := builder.New(dockerCli, builder.WithName(opts.builder))
	if err != nil {
		return err
	}

	nodes, err := b.LoadNodes(ctx)
	if err != nil {
		return err
	}
	for _, node := range nodes {
		if node.Err != nil {
			return node.Err
		}
	}

	errs := make([][]error, len(opts.refs))
	for i := range errs {
		errs[i] = make([]error, len(nodes))
	}

	eg, ctx := errgroup.WithContext(ctx)
	for i, node := range nodes {
		node := node
		eg.Go(func() error {
			if node.Driver == nil {
				return nil
			}
			c, err := node.Driver.Client(ctx)
			if err != nil {
				return err
			}

			refs := opts.refs

			if opts.all {
				serv, err := c.ControlClient().ListenBuildHistory(ctx, &controlapi.BuildHistoryRequest{
					EarlyExit: true,
				})
				if err != nil {
					return err
				}
				defer serv.CloseSend()

				for {
					resp, err := serv.Recv()
					if err != nil {
						if errors.Is(err, io.EOF) {
							break
						}
						return err
					}
					if resp.Type == controlapi.BuildHistoryEventType_COMPLETE {
						refs = append(refs, resp.Record.Ref)
					}
				}
			}

			for j, ref := range refs {
				_, err = c.ControlClient().UpdateBuildHistory(ctx, &controlapi.UpdateBuildHistoryRequest{
					Ref:    ref,
					Delete: true,
				})
				if opts.all {
					if err != nil {
						return err
					}
				} else {
					errs[j][i] = err
				}
			}
			return nil
		})
	}

	if err := eg.Wait(); err != nil {
		return err
	}

	var out []error
loop0:
	for _, nodeErrs := range errs {
		var nodeErr error
		for _, err1 := range nodeErrs {
			if err1 == nil {
				continue loop0
			}
			if nodeErr == nil {
				nodeErr = err1
			} else {
				nodeErr = multierror.Append(nodeErr, err1)
			}
		}
		out = append(out, nodeErr)
	}
	if len(out) == 0 {
		return nil
	}
	if len(out) == 1 {
		return out[0]
	}
	return multierror.Append(out[0], out[1:]...)
}

func rmCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
	var options rmOptions

	cmd := &cobra.Command{
		Use:   "rm [OPTIONS] [REF...]",
		Short: "Remove build records",
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) == 0 && !options.all {
				return errors.New("rm requires at least one argument")
			}
			if len(args) > 0 && options.all {
				return errors.New("rm requires either --all or at least one argument")
			}
			options.refs = args
			options.builder = *rootOpts.Builder
			return runRm(cmd.Context(), dockerCli, options)
		},
		ValidArgsFunction: completion.Disable,
	}

	flags := cmd.Flags()
	flags.BoolVar(&options.all, "all", false, "Remove all build records")

	return cmd
}
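The error bookkeeping in runRm is deliberate: a ref only counts as failed when every node failed to delete it. A minimal sketch of that collapse rule in isolation, reusing the same hashicorp/go-multierror package imported above (the collapse helper and sample data are hypothetical):

package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/go-multierror"
)

// collapse mirrors the loop above: a ref counts as deleted if any node
// deleted it (a nil error short-circuits the row); otherwise the row's
// errors are merged into one.
func collapse(errs [][]error) []error {
	var out []error
loop0:
	for _, nodeErrs := range errs {
		var nodeErr error
		for _, err1 := range nodeErrs {
			if err1 == nil {
				continue loop0
			}
			if nodeErr == nil {
				nodeErr = err1
			} else {
				nodeErr = multierror.Append(nodeErr, err1)
			}
		}
		out = append(out, nodeErr)
	}
	return out
}

func main() {
	rows := [][]error{
		{errors.New("node1: not found"), nil}, // deleted on node2, so the row is dropped
		{errors.New("node1: not found"), errors.New("node2: not found")},
	}
	fmt.Println(collapse(rows)) // only the second row surfaces
}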
33 commands/history/root.go Normal file
@@ -0,0 +1,33 @@
package history

import (
	"github.com/docker/buildx/util/cobrautil/completion"
	"github.com/docker/cli/cli/command"
	"github.com/spf13/cobra"
)

type RootOptions struct {
	Builder *string
}

func RootCmd(rootcmd *cobra.Command, dockerCli command.Cli, opts RootOptions) *cobra.Command {
	cmd := &cobra.Command{
		Use:               "history",
		Short:             "Commands to work on build records",
		ValidArgsFunction: completion.Disable,
		RunE:              rootcmd.RunE,
	}

	cmd.AddCommand(
		lsCmd(dockerCli, opts),
		rmCmd(dockerCli, opts),
		logsCmd(dockerCli, opts),
		inspectCmd(dockerCli, opts),
		openCmd(dockerCli, opts),
		traceCmd(dockerCli, opts),
		importCmd(dockerCli, opts),
		exportCmd(dockerCli, opts),
	)

	return cmd
}
228 commands/history/trace.go Normal file
@@ -0,0 +1,228 @@
package history

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net"
	"os"
	"time"

	"github.com/containerd/console"
	"github.com/containerd/containerd/v2/core/content/proxy"
	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/util/cobrautil/completion"
	"github.com/docker/buildx/util/otelutil"
	"github.com/docker/buildx/util/otelutil/jaeger"
	"github.com/docker/cli/cli/command"
	controlapi "github.com/moby/buildkit/api/services/control"
	"github.com/opencontainers/go-digest"
	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/browser"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	jaegerui "github.com/tonistiigi/jaeger-ui-rest"
)

type traceOptions struct {
	builder string
	ref     string
	addr    string
	compare string
}

func loadTrace(ctx context.Context, ref string, nodes []builder.Node) (string, []byte, error) {
	recs, err := queryRecords(ctx, ref, nodes, &queryOptions{
		CompletedOnly: true,
	})
	if err != nil {
		return "", nil, err
	}

	if len(recs) == 0 {
		if ref == "" {
			return "", nil, errors.New("no records found")
		}
		return "", nil, errors.Errorf("no record found for ref %q", ref)
	}
	rec := &recs[0]

	if rec.CompletedAt == nil {
		return "", nil, errors.Errorf("build %q is not completed, only completed builds can be traced", rec.Ref)
	}

	if rec.Trace == nil {
		// build is complete but no trace yet. try to finalize the trace
		time.Sleep(1 * time.Second) // give some extra time for last parts of trace to be written

		c, err := rec.node.Driver.Client(ctx)
		if err != nil {
			return "", nil, err
		}
		_, err = c.ControlClient().UpdateBuildHistory(ctx, &controlapi.UpdateBuildHistoryRequest{
			Ref:      rec.Ref,
			Finalize: true,
		})
		if err != nil {
			return "", nil, err
		}

		recs, err := queryRecords(ctx, rec.Ref, []builder.Node{*rec.node}, &queryOptions{
			CompletedOnly: true,
		})
		if err != nil {
			return "", nil, err
		}

		if len(recs) == 0 {
			return "", nil, errors.Errorf("build record %q was deleted", rec.Ref)
		}

		rec = &recs[0]
		if rec.Trace == nil {
			return "", nil, errors.Errorf("build record %q is missing a trace", rec.Ref)
		}
	}

	c, err := rec.node.Driver.Client(ctx)
	if err != nil {
		return "", nil, err
	}

	store := proxy.NewContentStore(c.ContentClient())

	ra, err := store.ReaderAt(ctx, ocispecs.Descriptor{
		Digest:    digest.Digest(rec.Trace.Digest),
		MediaType: rec.Trace.MediaType,
		Size:      rec.Trace.Size,
	})
	if err != nil {
		return "", nil, err
	}

	spans, err := otelutil.ParseSpanStubs(io.NewSectionReader(ra, 0, ra.Size()))
	if err != nil {
		return "", nil, err
	}

	wrapper := struct {
		Data []jaeger.Trace `json:"data"`
	}{
		Data: spans.JaegerData().Data,
	}

	if len(wrapper.Data) == 0 {
		return "", nil, errors.New("no trace data")
	}

	buf := &bytes.Buffer{}
	enc := json.NewEncoder(buf)
	enc.SetIndent("", "  ")
	if err := enc.Encode(wrapper); err != nil {
		return "", nil, err
	}

	return string(wrapper.Data[0].TraceID), buf.Bytes(), nil
}

func runTrace(ctx context.Context, dockerCli command.Cli, opts traceOptions) error {
	b, err := builder.New(dockerCli, builder.WithName(opts.builder))
	if err != nil {
		return err
	}

	nodes, err := b.LoadNodes(ctx)
	if err != nil {
		return err
	}
	for _, node := range nodes {
		if node.Err != nil {
			return node.Err
		}
	}

	traceID, data, err := loadTrace(ctx, opts.ref, nodes)
	if err != nil {
		return err
	}
	srv := jaegerui.NewServer(jaegerui.Config{})
	if err := srv.AddTrace(traceID, bytes.NewReader(data)); err != nil {
		return err
	}
	url := "/trace/" + traceID

	if opts.compare != "" {
		traceIDcomp, data, err := loadTrace(ctx, opts.compare, nodes)
		if err != nil {
			return errors.Wrapf(err, "failed to load trace for %s", opts.compare)
		}
		if err := srv.AddTrace(traceIDcomp, bytes.NewReader(data)); err != nil {
			return err
		}
		url = "/trace/" + traceIDcomp + "..." + traceID
	}

	var term bool
	if _, err := console.ConsoleFromFile(os.Stdout); err == nil {
		term = true
	}

	if !term && opts.compare == "" {
		fmt.Fprintln(dockerCli.Out(), string(data))
		return nil
	}

	ln, err := net.Listen("tcp", opts.addr)
	if err != nil {
		return err
	}

	go func() {
		time.Sleep(100 * time.Millisecond)
		browser.OpenURL(url)
	}()

	url = "http://" + ln.Addr().String() + url
	fmt.Fprintf(dockerCli.Err(), "Trace available at %s\n", url)

	go func() {
		<-ctx.Done()
		ln.Close()
	}()

	err = srv.Serve(ln)
	if err != nil {
		select {
		case <-ctx.Done():
			return nil
		default:
		}
	}
	return err
}

func traceCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
	var options traceOptions

	cmd := &cobra.Command{
		Use:   "trace [OPTIONS] [REF]",
		Short: "Show the OpenTelemetry trace of a build record",
		Args:  cobra.MaximumNArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) > 0 {
				options.ref = args[0]
			}
			options.builder = *rootOpts.Builder
			return runTrace(cmd.Context(), dockerCli, options)
		},
		ValidArgsFunction: completion.Disable,
	}

	flags := cmd.Flags()
	flags.StringVar(&options.addr, "addr", "127.0.0.1:0", "Address to bind the UI server")
	flags.StringVar(&options.compare, "compare", "", "Compare with another build reference")

	return cmd
}
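runTrace stops the Jaeger UI server by closing the listener when the context ends and then suppressing the resulting Serve error. The same shutdown pattern in isolation, using only the standard library (serveUntilDone is a hypothetical name for this sketch):

package main

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"time"
)

func serveUntilDone(ctx context.Context, addr string, h http.Handler) error {
	ln, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}
	// Closing the listener forces Serve to return; if the context is the
	// reason, the resulting error is expected and dropped.
	go func() {
		<-ctx.Done()
		ln.Close()
	}()
	err = http.Serve(ln, h)
	select {
	case <-ctx.Done():
		return nil
	default:
		return err
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel()
	fmt.Println(serveUntilDone(ctx, "127.0.0.1:0", http.NotFoundHandler())) // <nil>
}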
403 commands/history/utils.go Normal file
@@ -0,0 +1,403 @@
package history

import (
	"bytes"
	"context"
	"encoding/csv"
	"fmt"
	"io"
	"path/filepath"
	"slices"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/docker/buildx/build"
	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/localstate"
	controlapi "github.com/moby/buildkit/api/services/control"
	"github.com/moby/buildkit/util/gitutil"
	"github.com/pkg/errors"
	"golang.org/x/sync/errgroup"
)

const recordsLimit = 50

func buildName(fattrs map[string]string, ls *localstate.State) string {
	var res string

	var target, contextPath, dockerfilePath, vcsSource string
	if v, ok := fattrs["target"]; ok {
		target = v
	}
	if v, ok := fattrs["context"]; ok {
		contextPath = filepath.ToSlash(v)
	} else if v, ok := fattrs["vcs:localdir:context"]; ok && v != "." {
		contextPath = filepath.ToSlash(v)
	}
	if v, ok := fattrs["vcs:source"]; ok {
		vcsSource = v
	}
	if v, ok := fattrs["filename"]; ok && v != "Dockerfile" {
		dockerfilePath = filepath.ToSlash(v)
	}
	if v, ok := fattrs["vcs:localdir:dockerfile"]; ok && v != "." {
		dockerfilePath = filepath.ToSlash(filepath.Join(v, dockerfilePath))
	}

	var localPath string
	if ls != nil && !build.IsRemoteURL(ls.LocalPath) {
		if ls.LocalPath != "" && ls.LocalPath != "-" {
			localPath = filepath.ToSlash(ls.LocalPath)
		}
		if ls.DockerfilePath != "" && ls.DockerfilePath != "-" && ls.DockerfilePath != "Dockerfile" {
			dockerfilePath = filepath.ToSlash(ls.DockerfilePath)
		}
	}

	// remove default dockerfile name
	const defaultFilename = "/Dockerfile"
	hasDefaultFileName := strings.HasSuffix(dockerfilePath, defaultFilename) || dockerfilePath == ""
	dockerfilePath = strings.TrimSuffix(dockerfilePath, defaultFilename)

	// dockerfile is a subpath of context
	if strings.HasPrefix(dockerfilePath, localPath) && len(dockerfilePath) > len(localPath) {
		res = dockerfilePath[strings.LastIndex(localPath, "/")+1:]
	} else {
		// Otherwise, use basename
		bpath := localPath
		if len(dockerfilePath) > 0 {
			bpath = dockerfilePath
		}
		if len(bpath) > 0 {
			lidx := strings.LastIndex(bpath, "/")
			res = bpath[lidx+1:]
			if !hasDefaultFileName {
				if lidx != -1 {
					res = filepath.ToSlash(filepath.Join(filepath.Base(bpath[:lidx]), res))
				} else {
					res = filepath.ToSlash(filepath.Join(filepath.Base(bpath), res))
				}
			}
		}
	}

	if len(contextPath) > 0 {
		res = contextPath
	}
	if len(target) > 0 {
		if len(res) > 0 {
			res = res + " (" + target + ")"
		} else {
			res = target
		}
	}
	if res == "" && vcsSource != "" {
		return vcsSource
	}
	return res
}

func trimBeginning(s string, n int) string {
	if len(s) <= n {
		return s
	}
	return ".." + s[len(s)-n+2:]
}

type historyRecord struct {
	*controlapi.BuildHistoryRecord
	currentTimestamp *time.Time
	node             *builder.Node
	name             string
}

type queryOptions struct {
	CompletedOnly bool
	Filters       []string
}

func queryRecords(ctx context.Context, ref string, nodes []builder.Node, opts *queryOptions) ([]historyRecord, error) {
	var mu sync.Mutex
	var out []historyRecord

	var offset *int
	if strings.HasPrefix(ref, "^") {
		off, err := strconv.Atoi(ref[1:])
		if err != nil {
			return nil, errors.Wrapf(err, "invalid offset %q", ref)
		}
		offset = &off
		ref = ""
	}

	var filters []string
	if opts != nil {
		filters = opts.Filters
	}

	eg, ctx := errgroup.WithContext(ctx)
	for _, node := range nodes {
		node := node
		eg.Go(func() error {
			if node.Driver == nil {
				return nil
			}
			var records []historyRecord
			c, err := node.Driver.Client(ctx)
			if err != nil {
				return err
			}

			var matchers []matchFunc
			if len(filters) > 0 {
				filters, matchers, err = dockerFiltersToBuildkit(filters)
				if err != nil {
					return err
				}
				sb := bytes.NewBuffer(nil)
				w := csv.NewWriter(sb)
				w.Write(filters)
				w.Flush()
				filters = []string{strings.TrimSuffix(sb.String(), "\n")}
			}

			serv, err := c.ControlClient().ListenBuildHistory(ctx, &controlapi.BuildHistoryRequest{
				EarlyExit: true,
				Ref:       ref,
				Limit:     recordsLimit,
				Filter:    filters,
			})
			if err != nil {
				return err
			}
			md, err := serv.Header()
			if err != nil {
				return err
			}
			var ts *time.Time
			if v, ok := md[headerKeyTimestamp]; ok {
				t, err := time.Parse(time.RFC3339Nano, v[0])
				if err != nil {
					return err
				}
				ts = &t
			}
			defer serv.CloseSend()
		loop0:
			for {
				he, err := serv.Recv()
				if err != nil {
					if errors.Is(err, io.EOF) {
						break
					}
					return err
				}
				if he.Type == controlapi.BuildHistoryEventType_DELETED || he.Record == nil {
					continue
				}
				if opts != nil && opts.CompletedOnly && he.Type != controlapi.BuildHistoryEventType_COMPLETE {
					continue
				}

				// for older buildkit that don't support filters apply local filters
				for _, matcher := range matchers {
					if !matcher(he.Record) {
						continue loop0
					}
				}

				records = append(records, historyRecord{
					BuildHistoryRecord: he.Record,
					currentTimestamp:   ts,
					node:               &node,
				})
			}
			mu.Lock()
			out = append(out, records...)
			mu.Unlock()
			return nil
		})
	}

	if err := eg.Wait(); err != nil {
		return nil, err
	}

	slices.SortFunc(out, func(a, b historyRecord) int {
		return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
	})

	if offset != nil {
		var filtered []historyRecord
		for _, r := range out {
			if *offset > 0 {
				*offset--
				continue
			}
			filtered = append(filtered, r)
			break
		}
		if *offset > 0 {
			return nil, errors.Errorf("no completed build found with offset %d", *offset)
		}
		out = filtered
	}

	return out, nil
}

func formatDuration(d time.Duration) string {
	if d < time.Minute {
		return fmt.Sprintf("%.1fs", d.Seconds())
	}
	return fmt.Sprintf("%dm %2ds", int(d.Minutes()), int(d.Seconds())%60)
}

type matchFunc func(*controlapi.BuildHistoryRecord) bool

func dockerFiltersToBuildkit(in []string) ([]string, []matchFunc, error) {
	out := []string{}
	matchers := []matchFunc{}
	for _, f := range in {
		key, value, sep, found := cutAny(f, "!=", "=", "<=", "<", ">=", ">")
		if !found {
			return nil, nil, errors.Errorf("invalid filter %q", f)
		}
		switch key {
		case "ref", "repository", "status":
			if sep != "=" && sep != "!=" {
				return nil, nil, errors.Errorf("invalid separator for %q, expected = or !=", f)
			}
			matchers = append(matchers, valueFiler(key, value, sep))
			if sep == "=" {
				if key == "status" {
					sep = "=="
				} else {
					sep = "~="
				}
			}
		case "startedAt", "completedAt", "duration":
			if sep == "=" || sep == "!=" {
				return nil, nil, errors.Errorf("invalid separator for %q, expected <=, <, >= or >", f)
			}
			matcher, err := timeBasedFilter(key, value, sep)
			if err != nil {
				return nil, nil, err
			}
			matchers = append(matchers, matcher)
		default:
			return nil, nil, errors.Errorf("unsupported filter %q", f)
		}
		out = append(out, key+sep+value)
	}
	return out, matchers, nil
}

func valueFiler(key, value, sep string) matchFunc {
	return func(rec *controlapi.BuildHistoryRecord) bool {
		var recValue string
		switch key {
		case "ref":
			recValue = rec.Ref
		case "repository":
			v, ok := rec.FrontendAttrs["vcs:source"]
			if ok {
				recValue = v
			} else {
				if context, ok := rec.FrontendAttrs["context"]; ok {
					if ref, err := gitutil.ParseGitRef(context); err == nil {
						recValue = ref.Remote
					}
				}
			}
		case "status":
			if rec.CompletedAt != nil {
				if rec.Error != nil {
					if strings.Contains(rec.Error.Message, "context canceled") {
						recValue = "canceled"
					} else {
						recValue = "error"
					}
				} else {
					recValue = "completed"
				}
			} else {
				recValue = "running"
			}
		}
		switch sep {
		case "=":
			if key == "status" {
				return recValue == value
			}
			return strings.Contains(recValue, value)
		case "!=":
			return recValue != value
		default:
			return false
		}
	}
}

func timeBasedFilter(key, value, sep string) (matchFunc, error) {
	var cmp int64
	switch key {
	case "startedAt", "completedAt":
		v, err := time.ParseDuration(value)
		if err == nil {
			tm := time.Now().Add(-v)
			cmp = tm.Unix()
		} else {
			tm, err := time.Parse(time.RFC3339, value)
			if err != nil {
				return nil, errors.Errorf("invalid time %s", value)
			}
			cmp = tm.Unix()
		}
	case "duration":
		v, err := time.ParseDuration(value)
		if err != nil {
			return nil, errors.Errorf("invalid duration %s", value)
		}
		cmp = int64(v)
	default:
		return nil, nil
	}

	return func(rec *controlapi.BuildHistoryRecord) bool {
		var val int64
		switch key {
		case "startedAt":
			val = rec.CreatedAt.AsTime().Unix()
		case "completedAt":
			if rec.CompletedAt != nil {
				val = rec.CompletedAt.AsTime().Unix()
			}
		case "duration":
			if rec.CompletedAt != nil {
				val = int64(rec.CompletedAt.AsTime().Sub(rec.CreatedAt.AsTime()))
			}
		}
		switch sep {
		case ">=":
			return val >= cmp
		case "<=":
			return val <= cmp
		case ">":
			return val > cmp
		default:
			return val < cmp
		}
	}, nil
}

func cutAny(s string, seps ...string) (before, after, sep string, found bool) {
	for _, sep := range seps {
		if idx := strings.Index(s, sep); idx != -1 {
			return s[:idx], s[idx+len(sep):], sep, true
		}
	}
	return s, "", "", false
}
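Note that cutAny is order-sensitive: separators are tried left to right, so "!=" must be listed before "=" or a filter like "status!=error" would split at the wrong place. A standalone sketch mirroring the unexported helper (for illustration only, not the package's public API):

package main

import (
	"fmt"
	"strings"
)

// cutAny splits s on the first separator found, trying seps in order;
// listing "!=" before "=" keeps "status!=error" from splitting at "=".
func cutAny(s string, seps ...string) (before, after, sep string, found bool) {
	for _, sep := range seps {
		if idx := strings.Index(s, sep); idx != -1 {
			return s[:idx], s[idx+len(sep):], sep, true
		}
	}
	return s, "", "", false
}

func main() {
	for _, f := range []string{"status!=error", "ref=abc123", "duration>30s"} {
		key, value, sep, _ := cutAny(f, "!=", "=", "<=", "<", ">=", ">")
		fmt.Printf("%-14s -> key=%s sep=%s value=%s\n", f, key, sep, value)
	}
}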
@@ -7,13 +7,14 @@ import (
 	"os"
 	"strings"
 
-	"github.com/docker/buildx/store"
-	"github.com/docker/buildx/store/storeutil"
+	"github.com/distribution/reference"
+	"github.com/docker/buildx/builder"
+	"github.com/docker/buildx/util/buildflags"
+	"github.com/docker/buildx/util/cobrautil/completion"
 	"github.com/docker/buildx/util/imagetools"
 	"github.com/docker/buildx/util/progress"
 	"github.com/docker/cli/cli/command"
-	"github.com/docker/distribution/reference"
-	"github.com/moby/buildkit/util/appcontext"
+	"github.com/moby/buildkit/util/progress/progressui"
 	"github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
@@ -25,12 +26,14 @@ type createOptions struct {
 	builder      string
 	files        []string
 	tags         []string
+	annotations  []string
 	dryrun       bool
 	actionAppend bool
 	progress     string
+	preferIndex  bool
 }
 
-func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
+func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, args []string) error {
 	if len(args) == 0 && len(in.files) == 0 {
 		return errors.Errorf("no sources specified")
 	}
@@ -39,7 +42,7 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
 		return errors.Errorf("can't push with no tags specified, please set --tag or --dry-run")
 	}
 
-	fileArgs := make([]string, len(in.files))
+	fileArgs := make([]string, len(in.files), len(in.files)+len(args))
 	for i, f := range in.files {
 		dt, err := os.ReadFile(f)
 		if err != nil {
@@ -90,47 +93,32 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
 	}
 
 	for i, s := range srcs {
-		if s.Ref == nil && s.Desc.MediaType == "" && s.Desc.Digest != "" {
+		if s.Ref == nil {
 			if defaultRepo == nil {
 				return errors.Errorf("multiple repositories specified, cannot infer repository for %q", args[i])
 			}
 
 			n, err := reference.ParseNormalizedNamed(*defaultRepo)
 			if err != nil {
 				return err
 			}
-			r, err := reference.WithDigest(n, s.Desc.Digest)
-			if err != nil {
-				return err
+			if s.Desc.MediaType == "" && s.Desc.Digest != "" {
+				r, err := reference.WithDigest(n, s.Desc.Digest)
+				if err != nil {
+					return err
+				}
+				srcs[i].Ref = r
+				sourceRefs = true
+			} else {
+				srcs[i].Ref = reference.TagNameOnly(n)
 			}
-			srcs[i].Ref = r
-			sourceRefs = true
 		}
 	}
 
-	ctx := appcontext.Context()
-
-	txn, release, err := storeutil.GetStore(dockerCli)
-	if err != nil {
-		return err
-	}
-	defer release()
-
-	var ng *store.NodeGroup
-
-	if in.builder != "" {
-		ng, err = storeutil.GetNodeGroup(txn, dockerCli, in.builder)
-		if err != nil {
-			return err
-		}
-	} else {
-		ng, err = storeutil.GetCurrentInstance(txn, dockerCli)
-		if err != nil {
-			return err
-		}
-	}
-
-	imageopt, err := storeutil.GetImageConfig(dockerCli, ng)
+	b, err := builder.New(dockerCli, builder.WithName(in.builder))
+	if err != nil {
+		return err
+	}
+
+	imageopt, err := b.ImageOpt()
 	if err != nil {
 		return err
 	}
@@ -167,7 +155,12 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
 		}
 	}
 
-	dt, desc, err := r.Combine(ctx, srcs)
+	annotations, err := buildflags.ParseAnnotations(in.annotations)
+	if err != nil {
+		return errors.Wrapf(err, "failed to parse annotations")
+	}
+
+	dt, desc, err := r.Combine(ctx, srcs, annotations, in.preferIndex)
 	if err != nil {
 		return err
 	}
@@ -180,9 +173,12 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
 	// new resolver cause need new auth
 	r = imagetools.New(imageopt)
 
-	ctx2, cancel := context.WithCancel(context.TODO())
-	defer cancel()
-	printer := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, in.progress)
+	ctx2, cancel := context.WithCancelCause(context.TODO())
+	defer func() { cancel(errors.WithStack(context.Canceled)) }()
+	printer, err := progress.NewPrinter(ctx2, os.Stderr, progressui.DisplayMode(in.progress))
+	if err != nil {
+		return err
+	}
 
 	eg, _ := errgroup.WithContext(ctx)
 	pw := progress.WithPrefix(printer, "internal", true)
@@ -198,7 +194,7 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
 			}
 			s := s
 			eg2.Go(func() error {
-				sub.Log(1, []byte(fmt.Sprintf("copying %s from %s to %s\n", s.Desc.Digest.String(), s.Ref.String(), t.String())))
+				sub.Log(1, fmt.Appendf(nil, "copying %s from %s to %s\n", s.Desc.Digest.String(), s.Ref.String(), t.String()))
 				return r.Copy(ctx, s, t)
 			})
 		}
@@ -206,7 +202,7 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
 		if err := eg2.Wait(); err != nil {
 			return err
 		}
-		sub.Log(1, []byte(fmt.Sprintf("pushing %s to %s\n", desc.Digest.String(), t.String())))
+		sub.Log(1, fmt.Appendf(nil, "pushing %s to %s\n", desc.Digest.String(), t.String()))
 		return r.Push(ctx, t, desc, dt)
 	})
 })
@@ -282,8 +278,9 @@ func createCmd(dockerCli command.Cli, opts RootOptions) *cobra.Command {
 		Short: "Create a new image based on source images",
 		RunE: func(cmd *cobra.Command, args []string) error {
 			options.builder = *opts.Builder
-			return runCreate(dockerCli, options, args)
+			return runCreate(cmd.Context(), dockerCli, options, args)
 		},
+		ValidArgsFunction: completion.Disable,
 	}
 
 	flags := cmd.Flags()
@@ -291,7 +288,9 @@ func createCmd(dockerCli command.Cli, opts RootOptions) *cobra.Command {
 	flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, "Set reference for new image")
 	flags.BoolVar(&options.dryrun, "dry-run", false, "Show final image instead of pushing")
 	flags.BoolVar(&options.actionAppend, "append", false, "Append to existing manifest")
-	flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty"). Use plain to show container output`)
+	flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty", "rawjson"). Use plain to show container output`)
+	flags.StringArrayVarP(&options.annotations, "annotation", "", []string{}, "Add annotation to the image")
+	flags.BoolVar(&options.preferIndex, "prefer-index", true, "When only a single source is specified, prefer outputting an image index or manifest list instead of performing a carbon copy")
 
 	return cmd
 }
@@ -1,13 +1,14 @@
 package commands
 
 import (
-	"github.com/docker/buildx/store"
-	"github.com/docker/buildx/store/storeutil"
+	"context"
+
+	"github.com/docker/buildx/builder"
+	"github.com/docker/buildx/util/cobrautil/completion"
 	"github.com/docker/buildx/util/imagetools"
 	"github.com/docker/cli-docs-tool/annotation"
 	"github.com/docker/cli/cli"
 	"github.com/docker/cli/cli/command"
-	"github.com/moby/buildkit/util/appcontext"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 )
@@ -18,34 +19,16 @@ type inspectOptions struct {
 	raw     bool
 }
 
-func runInspect(dockerCli command.Cli, in inspectOptions, name string) error {
-	ctx := appcontext.Context()
-
+func runInspect(ctx context.Context, dockerCli command.Cli, in inspectOptions, name string) error {
 	if in.format != "" && in.raw {
 		return errors.Errorf("format and raw cannot be used together")
 	}
 
-	txn, release, err := storeutil.GetStore(dockerCli)
+	b, err := builder.New(dockerCli, builder.WithName(in.builder))
 	if err != nil {
 		return err
 	}
-	defer release()
-
-	var ng *store.NodeGroup
-
-	if in.builder != "" {
-		ng, err = storeutil.GetNodeGroup(txn, dockerCli, in.builder)
-		if err != nil {
-			return err
-		}
-	} else {
-		ng, err = storeutil.GetCurrentInstance(txn, dockerCli)
-		if err != nil {
-			return err
-		}
-	}
-
-	imageopt, err := storeutil.GetImageConfig(dockerCli, ng)
+	imageopt, err := b.ImageOpt()
 	if err != nil {
 		return err
 	}
@@ -67,8 +50,9 @@ func inspectCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
 		Args: cli.ExactArgs(1),
 		RunE: func(cmd *cobra.Command, args []string) error {
 			options.builder = *rootOpts.Builder
-			return runInspect(dockerCli, options, args[0])
+			return runInspect(cmd.Context(), dockerCli, options, args[0])
 		},
+		ValidArgsFunction: completion.Disable,
 	}
 
 	flags := cmd.Flags()
@@ -1,6 +1,7 @@
 package commands
 
 import (
+	"github.com/docker/buildx/util/cobrautil/completion"
 	"github.com/docker/cli/cli/command"
 	"github.com/spf13/cobra"
 )
@@ -9,10 +10,12 @@ type RootOptions struct {
 	Builder *string
 }
 
-func RootCmd(dockerCli command.Cli, opts RootOptions) *cobra.Command {
+func RootCmd(rootcmd *cobra.Command, dockerCli command.Cli, opts RootOptions) *cobra.Command {
 	cmd := &cobra.Command{
 		Use:   "imagetools",
 		Short: "Commands to work on images in registry",
+		ValidArgsFunction: completion.Disable,
+		RunE:              rootcmd.RunE,
 	}
 
 	cmd.AddCommand(
@@ -4,16 +4,20 @@ import (
 	"context"
 	"fmt"
 	"os"
+	"sort"
 	"strings"
 	"text/tabwriter"
 	"time"
 
-	"github.com/docker/buildx/store"
-	"github.com/docker/buildx/store/storeutil"
+	"github.com/docker/buildx/builder"
+	"github.com/docker/buildx/driver"
+	"github.com/docker/buildx/util/cobrautil/completion"
 	"github.com/docker/buildx/util/platformutil"
 	"github.com/docker/cli/cli"
 	"github.com/docker/cli/cli/command"
-	"github.com/moby/buildkit/util/appcontext"
+	"github.com/docker/cli/cli/debug"
+	"github.com/docker/go-units"
+	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 )
@@ -22,74 +26,48 @@ type inspectOptions struct {
 	builder string
 }
 
-func runInspect(dockerCli command.Cli, in inspectOptions) error {
-	ctx := appcontext.Context()
-
-	txn, release, err := storeutil.GetStore(dockerCli)
+func runInspect(ctx context.Context, dockerCli command.Cli, in inspectOptions) error {
+	b, err := builder.New(dockerCli,
+		builder.WithName(in.builder),
+		builder.WithSkippedValidation(),
+	)
 	if err != nil {
 		return err
 	}
-	defer release()
-
-	var ng *store.NodeGroup
-
-	if in.builder != "" {
-		ng, err = storeutil.GetNodeGroup(txn, dockerCli, in.builder)
-		if err != nil {
-			return err
-		}
-	} else {
-		ng, err = storeutil.GetCurrentInstance(txn, dockerCli)
-		if err != nil {
-			return err
-		}
-	}
-
-	if ng == nil {
-		ng = &store.NodeGroup{
-			Name: "default",
-			Nodes: []store.Node{{
-				Name:     "default",
-				Endpoint: "default",
-			}},
-		}
-	}
-
-	ngi := &nginfo{ng: ng}
-
-	timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
-	defer cancel()
-
-	err = loadNodeGroupData(timeoutCtx, dockerCli, ngi)
-
-	var bootNgi *nginfo
+
+	timeoutCtx, cancel := context.WithCancelCause(ctx)
+	timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
+	defer func() { cancel(errors.WithStack(context.Canceled)) }()
+
+	nodes, err := b.LoadNodes(timeoutCtx, builder.WithData())
+
 	if in.bootstrap {
 		var ok bool
-		ok, err = boot(ctx, ngi)
+		ok, err = b.Boot(ctx)
 		if err != nil {
 			return err
 		}
-		bootNgi = ngi
 		if ok {
-			ngi = &nginfo{ng: ng}
-			err = loadNodeGroupData(ctx, dockerCli, ngi)
+			nodes, err = b.LoadNodes(timeoutCtx, builder.WithData())
 		}
 	}
 
 	w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
-	fmt.Fprintf(w, "Name:\t%s\n", ngi.ng.Name)
-	fmt.Fprintf(w, "Driver:\t%s\n", ngi.ng.Driver)
+	fmt.Fprintf(w, "Name:\t%s\n", b.Name)
+	fmt.Fprintf(w, "Driver:\t%s\n", b.Driver)
+	if !b.NodeGroup.LastActivity.IsZero() {
+		fmt.Fprintf(w, "Last Activity:\t%v\n", b.NodeGroup.LastActivity)
+	}
 
 	if err != nil {
 		fmt.Fprintf(w, "Error:\t%s\n", err.Error())
-	} else if ngi.err != nil {
-		fmt.Fprintf(w, "Error:\t%s\n", ngi.err.Error())
+	} else if b.Err() != nil {
+		fmt.Fprintf(w, "Error:\t%s\n", b.Err().Error())
 	}
 	if err == nil {
 		fmt.Fprintln(w, "")
 		fmt.Fprintln(w, "Nodes:")
 
-		for i, n := range ngi.ng.Nodes {
+		for i, n := range nodes {
 			if i != 0 {
 				fmt.Fprintln(w, "")
 			}
@@ -104,18 +82,83 @@ func runInspect(dockerCli command.Cli, in inspectOptions) error {
 				fmt.Fprintf(w, "Driver Options:\t%s\n", strings.Join(driverOpts, " "))
 			}
 
-			if err := ngi.drivers[i].di.Err; err != nil {
+			if err := n.Err; err != nil {
 				fmt.Fprintf(w, "Error:\t%s\n", err.Error())
-			} else if err := ngi.drivers[i].err; err != nil {
-				fmt.Fprintf(w, "Error:\t%s\n", err.Error())
-			} else if bootNgi != nil && len(bootNgi.drivers) > i && bootNgi.drivers[i].err != nil {
-				fmt.Fprintf(w, "Error:\t%s\n", bootNgi.drivers[i].err.Error())
 			} else {
-				fmt.Fprintf(w, "Status:\t%s\n", ngi.drivers[i].info.Status)
-				if len(n.Flags) > 0 {
-					fmt.Fprintf(w, "Flags:\t%s\n", strings.Join(n.Flags, " "))
+				fmt.Fprintf(w, "Status:\t%s\n", nodes[i].DriverInfo.Status)
+				if len(n.BuildkitdFlags) > 0 {
+					fmt.Fprintf(w, "BuildKit daemon flags:\t%s\n", strings.Join(n.BuildkitdFlags, " "))
+				}
+				if nodes[i].Version != "" {
+					fmt.Fprintf(w, "BuildKit version:\t%s\n", nodes[i].Version)
+				}
+				platforms := platformutil.FormatInGroups(n.Node.Platforms, n.Platforms)
+				if len(platforms) > 0 {
+					fmt.Fprintf(w, "Platforms:\t%s\n", strings.Join(platforms, ", "))
+				}
+				if debug.IsEnabled() {
+					fmt.Fprintf(w, "Features:\n")
+					features := nodes[i].Driver.Features(ctx)
+					featKeys := make([]string, 0, len(features))
+					for k := range features {
+						featKeys = append(featKeys, string(k))
+					}
+					sort.Strings(featKeys)
+					for _, k := range featKeys {
+						fmt.Fprintf(w, "\t%s:\t%t\n", k, features[driver.Feature(k)])
+					}
+				}
+				if len(nodes[i].Labels) > 0 {
+					fmt.Fprintf(w, "Labels:\n")
+					for _, k := range sortedKeys(nodes[i].Labels) {
+						v := nodes[i].Labels[k]
+						fmt.Fprintf(w, "\t%s:\t%s\n", k, v)
+					}
+				}
+
+				if len(nodes[i].CDIDevices) > 0 {
+					fmt.Fprintf(w, "Devices:\n")
+					for _, dev := range nodes[i].CDIDevices {
+						fmt.Fprintf(w, "\tName:\t%s\n", dev.Name)
+						if dev.OnDemand {
+							fmt.Fprintf(w, "\tOn-Demand:\t%v\n", dev.OnDemand)
+						} else {
+							fmt.Fprintf(w, "\tAutomatically allowed:\t%v\n", dev.AutoAllow)
+						}
+						if len(dev.Annotations) > 0 {
+							fmt.Fprintf(w, "\tAnnotations:\n")
+							for k, v := range dev.Annotations {
+								fmt.Fprintf(w, "\t\t%s:\t%s\n", k, v)
+							}
+						}
+					}
+				}
+
+				for ri, rule := range nodes[i].GCPolicy {
+					fmt.Fprintf(w, "GC Policy rule#%d:\n", ri)
+					fmt.Fprintf(w, "\tAll:\t%v\n", rule.All)
+					if len(rule.Filter) > 0 {
+						fmt.Fprintf(w, "\tFilters:\t%s\n", strings.Join(rule.Filter, " "))
+					}
+					if rule.KeepDuration > 0 {
+						fmt.Fprintf(w, "\tKeep Duration:\t%v\n", rule.KeepDuration.String())
+					}
+					if rule.ReservedSpace > 0 {
+						fmt.Fprintf(w, "\tReserved Space:\t%s\n", units.BytesSize(float64(rule.ReservedSpace)))
+					}
+					if rule.MaxUsedSpace > 0 {
+						fmt.Fprintf(w, "\tMax Used Space:\t%s\n", units.BytesSize(float64(rule.MaxUsedSpace)))
+					}
+					if rule.MinFreeSpace > 0 {
+						fmt.Fprintf(w, "\tMin Free Space:\t%s\n", units.BytesSize(float64(rule.MinFreeSpace)))
+					}
+				}
+				for f, dt := range nodes[i].Files {
+					fmt.Fprintf(w, "File#%s:\n", f)
+					for _, line := range strings.Split(string(dt), "\n") {
+						fmt.Fprintf(w, "\t> %s\n", line)
+					}
 				}
 			}
-			fmt.Fprintf(w, "Platforms:\t%s\n", strings.Join(platformutil.FormatInGroups(n.Platforms, ngi.drivers[i].platforms), ", "))
 		}
 	}
 }
@@ -137,8 +180,9 @@ func inspectCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
 			if len(args) > 0 {
 				options.builder = args[0]
 			}
-			return runInspect(dockerCli, options)
+			return runInspect(cmd.Context(), dockerCli, options)
 		},
+		ValidArgsFunction: completion.BuilderNames(dockerCli),
 	}
 
 	flags := cmd.Flags()
@@ -146,3 +190,14 @@ func inspectCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
 
 	return cmd
 }
+
+func sortedKeys(m map[string]string) []string {
+	s := make([]string, len(m))
+	i := 0
+	for k := range m {
+		s[i] = k
+		i++
+	}
+	sort.Strings(s)
+	return s
+}
@@ -4,6 +4,7 @@ import (
 	"os"
 
 	"github.com/docker/buildx/util/cobrautil"
+	"github.com/docker/buildx/util/cobrautil/completion"
 	"github.com/docker/cli/cli"
 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/cli/config"
@@ -14,7 +15,7 @@ import (
 type installOptions struct {
 }
 
-func runInstall(dockerCli command.Cli, in installOptions) error {
+func runInstall(_ command.Cli, _ installOptions) error {
 	dir := config.Dir()
 	if err := os.MkdirAll(dir, 0755); err != nil {
 		return errors.Wrap(err, "could not create docker config")
@@ -47,6 +48,7 @@ func installCmd(dockerCli command.Cli) *cobra.Command {
 			return runInstall(dockerCli, options)
 		},
 		Hidden: true,
+		ValidArgsFunction: completion.Disable,
 	}
 
 	// hide builder persistent flag for this command
451 commands/ls.go
@@ -2,82 +2,72 @@ package commands

import (
    "context"
+   "encoding/json"
    "fmt"
-   "io"
+   "maps"
    "sort"
    "strings"
-   "text/tabwriter"
    "time"

+   "github.com/containerd/platforms"
+   "github.com/docker/buildx/builder"
    "github.com/docker/buildx/store"
    "github.com/docker/buildx/store/storeutil"
    "github.com/docker/buildx/util/cobrautil"
+   "github.com/docker/buildx/util/cobrautil/completion"
    "github.com/docker/buildx/util/platformutil"
    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/command"
-   "github.com/moby/buildkit/util/appcontext"
+   "github.com/docker/cli/cli/command/formatter"
+   "github.com/pkg/errors"
    "github.com/spf13/cobra"
    "golang.org/x/sync/errgroup"
)

+const (
+   lsNameNodeHeader       = "NAME/NODE"
+   lsDriverEndpointHeader = "DRIVER/ENDPOINT"
+   lsStatusHeader         = "STATUS"
+   lsLastActivityHeader   = "LAST ACTIVITY"
+   lsBuildkitHeader       = "BUILDKIT"
+   lsPlatformsHeader      = "PLATFORMS"
+
+   lsIndent = ` \_ `
+
+   lsDefaultTableFormat = "table {{.Name}}\t{{.DriverEndpoint}}\t{{.Status}}\t{{.Buildkit}}\t{{.Platforms}}"
+)
+
type lsOptions struct {
+   format  string
+   noTrunc bool
}

-func runLs(dockerCli command.Cli, in lsOptions) error {
+func runLs(ctx context.Context, dockerCli command.Cli, in lsOptions) error {
-   ctx := appcontext.Context()

    txn, release, err := storeutil.GetStore(dockerCli)
    if err != nil {
        return err
    }
    defer release()

-   ctx, cancel := context.WithTimeout(ctx, 20*time.Second)
+   current, err := storeutil.GetCurrentInstance(txn, dockerCli)
-   defer cancel()

-   ll, err := txn.List()
    if err != nil {
        return err
    }

-   builders := make([]*nginfo, len(ll))
+   builders, err := builder.GetBuilders(dockerCli, txn)
-   for i, ng := range ll {
-       builders[i] = &nginfo{ng: ng}
-   }

-   contexts, err := dockerCli.ContextStore().List()
    if err != nil {
        return err
    }
-   sort.Slice(contexts, func(i, j int) bool {
-       return contexts[i].Name < contexts[j].Name
-   })
-   for _, c := range contexts {
-       ngi := &nginfo{ng: &store.NodeGroup{
-           Name: c.Name,
-           Nodes: []store.Node{{
-               Name:     c.Name,
-               Endpoint: c.Name,
-           }},
-       }}
-       // if a context has the same name as an instance from the store, do not
-       // add it to the builders list. An instance from the store takes
-       // precedence over context builders.
-       if hasNodeGroup(builders, ngi) {
-           continue
-       }
-       builders = append(builders, ngi)
-   }

-   eg, _ := errgroup.WithContext(ctx)
+   timeoutCtx, cancel := context.WithCancelCause(ctx)
+   timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
+   defer func() { cancel(errors.WithStack(context.Canceled)) }()
+
+   eg, _ := errgroup.WithContext(timeoutCtx)
    for _, b := range builders {
-       func(b *nginfo) {
+       func(b *builder.Builder) {
            eg.Go(func() error {
-               err = loadNodeGroupData(ctx, dockerCli, b)
-               if b.err == nil && err != nil {
-                   b.err = err
-               }
+               _, _ = b.LoadNodes(timeoutCtx, builder.WithData())
                return nil
            })
        }(b)
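The reworked runLs bounds the per-builder status loads with a cause-aware timeout context and an errgroup; the error from LoadNodes is deliberately discarded inside each goroutine (the goroutine always returns nil) so one slow or broken builder cannot abort the whole listing, and failures surface later through the builder's own error state. A minimal, self-contained sketch of that pattern follows; the builder names and the fake load are illustrative stand-ins, not buildx code:

package main

import (
    "context"
    "errors"
    "fmt"
    "time"

    "golang.org/x/sync/errgroup"
)

func main() {
    ctx := context.Background()

    // Derive a cancellable context with an explicit cause, then cap it with a
    // deadline; cleanup relies on cancelling the parent-derived context.
    timeoutCtx, cancel := context.WithCancelCause(ctx)
    timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 20*time.Second, context.DeadlineExceeded) //nolint:lostcancel
    defer cancel(errors.New("done"))

    eg, _ := errgroup.WithContext(timeoutCtx)
    for _, name := range []string{"builder-a", "builder-b"} {
        name := name
        eg.Go(func() error {
            // Stand-in for b.LoadNodes(timeoutCtx, builder.WithData()):
            // record the result (or error) on the builder and always return
            // nil, so partial results can still be rendered.
            select {
            case <-timeoutCtx.Done():
                fmt.Println(name, "skipped:", context.Cause(timeoutCtx))
            case <-time.After(50 * time.Millisecond):
                fmt.Println(name, "loaded")
            }
            return nil
        })
    }
    _ = eg.Wait()
}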
@@ -87,51 +77,17 @@ func runLs(dockerCli command.Cli, in lsOptions) error {
        return err
    }

-   currentName := "default"
+   if hasErrors, err := lsPrint(dockerCli, current, builders, in); err != nil {
-   current, err := storeutil.GetCurrentInstance(txn, dockerCli)
-   if err != nil {
        return err
-   }
+   } else if hasErrors {
-   if current != nil {
-       currentName = current.Name
-       if current.Name == "default" {
-           currentName = current.Nodes[0].Endpoint
-       }
-   }
-
-   w := tabwriter.NewWriter(dockerCli.Out(), 0, 0, 1, ' ', 0)
-   fmt.Fprintf(w, "NAME/NODE\tDRIVER/ENDPOINT\tSTATUS\tBUILDKIT\tPLATFORMS\n")
-
-   currentSet := false
-   printErr := false
-   for _, b := range builders {
-       if !currentSet && b.ng.Name == currentName {
-           b.ng.Name += " *"
-           currentSet = true
-       }
-       if ok := printngi(w, b); !ok {
-           printErr = true
-       }
-   }
-
-   w.Flush()
-
-   if printErr {
        _, _ = fmt.Fprintf(dockerCli.Err(), "\n")
        for _, b := range builders {
-           if b.err != nil {
+           if b.Err() != nil {
-               _, _ = fmt.Fprintf(dockerCli.Err(), "Cannot load builder %s: %s\n", b.ng.Name, strings.TrimSpace(b.err.Error()))
+               _, _ = fmt.Fprintf(dockerCli.Err(), "Cannot load builder %s: %s\n", b.Name, strings.TrimSpace(b.Err().Error()))
            } else {
-               for idx, n := range b.ng.Nodes {
+               for _, d := range b.Nodes() {
-                   d := b.drivers[idx]
+                   if d.Err != nil {
-                   var nodeErr string
+                       _, _ = fmt.Fprintf(dockerCli.Err(), "Failed to get status for %s (%s): %s\n", b.Name, d.Name, strings.TrimSpace(d.Err.Error()))
-                   if d.err != nil {
-                       nodeErr = d.err.Error()
-                   } else if d.di.Err != nil {
-                       nodeErr = d.di.Err.Error()
-                   }
-                   if nodeErr != "" {
-                       _, _ = fmt.Fprintf(dockerCli.Err(), "Failed to get status for %s (%s): %s\n", b.ng.Name, n.Name, strings.TrimSpace(nodeErr))
                    }
                }
            }
@@ -141,32 +97,6 @@ func runLs(dockerCli command.Cli, in lsOptions) error {
    return nil
}

-func printngi(w io.Writer, ngi *nginfo) (ok bool) {
-   ok = true
-   var err string
-   if ngi.err != nil {
-       ok = false
-       err = "error"
-   }
-   fmt.Fprintf(w, "%s\t%s\t%s\t\t\n", ngi.ng.Name, ngi.ng.Driver, err)
-   if ngi.err == nil {
-       for idx, n := range ngi.ng.Nodes {
-           d := ngi.drivers[idx]
-           var status string
-           if d.info != nil {
-               status = d.info.Status.String()
-           }
-           if d.err != nil || d.di.Err != nil {
-               ok = false
-               fmt.Fprintf(w, "  %s\t%s\t%s\t\t\n", n.Name, n.Endpoint, "error")
-           } else {
-               fmt.Fprintf(w, "  %s\t%s\t%s\t%s\t%s\n", n.Name, n.Endpoint, status, d.version, strings.Join(platformutil.FormatInGroups(n.Platforms, d.platforms), ", "))
-           }
-       }
-   }
-   return
-}
-
func lsCmd(dockerCli command.Cli) *cobra.Command {
    var options lsOptions

@@ -175,12 +105,315 @@ func lsCmd(dockerCli command.Cli) *cobra.Command {
        Short: "List builder instances",
        Args:  cli.ExactArgs(0),
        RunE: func(cmd *cobra.Command, args []string) error {
-           return runLs(dockerCli, options)
+           return runLs(cmd.Context(), dockerCli, options)
        },
+       ValidArgsFunction: completion.Disable,
    }

+   flags := cmd.Flags()
+   flags.StringVar(&options.format, "format", formatter.TableFormatKey, "Format the output")
+   flags.BoolVar(&options.noTrunc, "no-trunc", false, "Don't truncate output")
+
    // hide builder persistent flag for this command
    cobrautil.HideInheritedFlags(cmd, "builder")

    return cmd
}

+func lsPrint(dockerCli command.Cli, current *store.NodeGroup, builders []*builder.Builder, in lsOptions) (hasErrors bool, _ error) {
+   if in.format == formatter.TableFormatKey {
+       in.format = lsDefaultTableFormat
+   }
+
+   ctx := formatter.Context{
+       Output: dockerCli.Out(),
+       Format: formatter.Format(in.format),
+       Trunc:  !in.noTrunc,
+   }
+
+   sort.SliceStable(builders, func(i, j int) bool {
+       ierr := builders[i].Err() != nil
+       jerr := builders[j].Err() != nil
+       if ierr && !jerr {
+           return false
+       } else if !ierr && jerr {
+           return true
+       }
+       return i < j
+   })
+
+   render := func(format func(subContext formatter.SubContext) error) error {
+       for _, b := range builders {
+           if err := format(&lsContext{
+               format: ctx.Format,
+               trunc:  ctx.Trunc,
+               Builder: &lsBuilder{
+                   Builder: b,
+                   Current: b.Name == current.Name,
+               },
+           }); err != nil {
+               return err
+           }
+           if b.Err() != nil {
+               if ctx.Format.IsTable() {
+                   hasErrors = true
+               }
+               continue
+           }
+           if ctx.Format.IsJSON() {
+               continue
+           }
+           for _, n := range b.Nodes() {
+               if n.Err != nil {
+                   if ctx.Format.IsTable() {
+                       hasErrors = true
+                   }
+               }
+               if err := format(&lsContext{
+                   format: ctx.Format,
+                   trunc:  ctx.Trunc,
+                   Builder: &lsBuilder{
+                       Builder: b,
+                       Current: b.Name == current.Name,
+                   },
+                   node: n,
+               }); err != nil {
+                   return err
+               }
+           }
+       }
+       return nil
+   }
+
+   lsCtx := lsContext{}
+   lsCtx.Header = formatter.SubHeaderContext{
+       "Name":           lsNameNodeHeader,
+       "DriverEndpoint": lsDriverEndpointHeader,
+       "LastActivity":   lsLastActivityHeader,
+       "Status":         lsStatusHeader,
+       "Buildkit":       lsBuildkitHeader,
+       "Platforms":      lsPlatformsHeader,
+   }
+
+   return hasErrors, ctx.Write(&lsCtx, render)
+}
+
+type lsBuilder struct {
+   *builder.Builder
+   Current bool
+}
+
+type lsContext struct {
+   formatter.HeaderContext
+   Builder *lsBuilder
+
+   format formatter.Format
+   trunc  bool
+   node   builder.Node
+}
+
+func (c *lsContext) MarshalJSON() ([]byte, error) {
+   return json.Marshal(c.Builder)
+}
+
+func (c *lsContext) Name() string {
+   if c.node.Name == "" {
+       name := c.Builder.Name
+       if c.Builder.Current && c.format.IsTable() {
+           name += "*"
+       }
+       return name
+   }
+   if c.format.IsTable() {
+       return lsIndent + c.node.Name
+   }
+   return c.node.Name
+}
+
+func (c *lsContext) DriverEndpoint() string {
+   if c.node.Name == "" {
+       return c.Builder.Driver
+   }
+   if c.format.IsTable() {
+       return lsIndent + c.node.Endpoint
+   }
+   return c.node.Endpoint
+}
+
+func (c *lsContext) LastActivity() string {
+   if c.node.Name != "" || c.Builder.LastActivity.IsZero() {
+       return ""
+   }
+   return c.Builder.LastActivity.UTC().Format(time.RFC3339)
+}
+
+func (c *lsContext) Status() string {
+   if c.node.Name == "" {
+       if c.Builder.Err() != nil {
+           return "error"
+       }
+       return ""
+   }
+   if c.node.Err != nil {
+       return "error"
+   }
+   if c.node.DriverInfo != nil {
+       return c.node.DriverInfo.Status.String()
+   }
+   return ""
+}
+
+func (c *lsContext) Buildkit() string {
+   if c.node.Name == "" {
+       return ""
+   }
+   return c.node.Version
+}
+
+func (c *lsContext) Platforms() string {
+   if c.node.Name == "" {
+       return ""
+   }
+   pfs := platformutil.FormatInGroups(c.node.Node.Platforms, c.node.Platforms)
+   if c.trunc && c.format.IsTable() {
+       return truncPlatforms(pfs, 4).String()
+   }
+   return strings.Join(pfs, ", ")
+}
+
+func (c *lsContext) Error() string {
+   if c.node.Name != "" && c.node.Err != nil {
+       return c.node.Err.Error()
+   } else if err := c.Builder.Err(); err != nil {
+       return err.Error()
+   }
+   return ""
+}
+
+var truncMajorPlatforms = []string{
+   "linux/amd64",
+   "linux/arm64",
+   "linux/arm",
+   "linux/ppc64le",
+   "linux/s390x",
+   "linux/riscv64",
+   "linux/mips64",
+}
+
+type truncatedPlatforms struct {
+   res   map[string][]string
+   input []string
+   max   int
+}
+
+func (tp truncatedPlatforms) List() map[string][]string {
+   return tp.res
+}
+
+func (tp truncatedPlatforms) String() string {
+   var out []string
+   var count int
+
+   var keys []string
+   for k := range tp.res {
+       keys = append(keys, k)
+   }
+   sort.Strings(keys)
+
+   seen := make(map[string]struct{})
+   for _, mpf := range truncMajorPlatforms {
+       if tpf, ok := tp.res[mpf]; ok {
+           seen[mpf] = struct{}{}
+           if len(tpf) == 1 {
+               out = append(out, tpf[0])
+               count++
+           } else {
+               hasPreferredPlatform := false
+               for _, pf := range tpf {
+                   if strings.HasSuffix(pf, "*") {
+                       hasPreferredPlatform = true
+                       break
+                   }
+               }
+               mainpf := mpf
+               if hasPreferredPlatform {
+                   mainpf += "*"
+               }
+               out = append(out, fmt.Sprintf("%s (+%d)", mainpf, len(tpf)))
+               count += len(tpf)
+           }
+       }
+   }
+
+   for _, mpf := range keys {
+       if len(out) >= tp.max {
+           break
+       }
+       if _, ok := seen[mpf]; ok {
+           continue
+       }
+       if len(tp.res[mpf]) == 1 {
+           out = append(out, tp.res[mpf][0])
+           count++
+       } else {
+           hasPreferredPlatform := false
+           for _, pf := range tp.res[mpf] {
+               if strings.HasSuffix(pf, "*") {
+                   hasPreferredPlatform = true
+                   break
+               }
+           }
+           mainpf := mpf
+           if hasPreferredPlatform {
+               mainpf += "*"
+           }
+           out = append(out, fmt.Sprintf("%s (+%d)", mainpf, len(tp.res[mpf])))
+           count += len(tp.res[mpf])
+       }
+   }
+
+   left := len(tp.input) - count
+   if left > 0 {
+       out = append(out, fmt.Sprintf("(%d more)", left))
+   }
+
+   return strings.Join(out, ", ")
+}
+
+func truncPlatforms(pfs []string, max int) truncatedPlatforms {
+   res := make(map[string][]string)
+   for _, mpf := range truncMajorPlatforms {
+       for _, pf := range pfs {
+           if len(res) >= max {
+               break
+           }
+           pp, err := platforms.Parse(strings.TrimSuffix(pf, "*"))
+           if err != nil {
+               continue
+           }
+           if pp.OS+"/"+pp.Architecture == mpf {
+               res[mpf] = append(res[mpf], pf)
+           }
+       }
+   }
+   left := make(map[string][]string)
+   for _, pf := range pfs {
+       if len(res) >= max {
+           break
+       }
+       pp, err := platforms.Parse(strings.TrimSuffix(pf, "*"))
+       if err != nil {
+           continue
+       }
+       ppf := strings.TrimSuffix(pp.OS+"/"+pp.Architecture, "*")
+       if _, ok := res[ppf]; !ok {
+           left[ppf] = append(left[ppf], pf)
+       }
+   }
+   maps.Copy(res, left)
+   return truncatedPlatforms{
+       res:   res,
+       input: pfs,
+       max:   max,
+   }
+}
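With lsPrint wired to the docker/cli formatter, the ls output becomes templatable over the lsContext fields defined above. As an illustrative invocation (not an exhaustive reference), something like docker buildx ls --format '{{.Name}}: {{.Status}} {{.Platforms}}' selects individual columns, --format json should emit the builders as JSON through lsContext.MarshalJSON, and --no-trunc disables the platform-list truncation performed by truncPlatforms.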
commands/ls_test.go (new file, 174 lines)
@@ -0,0 +1,174 @@
package commands

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestTruncPlatforms(t *testing.T) {
    tests := []struct {
        name         string
        platforms    []string
        max          int
        expectedList map[string][]string
        expectedOut  string
    }{
        {
            name:      "arm64 preferred and emulated",
            platforms: []string{"linux/arm64*", "linux/amd64", "linux/amd64/v2", "linux/riscv64", "linux/ppc64le", "linux/s390x", "linux/386", "linux/mips64le", "linux/mips64", "linux/arm/v7", "linux/arm/v6"},
            max:       4,
            expectedList: map[string][]string{
                "linux/amd64": {
                    "linux/amd64",
                    "linux/amd64/v2",
                },
                "linux/arm": {
                    "linux/arm/v7",
                    "linux/arm/v6",
                },
                "linux/arm64": {
                    "linux/arm64*",
                },
                "linux/ppc64le": {
                    "linux/ppc64le",
                },
            },
            expectedOut: "linux/amd64 (+2), linux/arm64*, linux/arm (+2), linux/ppc64le, (5 more)",
        },
        {
            name:      "riscv64 preferred only",
            platforms: []string{"linux/riscv64*"},
            max:       4,
            expectedList: map[string][]string{
                "linux/riscv64": {
                    "linux/riscv64*",
                },
            },
            expectedOut: "linux/riscv64*",
        },
        {
            name:      "amd64 no preferred and emulated",
            platforms: []string{"linux/amd64", "linux/amd64/v2", "linux/amd64/v3", "linux/386", "linux/arm64", "linux/riscv64", "linux/ppc64le", "linux/s390x", "linux/mips64le", "linux/mips64", "linux/arm/v7", "linux/arm/v6"},
            max:       4,
            expectedList: map[string][]string{
                "linux/amd64": {
                    "linux/amd64",
                    "linux/amd64/v2",
                    "linux/amd64/v3",
                },
                "linux/arm": {
                    "linux/arm/v7",
                    "linux/arm/v6",
                },
                "linux/arm64": {
                    "linux/arm64",
                },
                "linux/ppc64le": {
                    "linux/ppc64le",
                }},
            expectedOut: "linux/amd64 (+3), linux/arm64, linux/arm (+2), linux/ppc64le, (5 more)",
        },
        {
            name:      "amd64 no preferred",
            platforms: []string{"linux/amd64", "linux/386"},
            max:       4,
            expectedList: map[string][]string{
                "linux/386": {
                    "linux/386",
                },
                "linux/amd64": {
                    "linux/amd64",
                },
            },
            expectedOut: "linux/amd64, linux/386",
        },
        {
            name:      "arm64 no preferred",
            platforms: []string{"linux/arm64", "linux/arm/v7", "linux/arm/v6"},
            max:       4,
            expectedList: map[string][]string{
                "linux/arm": {
                    "linux/arm/v7",
                    "linux/arm/v6",
                },
                "linux/arm64": {
                    "linux/arm64",
                },
            },
            expectedOut: "linux/arm64, linux/arm (+2)",
        },
        {
            name:      "all preferred",
            platforms: []string{"darwin/arm64*", "linux/arm64*", "linux/arm/v5*", "linux/arm/v6*", "linux/arm/v7*", "windows/arm64*"},
            max:       4,
            expectedList: map[string][]string{
                "darwin/arm64": {
                    "darwin/arm64*",
                },
                "linux/arm": {
                    "linux/arm/v5*",
                    "linux/arm/v6*",
                    "linux/arm/v7*",
                },
                "linux/arm64": {
                    "linux/arm64*",
                },
                "windows/arm64": {
                    "windows/arm64*",
                },
            },
            expectedOut: "linux/arm64*, linux/arm* (+3), darwin/arm64*, windows/arm64*",
        },
        {
            name:      "no major preferred",
            platforms: []string{"linux/amd64/v2*", "linux/arm/v6*", "linux/mips64le*", "linux/amd64", "linux/amd64/v3", "linux/386", "linux/arm64", "linux/riscv64", "linux/ppc64le", "linux/s390x", "linux/mips64", "linux/arm/v7"},
            max:       4,
            expectedList: map[string][]string{
                "linux/amd64": {
                    "linux/amd64/v2*",
                    "linux/amd64",
                    "linux/amd64/v3",
                },
                "linux/arm": {
                    "linux/arm/v6*",
                    "linux/arm/v7",
                },
                "linux/arm64": {
                    "linux/arm64",
                },
                "linux/ppc64le": {
                    "linux/ppc64le",
                },
            },
            expectedOut: "linux/amd64* (+3), linux/arm64, linux/arm* (+2), linux/ppc64le, (5 more)",
        },
        {
            name:      "no major with multiple variants",
            platforms: []string{"linux/arm64", "linux/arm/v7", "linux/arm/v6", "linux/mips64le/softfloat", "linux/mips64le/hardfloat"},
            max:       4,
            expectedList: map[string][]string{
                "linux/arm": {
                    "linux/arm/v7",
                    "linux/arm/v6",
                },
                "linux/arm64": {
                    "linux/arm64",
                },
                "linux/mips64le": {
                    "linux/mips64le/softfloat",
                    "linux/mips64le/hardfloat",
                },
            },
            expectedOut: "linux/arm64, linux/arm (+2), linux/mips64le (+2)",
        },
    }
    for _, tt := range tests {
        tt := tt
        t.Run(tt.name, func(t *testing.T) {
            tpfs := truncPlatforms(tt.platforms, tt.max)
            assert.Equal(t, tt.expectedList, tpfs.List())
            assert.Equal(t, tt.expectedOut, tpfs.String())
        })
    }
}
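A quick way to exercise the truncation cases above in isolation is a targeted test run, for example go test ./commands -run TestTruncPlatforms (assuming a checkout with this file in place).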
(deleted file)
@@ -1,48 +0,0 @@
package commands

import (
    "fmt"
    "io"
    "log"
    "os"

    "github.com/docker/buildx/build"
    "github.com/docker/docker/api/types/versions"
    "github.com/moby/buildkit/frontend/subrequests"
    "github.com/moby/buildkit/frontend/subrequests/outline"
    "github.com/moby/buildkit/frontend/subrequests/targets"
)

func printResult(f *build.PrintFunc, res map[string]string) error {
    switch f.Name {
    case "outline":
        return printValue(outline.PrintOutline, outline.SubrequestsOutlineDefinition.Version, f.Format, res)
    case "targets":
        return printValue(targets.PrintTargets, targets.SubrequestsTargetsDefinition.Version, f.Format, res)
    case "subrequests.describe":
        return printValue(subrequests.PrintDescribe, subrequests.SubrequestsDescribeDefinition.Version, f.Format, res)
    default:
        if dt, ok := res["result.txt"]; ok {
            fmt.Print(dt)
        } else {
            log.Printf("%s %+v", f, res)
        }
    }
    return nil
}

type printFunc func([]byte, io.Writer) error

func printValue(printer printFunc, version string, format string, res map[string]string) error {
    if format == "json" {
        fmt.Fprintln(os.Stdout, res["result.json"])
        return nil
    }

    if res["version"] != "" && versions.LessThan(version, res["version"]) && res["result.txt"] != "" {
        // structure is too new and we don't know how to print it
        fmt.Fprint(os.Stdout, res["result.txt"])
        return nil
    }
    return printer([]byte(res["result.json"]), os.Stdout)
}
Some files were not shown because too many files have changed in this diff.