mirror of
https://gitea.com/Lydanne/buildx.git
synced 2025-09-09 04:19:07 +08:00
Compare commits
967 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
257815a6fb | ||
|
|
dbccfa60a7 | ||
|
|
59cb959195 | ||
|
|
dd0d53efd5 | ||
|
|
e8ceaad0a8 | ||
|
|
e5a6b8e140 | ||
|
|
78c8c28cf5 | ||
|
|
4173281da3 | ||
|
|
40ef3446f5 | ||
|
|
7213b2a814 | ||
|
|
9cfa25ab40 | ||
|
|
6db3444a25 | ||
|
|
15e930b691 | ||
|
|
abc5eaed88 | ||
|
|
ad9a5196b3 | ||
|
|
db117855da | ||
|
|
ecfe98df6f | ||
|
|
479177eaf9 | ||
|
|
194f523fe1 | ||
|
|
29d367bdd4 | ||
|
|
ed341bafd0 | ||
|
|
c887c2c62a | ||
|
|
7c481aae20 | ||
|
|
f0f8876902 | ||
|
|
fa1d19bb1e | ||
|
|
7bea00f3dd | ||
|
|
83d5c0c61b | ||
|
|
e58a1d35d1 | ||
|
|
b920b08ad3 | ||
|
|
f369377d74 | ||
|
|
b7486e5cd5 | ||
|
|
5ecff53e0c | ||
|
|
48faab5890 | ||
|
|
f77866f5b4 | ||
|
|
203fd8aee5 | ||
|
|
806ccd3545 | ||
|
|
d6e030eda7 | ||
|
|
96eb69aea4 | ||
|
|
d1d8d6e19c | ||
|
|
dc7f679ab1 | ||
|
|
e403ab2d63 | ||
|
|
b6a2c96926 | ||
|
|
7a7a9c8e01 | ||
|
|
fa8f859159 | ||
|
|
8411a763d9 | ||
|
|
6c5279da54 | ||
|
|
0e64eb4f8b | ||
|
|
adbcc2225e | ||
|
|
e00efeb399 | ||
|
|
d03c13b947 | ||
|
|
4787b5c046 | ||
|
|
1c66f293c7 | ||
|
|
246a36d463 | ||
|
|
a4adae3d6b | ||
|
|
36cd88f8ca | ||
|
|
07a85a544b | ||
|
|
f64b85afe6 | ||
|
|
4b27fb3022 | ||
|
|
38a8261f05 | ||
|
|
a3e6f4be15 | ||
|
|
6467a86427 | ||
|
|
58571ff6d6 | ||
|
|
71174c3041 | ||
|
|
16860e6dd2 | ||
|
|
8e02b1a2f7 | ||
|
|
531c6d4ff1 | ||
|
|
238a3e03dd | ||
|
|
9a0c320588 | ||
|
|
acf0216292 | ||
|
|
5a50d13641 | ||
|
|
2810f20f3a | ||
|
|
e2f6808457 | ||
|
|
39bbb9e478 | ||
|
|
771f0139ac | ||
|
|
6034c58285 | ||
|
|
199890ff51 | ||
|
|
d391b1d3e6 | ||
|
|
f4da6b8f69 | ||
|
|
386d599309 | ||
|
|
d130f8ef0a | ||
|
|
b691a10379 | ||
|
|
e628f9ea14 | ||
|
|
0fb0b6db0d | ||
|
|
6efb1d7cdc | ||
|
|
bc2748da59 | ||
|
|
d4c4632cf6 | ||
|
|
cdd46af015 | ||
|
|
b62d64b2b5 | ||
|
|
64171cb13e | ||
|
|
f28dff7598 | ||
|
|
3d542f3d31 | ||
|
|
30dbdcfa3e | ||
|
|
16518091cd | ||
|
|
897fc91802 | ||
|
|
c4d3011a98 | ||
|
|
a47f761c55 | ||
|
|
aa35c954f3 | ||
|
|
56df4e98a0 | ||
|
|
9f00a9eafa | ||
|
|
56cb197c0a | ||
|
|
466006849a | ||
|
|
738f5ee9db | ||
|
|
9b49cf3ae6 | ||
|
|
bd0b425734 | ||
|
|
7823a2dc01 | ||
|
|
cedbc5d68d | ||
|
|
12d431d1b4 | ||
|
|
ca452c47d8 | ||
|
|
d8f26f79ed | ||
|
|
4304d388ef | ||
|
|
96509847b9 | ||
|
|
52bb668085 | ||
|
|
85cf3bace9 | ||
|
|
b92bfb53d2 | ||
|
|
6c929a45c7 | ||
|
|
d296d5d46a | ||
|
|
6e433da23f | ||
|
|
3005743f7c | ||
|
|
d64d3a4caf | ||
|
|
0d37d68efd | ||
|
|
03a691a0a5 | ||
|
|
fa392a2dca | ||
|
|
470e45e599 | ||
|
|
2a2648b1db | ||
|
|
ac930bda69 | ||
|
|
6791ecb628 | ||
|
|
d717237e4f | ||
|
|
ee642ecc4c | ||
|
|
06d96d665e | ||
|
|
dc83501a5b | ||
|
|
0f74f9a794 | ||
|
|
6d6adc11a1 | ||
|
|
68076909b9 | ||
|
|
7957b73a30 | ||
|
|
1dceb49a27 | ||
|
|
b96ad59f64 | ||
|
|
50aa895477 | ||
|
|
74374ea418 | ||
|
|
6bbe59697a | ||
|
|
c51004e2e4 | ||
|
|
8535c6b455 | ||
|
|
153e5ed274 | ||
|
|
cc097db675 | ||
|
|
35313e865f | ||
|
|
233b869c63 | ||
|
|
7460f049f2 | ||
|
|
8f4c8b094a | ||
|
|
8da28574b0 | ||
|
|
7e49141c4e | ||
|
|
5ec703ba10 | ||
|
|
1ffc6f1d58 | ||
|
|
f65631546d | ||
|
|
6fc19c4024 | ||
|
|
5656c98133 | ||
|
|
263a9ddaee | ||
|
|
1774aa0cf0 | ||
|
|
7b80ad7069 | ||
|
|
c0c4d7172b | ||
|
|
e498ba9c27 | ||
|
|
2e7e7abe42 | ||
|
|
048ef1fbf8 | ||
|
|
cbe7901667 | ||
|
|
f374f64d2f | ||
|
|
4be2259719 | ||
|
|
6627f315cb | ||
|
|
19d838a3f4 | ||
|
|
17878d641e | ||
|
|
63eb73d9cf | ||
|
|
59a0ffcf83 | ||
|
|
2b17f277a1 | ||
|
|
ea7c8e83d2 | ||
|
|
9358c45b46 | ||
|
|
cfb7fc4fb5 | ||
|
|
d4b112ab05 | ||
|
|
f7a32361ea | ||
|
|
af902caeaa | ||
|
|
04000db8da | ||
|
|
b8da14166c | ||
|
|
c1f680df14 | ||
|
|
b6482ab6bb | ||
|
|
6f45b0ea06 | ||
|
|
3971361ed2 | ||
|
|
818045482e | ||
|
|
f8e1746d0d | ||
|
|
92a6799514 | ||
|
|
9358f84668 | ||
|
|
dbdd3601eb | ||
|
|
a3c8a72b54 | ||
|
|
4c3af9becf | ||
|
|
d8c9ebde1f | ||
|
|
01a50aac42 | ||
|
|
f7bcafed21 | ||
|
|
e5ded4b2de | ||
|
|
6ef443de41 | ||
|
|
076e19d0ce | ||
|
|
5599699d29 | ||
|
|
d155747029 | ||
|
|
9cebd0c80f | ||
|
|
7b1ec7211d | ||
|
|
689fd74104 | ||
|
|
0dfd315daa | ||
|
|
9b100c2552 | ||
|
|
92aaaa8f67 | ||
|
|
6111d9a00d | ||
|
|
310aaf1891 | ||
|
|
6c7e65c789 | ||
|
|
66b0abf078 | ||
|
|
6efa26c2de | ||
|
|
5b726afa5e | ||
|
|
009f318bbd | ||
|
|
9f7c8ea3fb | ||
|
|
be12199eb9 | ||
|
|
94355517c4 | ||
|
|
cb1be7214a | ||
|
|
f42a4a1e94 | ||
|
|
4d7365018c | ||
|
|
3d0951b800 | ||
|
|
bcd04d5a64 | ||
|
|
b00001d8ac | ||
|
|
31187735de | ||
|
|
3373a27f1f | ||
|
|
56698805a9 | ||
|
|
4c2e0c4307 | ||
|
|
fb6a3178c9 | ||
|
|
8ca18dee2d | ||
|
|
917d2f4a0a | ||
|
|
366328ba6a | ||
|
|
5f822b36d3 | ||
|
|
e423d096a6 | ||
|
|
927fb6731c | ||
|
|
314ca32446 | ||
|
|
3b25e3fa5c | ||
|
|
41d369120b | ||
|
|
56ffe55f81 | ||
|
|
6d5823beb1 | ||
|
|
c116af7b82 | ||
|
|
fb130243f8 | ||
|
|
29c8107b85 | ||
|
|
ee3baa54f7 | ||
|
|
9de95d81eb | ||
|
|
d3a53189f7 | ||
|
|
0496dae9d5 | ||
|
|
40fcf992b1 | ||
|
|
85c25f719c | ||
|
|
875e4cd52e | ||
|
|
24cedc6c0f | ||
|
|
59f52c9505 | ||
|
|
1e916ae6c6 | ||
|
|
d342cb9d03 | ||
|
|
9fdc99dc76 | ||
|
|
ab835fd904 | ||
|
|
87efbd43b5 | ||
|
|
39db6159f9 | ||
|
|
922328cbaf | ||
|
|
aa0f90fdd6 | ||
|
|
82b6826cd7 | ||
|
|
1e3aec1ae2 | ||
|
|
cfef22ddf0 | ||
|
|
9e5ba66553 | ||
|
|
9ceda78057 | ||
|
|
747b75a217 | ||
|
|
d8de5bb345 | ||
|
|
eff1850d53 | ||
|
|
a24043e9f1 | ||
|
|
0902294e1a | ||
|
|
ef4a165e48 | ||
|
|
89810dc998 | ||
|
|
250cd44d70 | ||
|
|
5afb210d43 | ||
|
|
03f84d2e83 | ||
|
|
945e774a02 | ||
|
|
947d6023e4 | ||
|
|
c58599ca50 | ||
|
|
f30e143428 | ||
|
|
53b7cbc5cb | ||
|
|
9a30215886 | ||
|
|
b1cb658a31 | ||
|
|
bc83ecb538 | ||
|
|
ceaa4534f9 | ||
|
|
9b6c4103af | ||
|
|
4549283f44 | ||
|
|
b2e907d5c2 | ||
|
|
7427adb9b0 | ||
|
|
1a93bbd3a5 | ||
|
|
1f28985d20 | ||
|
|
33a5528003 | ||
|
|
7bfae2b809 | ||
|
|
117c9016e1 | ||
|
|
388af3576a | ||
|
|
2061550bc1 | ||
|
|
abf6c77d91 | ||
|
|
9ad116aa8e | ||
|
|
e3d5e64ec9 | ||
|
|
0808747add | ||
|
|
2e7da01560 | ||
|
|
38d7d36f0a | ||
|
|
55c86543ca | ||
|
|
f98ef00ec7 | ||
|
|
b948b07e2d | ||
|
|
17c0a3794b | ||
|
|
c0a986b43b | ||
|
|
781dcbd196 | ||
|
|
37c4ff0944 | ||
|
|
6211f56b8d | ||
|
|
cc9ea87142 | ||
|
|
035236a5ed | ||
|
|
99777eaf34 | ||
|
|
cf68b5b878 | ||
|
|
3f1aaa68d5 | ||
|
|
f6830f3b86 | ||
|
|
4fc4bc07ae | ||
|
|
f6e57cf5b5 | ||
|
|
b77648d5f8 | ||
|
|
afcb609966 | ||
|
|
946e0a5d74 | ||
|
|
c4db5b252a | ||
|
|
8afeb56a3b | ||
|
|
fd801a12c1 | ||
|
|
2f98e6f3ac | ||
|
|
224c6a59bf | ||
|
|
cbb75bbfd5 | ||
|
|
72085dbdf0 | ||
|
|
480b53f529 | ||
|
|
f8c6a97edc | ||
|
|
d4f088e689 | ||
|
|
db3a8ad7ca | ||
|
|
1d88c4b169 | ||
|
|
6d95fb586e | ||
|
|
1fb5d2a9ee | ||
|
|
ba264138d6 | ||
|
|
6375dc7230 | ||
|
|
9cc6c7df70 | ||
|
|
7ea5cffb98 | ||
|
|
d2d21577fb | ||
|
|
e344e2251b | ||
|
|
833fe3b04f | ||
|
|
d0cc9ed0cb | ||
|
|
b30566438b | ||
|
|
ec98985b4e | ||
|
|
9428447cd2 | ||
|
|
6112c41637 | ||
|
|
a727de7d5f | ||
|
|
4a8fcb7aa0 | ||
|
|
771e66bf7a | ||
|
|
7e0ab1a003 | ||
|
|
e3e16ad088 | ||
|
|
f2823515db | ||
|
|
5ac9b78384 | ||
|
|
fbb0f9b424 | ||
|
|
699fa43f7f | ||
|
|
bdf27ee797 | ||
|
|
171fcbeb69 | ||
|
|
370a5aa127 | ||
|
|
13653fb84d | ||
|
|
1b16594f4a | ||
|
|
3905e8cf06 | ||
|
|
177b95c972 | ||
|
|
74fdbb5e7f | ||
|
|
ac331d3569 | ||
|
|
07c9b45bae | ||
|
|
b91957444b | ||
|
|
46c44c58ae | ||
|
|
6aed54c35a | ||
|
|
126fe653c7 | ||
|
|
f0cbc95eaf | ||
|
|
1a0f9fa96c | ||
|
|
df7a3db947 | ||
|
|
d294232cb5 | ||
|
|
0a7f5c4d94 | ||
|
|
5777d980b5 | ||
|
|
46cf94092c | ||
|
|
da3435ed3a | ||
|
|
3e90cc4b84 | ||
|
|
6418669e75 | ||
|
|
188495aa93 | ||
|
|
54a5c1ff93 | ||
|
|
2e2f9f571f | ||
|
|
d2ac1f2d6e | ||
|
|
7e3acad9f4 | ||
|
|
e04637cf34 | ||
|
|
b9c5f9f1ee | ||
|
|
92ab188781 | ||
|
|
dd4d52407f | ||
|
|
7432b483ce | ||
|
|
6e3164dc6f | ||
|
|
2fdb1682f8 | ||
|
|
7f1eaa2a8a | ||
|
|
fbddc9ebea | ||
|
|
d347499112 | ||
|
|
b1fb67f44a | ||
|
|
a9575a872a | ||
|
|
60f48059a7 | ||
|
|
ffff87be03 | ||
|
|
0a3e5e5257 | ||
|
|
151b0de8f2 | ||
|
|
e40c630758 | ||
|
|
ea3338c3f3 | ||
|
|
744c055560 | ||
|
|
ca0b583f5a | ||
|
|
e7f2da9c4f | ||
|
|
d805c784f2 | ||
|
|
a2866b79e3 | ||
|
|
12e1f65eb3 | ||
|
|
0d6b3a9d1d | ||
|
|
4b3c3c8401 | ||
|
|
ccc314a823 | ||
|
|
dc4b4c36bd | ||
|
|
5c29e6e26e | ||
|
|
6a0d5b771f | ||
|
|
59cc10767e | ||
|
|
b61b29f603 | ||
|
|
7cfef05661 | ||
|
|
4d39259f8e | ||
|
|
15fd39ebec | ||
|
|
a7d59ae332 | ||
|
|
e18a2f6e58 | ||
|
|
38fbd9a85c | ||
|
|
84ddbc2b3b | ||
|
|
b4799f9d16 | ||
|
|
7cded6b33b | ||
|
|
1b36bd0c4a | ||
|
|
7dc5639216 | ||
|
|
858e347306 | ||
|
|
adb9bc86e5 | ||
|
|
ef2e30deba | ||
|
|
c690d460e8 | ||
|
|
35781a6c78 | ||
|
|
de5efcb03b | ||
|
|
5c89004bb6 | ||
|
|
8abef59087 | ||
|
|
4999908fbc | ||
|
|
4af0ed5159 | ||
|
|
a4a8846e46 | ||
|
|
520dc5968a | ||
|
|
324afe60ad | ||
|
|
c0c3a55fca | ||
|
|
2a30229916 | ||
|
|
ed76661b0d | ||
|
|
a0cce9b31e | ||
|
|
d410597f5a | ||
|
|
9016d85718 | ||
|
|
2565c74a89 | ||
|
|
eab5cccbb4 | ||
|
|
e2be765e7b | ||
|
|
276dd5150f | ||
|
|
5c69fa267f | ||
|
|
b240a00def | ||
|
|
a8af6fa013 | ||
|
|
7eb3dfbd22 | ||
|
|
4b24f66a10 | ||
|
|
8d5b967f2d | ||
|
|
8842e19869 | ||
|
|
a0ce8bec97 | ||
|
|
84d79df93b | ||
|
|
df4b13320d | ||
|
|
bb511110d6 | ||
|
|
47cf4a5dbe | ||
|
|
cfbed42fa7 | ||
|
|
ff27ab7e86 | ||
|
|
5655e5e2b6 | ||
|
|
4b516af1f6 | ||
|
|
b1490ed5ce | ||
|
|
ea830c9758 | ||
|
|
8f576e5790 | ||
|
|
4327ee73b1 | ||
|
|
70a28fed12 | ||
|
|
fc22d39d6d | ||
|
|
1cc5e39cb8 | ||
|
|
1815e4d9b2 | ||
|
|
2ec1dbd1b6 | ||
|
|
a6163470b7 | ||
|
|
3dfb102f82 | ||
|
|
253cbee5c7 | ||
|
|
c1dfa74b98 | ||
|
|
647491dd99 | ||
|
|
9a71895a48 | ||
|
|
abff444562 | ||
|
|
1d0b542b1b | ||
|
|
6c485a98be | ||
|
|
9ebfde4897 | ||
|
|
e4ee2ca1fd | ||
|
|
849456c198 | ||
|
|
9a2536dd0d | ||
|
|
a03263acf8 | ||
|
|
0c0dcb7c8c | ||
|
|
9bce433154 | ||
|
|
04f0fc5871 | ||
|
|
e7da2b0686 | ||
|
|
eab565afe7 | ||
|
|
7d952441ea | ||
|
|
835a6b1096 | ||
|
|
e273a53c88 | ||
|
|
dcdcce6c52 | ||
|
|
c5b4ce9e7b | ||
|
|
8f484f6ac1 | ||
|
|
b748185f48 | ||
|
|
a6228ed78f | ||
|
|
fcbe2803c8 | ||
|
|
83c30c6c5a | ||
|
|
8db86e4031 | ||
|
|
e705cafcd5 | ||
|
|
32f17b0de1 | ||
|
|
d40c4bb046 | ||
|
|
25f8011825 | ||
|
|
d0f9655aa2 | ||
|
|
ce9a486a0e | ||
|
|
85abcc413e | ||
|
|
e5acb010c9 | ||
|
|
79f50ad924 | ||
|
|
5723ceefb6 | ||
|
|
95185e9525 | ||
|
|
e423a67f7b | ||
|
|
545a5c97c6 | ||
|
|
625d90b983 | ||
|
|
9999fc63e8 | ||
|
|
303e509bbf | ||
|
|
ae0a5e495a | ||
|
|
2edb7a04a9 | ||
|
|
a0599c1c31 | ||
|
|
eedf9f10e8 | ||
|
|
d891634fc6 | ||
|
|
af75d0bd7d | ||
|
|
e008b846bb | ||
|
|
fd11d93381 | ||
|
|
aa518f9b88 | ||
|
|
b16bd02f95 | ||
|
|
69bd408964 | ||
|
|
d8e9c7f5b5 | ||
|
|
fd54daf184 | ||
|
|
9057bd27af | ||
|
|
5a466918f9 | ||
|
|
56fc68eb7e | ||
|
|
ccfcf4bc37 | ||
|
|
560eaf0e78 | ||
|
|
daaa8f2482 | ||
|
|
97052cf203 | ||
|
|
2eccaadce5 | ||
|
|
aa4317bfce | ||
|
|
953cbf6696 | ||
|
|
414f215929 | ||
|
|
698eb840a3 | ||
|
|
714b85aaaf | ||
|
|
fb604d4b57 | ||
|
|
73d8969158 | ||
|
|
64e2b2532a | ||
|
|
c2befc0c12 | ||
|
|
345551ae0d | ||
|
|
97e8fa7aaf | ||
|
|
cdfc35d0b6 | ||
|
|
ce66d8830d | ||
|
|
fe08cf2981 | ||
|
|
c9d1c41d20 | ||
|
|
bda968ad5d | ||
|
|
481384b185 | ||
|
|
67d9385ce0 | ||
|
|
598bc16e5d | ||
|
|
760244ee3e | ||
|
|
d0177c6da3 | ||
|
|
8f8ed68b61 | ||
|
|
981cc8c2aa | ||
|
|
9822409b67 | ||
|
|
328666dc6a | ||
|
|
42d2719b08 | ||
|
|
3b33ac48d2 | ||
|
|
e0303dd65a | ||
|
|
dab7af617a | ||
|
|
0326d2a5b1 | ||
|
|
b4c81a4d27 | ||
|
|
7b3c4fc714 | ||
|
|
43ed470208 | ||
|
|
089982153f | ||
|
|
7393650008 | ||
|
|
b36c5196dd | ||
|
|
1484862a50 | ||
|
|
e5c3fa5293 | ||
|
|
2c58e6003f | ||
|
|
30ae5ceb6e | ||
|
|
6ffb77dcda | ||
|
|
2c1f46450a | ||
|
|
052f279de7 | ||
|
|
89684021b3 | ||
|
|
95bdecc145 | ||
|
|
082d5d70b2 | ||
|
|
5b75930a6d | ||
|
|
e41ab8d10d | ||
|
|
4b408c79fe | ||
|
|
cff7baff1c | ||
|
|
5130700981 | ||
|
|
13beda8b11 | ||
|
|
8babd5a147 | ||
|
|
cb856682e9 | ||
|
|
c65b7ed24f | ||
|
|
2c3d7dab3f | ||
|
|
13467c1f5d | ||
|
|
d0c4bed484 | ||
|
|
dbaad32f49 | ||
|
|
528e3ba259 | ||
|
|
1ff261d38e | ||
|
|
bef5d567b0 | ||
|
|
da95d9f0ca | ||
|
|
7206e2d179 | ||
|
|
736094794c | ||
|
|
a399a97949 | ||
|
|
62a416fe12 | ||
|
|
f6564c3147 | ||
|
|
b49911416c | ||
|
|
22c2538466 | ||
|
|
1861405b1e | ||
|
|
c9aeca19ce | ||
|
|
59827f5c27 | ||
|
|
827622421e | ||
|
|
f0c5dfaf48 | ||
|
|
703c765ec8 | ||
|
|
fb2c62a038 | ||
|
|
eabbee797b | ||
|
|
7e4021a43d | ||
|
|
2478f300aa | ||
|
|
620c57c86c | ||
|
|
8bea1cb417 | ||
|
|
147c7135b0 | ||
|
|
650a7af0ae | ||
|
|
4f738020fd | ||
|
|
d852568a29 | ||
|
|
68c3ac4f66 | ||
|
|
38afdf1f52 | ||
|
|
b2e723e2a3 | ||
|
|
02c2073feb | ||
|
|
61dff684ad | ||
|
|
78adfc80a9 | ||
|
|
7c590ecb9a | ||
|
|
24e043e375 | ||
|
|
7094eb86c9 | ||
|
|
81ea718ea4 | ||
|
|
9060cab077 | ||
|
|
3cd6d8d6e4 | ||
|
|
ba43fe08f4 | ||
|
|
6b63e7e3de | ||
|
|
57d737a13c | ||
|
|
671347dc35 | ||
|
|
02bc4e8992 | ||
|
|
1cdefbe901 | ||
|
|
7694f0b9d8 | ||
|
|
fa9126c61f | ||
|
|
ebae070f7e | ||
|
|
617f538cb3 | ||
|
|
0f45b629ad | ||
|
|
ac5b3241b1 | ||
|
|
ee24a36c4f | ||
|
|
8484fcdd57 | ||
|
|
45deb29f09 | ||
|
|
6641167e7d | ||
|
|
9f4987997c | ||
|
|
8337c25fa4 | ||
|
|
6b048e2316 | ||
|
|
54a1f0f0ea | ||
|
|
57dc45774a | ||
|
|
9d8ac1ce2d | ||
|
|
0a0252d9b3 | ||
|
|
c6535e9675 | ||
|
|
d762c76a68 | ||
|
|
1091707bd5 | ||
|
|
a4c392f4db | ||
|
|
e4880c5dd1 | ||
|
|
b2510c6b94 | ||
|
|
5b5c4c8c9d | ||
|
|
ceb5bc807c | ||
|
|
b2f705ad71 | ||
|
|
6028094e6b | ||
|
|
9516ce8e25 | ||
|
|
d82637582c | ||
|
|
1e80c70990 | ||
|
|
54032316f9 | ||
|
|
aac7a47469 | ||
|
|
aa0aeac297 | ||
|
|
cec4496d3b | ||
|
|
9368ecb67e | ||
|
|
20c947990c | ||
|
|
eeeff1cf23 | ||
|
|
752680e289 | ||
|
|
5bf02d9f7b | ||
|
|
0962fdbb04 | ||
|
|
1f5562315b | ||
|
|
a102d33738 | ||
|
|
940e0a4a3c | ||
|
|
a978b2b7a3 | ||
|
|
1326634c7d | ||
|
|
7a724ac445 | ||
|
|
55e164a540 | ||
|
|
707ae87060 | ||
|
|
cb37886658 | ||
|
|
c855277d53 | ||
|
|
898a8eeddf | ||
|
|
c857eaa380 | ||
|
|
55db25c21c | ||
|
|
f353814390 | ||
|
|
271a467612 | ||
|
|
b3b8c62ad4 | ||
|
|
eacf2bdf3d | ||
|
|
d537b9e418 | ||
|
|
616fb3e55c | ||
|
|
da5f853b44 | ||
|
|
4932eecc3f | ||
|
|
7f93616ff1 | ||
|
|
ab58333311 | ||
|
|
9efaa2793d | ||
|
|
80aa28f75c | ||
|
|
8819ac1b65 | ||
|
|
0408f3ac45 | ||
|
|
7683ef9137 | ||
|
|
3f423468df | ||
|
|
ff8bca206b | ||
|
|
08a70ecdcc | ||
|
|
d83da63320 | ||
|
|
639e0bc5ed | ||
|
|
d0a9a81e2e | ||
|
|
de1a560f07 | ||
|
|
e168fd826c | ||
|
|
2f1b7a0131 | ||
|
|
f3871b158f | ||
|
|
deb9dbe9bb | ||
|
|
6f71ea8904 | ||
|
|
e437f7ba04 | ||
|
|
abfc04f621 | ||
|
|
612dfdd813 | ||
|
|
ee19ce5ef2 | ||
|
|
23c2498dee | ||
|
|
390eedc50b | ||
|
|
adc839aa40 | ||
|
|
7838ade9f3 | ||
|
|
c043c9229e | ||
|
|
05a0fdf744 | ||
|
|
dfb557b34f | ||
|
|
d0d8bfbca4 | ||
|
|
21e4eb89b2 | ||
|
|
14834e6085 | ||
|
|
267e30a19c | ||
|
|
be4fd7110d | ||
|
|
24668122d9 | ||
|
|
31d021a9ca | ||
|
|
7497e6481e | ||
|
|
de9d253f45 | ||
|
|
f4f511201b | ||
|
|
beca8b6adf | ||
|
|
457dc402d3 | ||
|
|
34b9a629a0 | ||
|
|
ad674e2666 | ||
|
|
503d483731 | ||
|
|
6e5aefbb98 | ||
|
|
7d2c9d5ef5 | ||
|
|
1734abbb76 | ||
|
|
b06a55cf53 | ||
|
|
38137b29dd | ||
|
|
fc7144f61d | ||
|
|
ac93a7fbfb | ||
|
|
48f9b86b9a | ||
|
|
6c32a8c4c1 | ||
|
|
7a08248c4e | ||
|
|
05af608774 | ||
|
|
511e41386f | ||
|
|
fd251d2a7b | ||
|
|
5836c24e7d | ||
|
|
c8f8a106ed | ||
|
|
198764f116 | ||
|
|
0dd89f6029 | ||
|
|
8da8ee2aea | ||
|
|
6db8569f09 | ||
|
|
5a0e4c1023 | ||
|
|
ded91da575 | ||
|
|
508b2ef0c6 | ||
|
|
05b8821625 | ||
|
|
01245e72ab | ||
|
|
22e9e3342b | ||
|
|
0e3911147a | ||
|
|
2aa6d52b06 | ||
|
|
561a4330cf | ||
|
|
7b4bc4f00a | ||
|
|
a012e0043b | ||
|
|
2c2294fa43 | ||
|
|
197824c6f2 | ||
|
|
22e61ef06f | ||
|
|
159eac42f3 | ||
|
|
6c77b76b7b | ||
|
|
130e9fe093 | ||
|
|
e9fb769c60 | ||
|
|
3dcb03452c | ||
|
|
9b7d30c9a0 | ||
|
|
2134a1e104 | ||
|
|
cc6957d1cc | ||
|
|
0878d5b22b | ||
|
|
c8002e58a4 | ||
|
|
cfcd1d9420 | ||
|
|
e6756d951a | ||
|
|
b9aad03e7a | ||
|
|
0bd6f3c7f5 | ||
|
|
e2ebab5f26 | ||
|
|
e018f8b6fb | ||
|
|
03bedfb3c3 | ||
|
|
bdaaca40a2 | ||
|
|
bc021c89a8 | ||
|
|
798402314c | ||
|
|
7cfb440136 | ||
|
|
80358842c4 | ||
|
|
77aedb751e | ||
|
|
739ec964db | ||
|
|
320a3109f3 | ||
|
|
2c986bc184 | ||
|
|
6c31f43cc9 | ||
|
|
7b049b99c5 | ||
|
|
bf5a70023c | ||
|
|
8d001e338f | ||
|
|
73ea0826ca | ||
|
|
66e6dab26b | ||
|
|
0138f2a00f | ||
|
|
a59058e8a5 | ||
|
|
f6b7a3c522 | ||
|
|
8fe2070d10 | ||
|
|
54bb799d15 | ||
|
|
957044825f | ||
|
|
42a0f3d504 | ||
|
|
84ad208985 | ||
|
|
3631dc17c9 | ||
|
|
bafdc63b8c | ||
|
|
51c94cd2a6 | ||
|
|
31d88398bc | ||
|
|
fbf6594758 | ||
|
|
f54a67de6d | ||
|
|
f35b2b7cab | ||
|
|
29ba5ecef6 | ||
|
|
fb50d82fd8 | ||
|
|
87e8e4b847 | ||
|
|
a71a24c0f4 | ||
|
|
76119b0f61 | ||
|
|
7843b5f417 | ||
|
|
da6662975f | ||
|
|
de4dbb7d00 | ||
|
|
3bd4bca994 | ||
|
|
296832c90e | ||
|
|
56d55a4137 | ||
|
|
626e6f8fa3 | ||
|
|
5941bf0494 | ||
|
|
29a496cdab | ||
|
|
a43d9a67c7 | ||
|
|
c47eb3bf5a | ||
|
|
a97e1641a4 | ||
|
|
86ae8ea854 | ||
|
|
d37d483097 | ||
|
|
4e96faa201 | ||
|
|
e5419ef6d7 | ||
|
|
14747a490a | ||
|
|
e5cee892ed | ||
|
|
ef4b984df4 | ||
|
|
a8f402e28d | ||
|
|
2eba99b40b | ||
|
|
7686fa1f16 | ||
|
|
51b9bab245 | ||
|
|
6b5758f4cd | ||
|
|
bd375a14a8 | ||
|
|
b01693f63e | ||
|
|
4a059d5144 | ||
|
|
f3775c0046 | ||
|
|
50fbdd86f9 | ||
|
|
1f61de0fcc | ||
|
|
e206c585bb | ||
|
|
5e46d8057d | ||
|
|
4e7709e54c | ||
|
|
5ed8f1b7d9 | ||
|
|
1d12c1f5b3 | ||
|
|
3ef93e081c | ||
|
|
18894a8e3a | ||
|
|
13ec635988 | ||
|
|
f804b8fa4b | ||
|
|
21a55ff9a1 | ||
|
|
dd350284df | ||
|
|
c010d3de8d | ||
|
|
d11dbbf9f7 | ||
|
|
75cdceb9f1 | ||
|
|
10ff93f190 | ||
|
|
bf00185809 | ||
|
|
90f03e57c2 | ||
|
|
a59fd3ebfe | ||
|
|
3eb490153d | ||
|
|
d957d8b987 | ||
|
|
5a1f252bd9 | ||
|
|
ab4585f38c | ||
|
|
3003045c0b | ||
|
|
a6f3f290b4 | ||
|
|
27d072a099 | ||
|
|
8e3df1943c | ||
|
|
8c54de66ce | ||
|
|
06b9ac2dc4 | ||
|
|
b8739d7441 | ||
|
|
23fe02993b | ||
|
|
1d177f00d2 | ||
|
|
ceaba7011f | ||
|
|
9c06f383ba | ||
|
|
e11c5e3e96 | ||
|
|
f5719f3017 | ||
|
|
163babdca7 | ||
|
|
094d1aded8 | ||
|
|
05ef20b434 | ||
|
|
cc718b3444 | ||
|
|
e98e8f6ac9 | ||
|
|
36541ed9d5 | ||
|
|
418ea82d3a | ||
|
|
130bbda00e | ||
|
|
2666bd6996 | ||
|
|
ff2c8da803 | ||
|
|
e094296f37 | ||
|
|
7c3b77fb36 | ||
|
|
fb4c4f07ca | ||
|
|
b9e25e82cf | ||
|
|
089036da29 | ||
|
|
1123bfed10 | ||
|
|
7f2293308b | ||
|
|
a65131f9d3 | ||
|
|
8a3a646c61 | ||
|
|
4384947be1 | ||
|
|
69421182ca | ||
|
|
068382f5df | ||
|
|
c4bec05466 | ||
|
|
89e1ac0a6e | ||
|
|
b84e0e11b4 | ||
|
|
d95f5f8f3b | ||
|
|
b4c0941683 | ||
|
|
cf9798cede | ||
|
|
20d2501edc | ||
|
|
d45601fdc6 | ||
|
|
c81a9a89cf | ||
|
|
87b9f9ecfb | ||
|
|
cbc473359a | ||
|
|
2eba60db75 | ||
|
|
0dcbed3f53 | ||
|
|
ca08eb65e2 | ||
|
|
6f37d9bee7 | ||
|
|
e65f6b8c8b | ||
|
|
707dc43d55 | ||
|
|
8cbb7a9319 | ||
|
|
4f5a56aadb | ||
|
|
399beb53d9 | ||
|
|
7dec9fd6e7 | ||
|
|
120f3a8918 | ||
|
|
bd672eaf5b | ||
|
|
c2500ea2d8 | ||
|
|
a4663b4b2e | ||
|
|
57c618b83a | ||
|
|
b3a4f95110 | ||
|
|
28a1eb3527 | ||
|
|
75ecc15958 | ||
|
|
2235ebce2f | ||
|
|
7147463418 | ||
|
|
010e4c8d54 | ||
|
|
6f394a0691 | ||
|
|
efd7279118 | ||
|
|
601056f3a7 | ||
|
|
0a7f96cbfb | ||
|
|
1c530c2fe0 | ||
|
|
1e576dd7c6 | ||
|
|
7a5472153b | ||
|
|
b986ce566b | ||
|
|
daba16f4be | ||
|
|
ee36e2264e | ||
|
|
329e98d9f0 | ||
|
|
f4513f7028 | ||
|
|
b1c5449428 | ||
|
|
431732f5d1 | ||
|
|
d0bff18cee | ||
|
|
8ad30d0a35 |
7
.github/dependabot.yml
vendored
7
.github/dependabot.yml
vendored
@@ -5,6 +5,11 @@ updates:
|
|||||||
directory: "/"
|
directory: "/"
|
||||||
schedule:
|
schedule:
|
||||||
interval: "daily"
|
interval: "daily"
|
||||||
|
ignore:
|
||||||
|
# ignore this dependency
|
||||||
|
# it seems a bug with dependabot as pining to commit sha should not
|
||||||
|
# trigger a new version: https://github.com/docker/buildx/pull/2222#issuecomment-1919092153
|
||||||
|
- dependency-name: "docker/docs"
|
||||||
labels:
|
labels:
|
||||||
- "dependencies"
|
- "area/dependencies"
|
||||||
- "bot"
|
- "bot"
|
||||||
|
|||||||
104
.github/labeler.yml
vendored
Normal file
104
.github/labeler.yml
vendored
Normal file
@@ -0,0 +1,104 @@
|
|||||||
|
|
||||||
|
# Add 'area/project' label to changes in basic project documentation and .github folder, excluding .github/workflows
|
||||||
|
area/project:
|
||||||
|
- all:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file:
|
||||||
|
- .github/**
|
||||||
|
- LICENSE
|
||||||
|
- AUTHORS
|
||||||
|
- MAINTAINERS
|
||||||
|
- PROJECT.md
|
||||||
|
- README.md
|
||||||
|
- .gitignore
|
||||||
|
- codecov.yml
|
||||||
|
- all-globs-to-all-files: '!.github/workflows/*'
|
||||||
|
|
||||||
|
# Add 'area/github-actions' label to changes in the .github/workflows folder
|
||||||
|
area/ci:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: '.github/workflows/**'
|
||||||
|
|
||||||
|
# Add 'area/bake' label to changes in the bake
|
||||||
|
area/bake:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'bake/**'
|
||||||
|
|
||||||
|
# Add 'area/bake/compose' label to changes in the bake+compose
|
||||||
|
area/bake/compose:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file:
|
||||||
|
- bake/compose.go
|
||||||
|
- bake/compose_test.go
|
||||||
|
|
||||||
|
# Add 'area/build' label to changes in build files
|
||||||
|
area/build:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'build/**'
|
||||||
|
|
||||||
|
# Add 'area/builder' label to changes in builder files
|
||||||
|
area/builder:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'builder/**'
|
||||||
|
|
||||||
|
# Add 'area/cli' label to changes in the CLI
|
||||||
|
area/cli:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file:
|
||||||
|
- cmd/**
|
||||||
|
- commands/**
|
||||||
|
|
||||||
|
# Add 'area/controller' label to changes in the controller
|
||||||
|
area/controller:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'controller/**'
|
||||||
|
|
||||||
|
# Add 'area/docs' label to markdown files in the docs folder
|
||||||
|
area/docs:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'docs/**/*.md'
|
||||||
|
|
||||||
|
# Add 'area/dependencies' label to changes in go dependency files
|
||||||
|
area/dependencies:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file:
|
||||||
|
- go.mod
|
||||||
|
- go.sum
|
||||||
|
- vendor/**
|
||||||
|
|
||||||
|
# Add 'area/driver' label to changes in the driver folder
|
||||||
|
area/driver:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'driver/**'
|
||||||
|
|
||||||
|
# Add 'area/driver/docker' label to changes in the docker driver
|
||||||
|
area/driver/docker:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'driver/docker/**'
|
||||||
|
|
||||||
|
# Add 'area/driver/docker-container' label to changes in the docker-container driver
|
||||||
|
area/driver/docker-container:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'driver/docker-container/**'
|
||||||
|
|
||||||
|
# Add 'area/driver/kubernetes' label to changes in the kubernetes driver
|
||||||
|
area/driver/kubernetes:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'driver/kubernetes/**'
|
||||||
|
|
||||||
|
# Add 'area/driver/remote' label to changes in the remote driver
|
||||||
|
area/driver/remote:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'driver/remote/**'
|
||||||
|
|
||||||
|
# Add 'area/hack' label to changes in the hack folder
|
||||||
|
area/hack:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'hack/**'
|
||||||
|
|
||||||
|
# Add 'area/tests' label to changes in test files
|
||||||
|
area/tests:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file:
|
||||||
|
- tests/**
|
||||||
|
- '**/*_test.go'
|
||||||
325
.github/workflows/build.yml
vendored
325
.github/workflows/build.yml
vendored
@@ -21,118 +21,240 @@ on:
|
|||||||
env:
|
env:
|
||||||
BUILDX_VERSION: "latest"
|
BUILDX_VERSION: "latest"
|
||||||
BUILDKIT_IMAGE: "moby/buildkit:latest"
|
BUILDKIT_IMAGE: "moby/buildkit:latest"
|
||||||
|
SCOUT_VERSION: "1.11.0"
|
||||||
REPO_SLUG: "docker/buildx-bin"
|
REPO_SLUG: "docker/buildx-bin"
|
||||||
DESTDIR: "./bin"
|
DESTDIR: "./bin"
|
||||||
TEST_CACHE_SCOPE: "test"
|
TEST_CACHE_SCOPE: "test"
|
||||||
|
TESTFLAGS: "-v --parallel=6 --timeout=30m"
|
||||||
|
GOTESTSUM_FORMAT: "standard-verbose"
|
||||||
|
GO_VERSION: "1.22"
|
||||||
|
GOTESTSUM_VERSION: "v1.9.0" # same as one in Dockerfile
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
prepare-test:
|
test-integration:
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-24.04
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
-
|
|
||||||
name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v2
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v2
|
|
||||||
with:
|
|
||||||
version: ${{ env.BUILDX_VERSION }}
|
|
||||||
driver-opts: image=${{ env.BUILDKIT_IMAGE }}
|
|
||||||
buildkitd-flags: --debug
|
|
||||||
-
|
|
||||||
name: Build
|
|
||||||
uses: docker/bake-action@v3
|
|
||||||
with:
|
|
||||||
targets: integration-test-base
|
|
||||||
set: |
|
|
||||||
*.cache-from=type=gha,scope=${{ env.TEST_CACHE_SCOPE }}
|
|
||||||
*.cache-to=type=gha,scope=${{ env.TEST_CACHE_SCOPE }}
|
|
||||||
|
|
||||||
test:
|
|
||||||
runs-on: ubuntu-22.04
|
|
||||||
needs:
|
|
||||||
- prepare-test
|
|
||||||
env:
|
env:
|
||||||
TESTFLAGS: "-v --parallel=6 --timeout=30m"
|
|
||||||
TESTFLAGS_DOCKER: "-v --parallel=1 --timeout=30m"
|
TESTFLAGS_DOCKER: "-v --parallel=1 --timeout=30m"
|
||||||
GOTESTSUM_FORMAT: "standard-verbose"
|
|
||||||
TEST_IMAGE_BUILD: "0"
|
TEST_IMAGE_BUILD: "0"
|
||||||
TEST_IMAGE_ID: "buildx-tests"
|
TEST_IMAGE_ID: "buildx-tests"
|
||||||
|
TEST_COVERAGE: "1"
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
|
buildkit:
|
||||||
|
- master
|
||||||
|
- latest
|
||||||
|
- buildx-stable-1
|
||||||
|
- v0.14.1
|
||||||
|
- v0.13.2
|
||||||
|
- v0.12.5
|
||||||
worker:
|
worker:
|
||||||
- docker
|
|
||||||
- docker-container
|
- docker-container
|
||||||
- remote
|
- remote
|
||||||
pkg:
|
pkg:
|
||||||
- ./tests
|
- ./tests
|
||||||
|
mode:
|
||||||
|
- ""
|
||||||
|
- experimental
|
||||||
include:
|
include:
|
||||||
- pkg: ./...
|
- worker: docker
|
||||||
skip-integration-tests: 1
|
pkg: ./tests
|
||||||
|
- worker: docker+containerd # same as docker, but with containerd snapshotter
|
||||||
|
pkg: ./tests
|
||||||
|
- worker: docker
|
||||||
|
pkg: ./tests
|
||||||
|
mode: experimental
|
||||||
|
- worker: docker+containerd # same as docker, but with containerd snapshotter
|
||||||
|
pkg: ./tests
|
||||||
|
mode: experimental
|
||||||
steps:
|
steps:
|
||||||
|
-
|
||||||
|
name: Prepare
|
||||||
|
run: |
|
||||||
|
echo "TESTREPORTS_NAME=${{ github.job }}-$(echo "${{ matrix.pkg }}-${{ matrix.buildkit }}-${{ matrix.worker }}-${{ matrix.mode }}" | tr -dc '[:alnum:]-\n\r' | tr '[:upper:]' '[:lower:]')" >> $GITHUB_ENV
|
||||||
|
if [ -n "${{ matrix.buildkit }}" ]; then
|
||||||
|
echo "TEST_BUILDKIT_TAG=${{ matrix.buildkit }}" >> $GITHUB_ENV
|
||||||
|
fi
|
||||||
|
testFlags="--run=//worker=$(echo "${{ matrix.worker }}" | sed 's/\+/\\+/g')$"
|
||||||
|
case "${{ matrix.worker }}" in
|
||||||
|
docker | docker+containerd)
|
||||||
|
echo "TESTFLAGS=${{ env.TESTFLAGS_DOCKER }} $testFlags" >> $GITHUB_ENV
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
echo "TESTFLAGS=${{ env.TESTFLAGS }} $testFlags" >> $GITHUB_ENV
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
if [[ "${{ matrix.worker }}" == "docker"* ]]; then
|
||||||
|
echo "TEST_DOCKERD=1" >> $GITHUB_ENV
|
||||||
|
fi
|
||||||
|
if [ "${{ matrix.mode }}" = "experimental" ]; then
|
||||||
|
echo "TEST_BUILDX_EXPERIMENTAL=1" >> $GITHUB_ENV
|
||||||
|
fi
|
||||||
-
|
-
|
||||||
name: Checkout
|
name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
-
|
-
|
||||||
name: Set up QEMU
|
name: Set up QEMU
|
||||||
uses: docker/setup-qemu-action@v2
|
uses: docker/setup-qemu-action@v3
|
||||||
-
|
-
|
||||||
name: Set up Docker Buildx
|
name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v2
|
uses: docker/setup-buildx-action@v3
|
||||||
with:
|
with:
|
||||||
version: ${{ env.BUILDX_VERSION }}
|
version: ${{ env.BUILDX_VERSION }}
|
||||||
driver-opts: image=${{ env.BUILDKIT_IMAGE }}
|
driver-opts: image=${{ env.BUILDKIT_IMAGE }}
|
||||||
buildkitd-flags: --debug
|
buildkitd-flags: --debug
|
||||||
-
|
-
|
||||||
name: Build test image
|
name: Build test image
|
||||||
uses: docker/bake-action@v3
|
uses: docker/bake-action@v5
|
||||||
with:
|
with:
|
||||||
targets: integration-test
|
targets: integration-test
|
||||||
set: |
|
set: |
|
||||||
*.cache-from=type=gha,scope=${{ env.TEST_CACHE_SCOPE }}
|
|
||||||
*.output=type=docker,name=${{ env.TEST_IMAGE_ID }}
|
*.output=type=docker,name=${{ env.TEST_IMAGE_ID }}
|
||||||
-
|
-
|
||||||
name: Test
|
name: Test
|
||||||
run: |
|
run: |
|
||||||
export TEST_REPORT_SUFFIX=-${{ github.job }}-$(echo "${{ matrix.pkg }}-${{ matrix.skip-integration-tests }}-${{ matrix.worker }}" | tr -dc '[:alnum:]-\n\r' | tr '[:upper:]' '[:lower:]')
|
|
||||||
./hack/test
|
./hack/test
|
||||||
env:
|
env:
|
||||||
TEST_DOCKERD: "${{ (matrix.worker == 'docker' || matrix.worker == 'docker-container') && '1' || '0' }}"
|
TEST_REPORT_SUFFIX: "-${{ env.TESTREPORTS_NAME }}"
|
||||||
TESTFLAGS: "${{ (matrix.worker == 'docker' || matrix.worker == 'docker-container') && env.TESTFLAGS_DOCKER || env.TESTFLAGS }} --run=//worker=${{ matrix.worker }}$"
|
|
||||||
TESTPKGS: "${{ matrix.pkg }}"
|
TESTPKGS: "${{ matrix.pkg }}"
|
||||||
SKIP_INTEGRATION_TESTS: "${{ matrix.skip-integration-tests }}"
|
|
||||||
-
|
-
|
||||||
name: Send to Codecov
|
name: Send to Codecov
|
||||||
if: always()
|
if: always()
|
||||||
uses: codecov/codecov-action@v3
|
uses: codecov/codecov-action@v4
|
||||||
with:
|
with:
|
||||||
directory: ./bin/testreports
|
directory: ./bin/testreports
|
||||||
|
flags: integration
|
||||||
|
token: ${{ secrets.CODECOV_TOKEN }}
|
||||||
|
disable_file_fixes: true
|
||||||
-
|
-
|
||||||
name: Generate annotations
|
name: Generate annotations
|
||||||
if: always()
|
if: always()
|
||||||
uses: crazy-max/.github/.github/actions/gotest-annotations@1a64ea6d01db9a48aa61954cb20e265782c167d9
|
uses: crazy-max/.github/.github/actions/gotest-annotations@fa6141aedf23596fb8bdcceab9cce8dadaa31bd9
|
||||||
with:
|
with:
|
||||||
directory: ./bin/testreports
|
directory: ./bin/testreports
|
||||||
-
|
-
|
||||||
name: Upload test reports
|
name: Upload test reports
|
||||||
if: always()
|
if: always()
|
||||||
uses: actions/upload-artifact@v3
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: test-reports
|
name: test-reports-${{ env.TESTREPORTS_NAME }}
|
||||||
path: ./bin/testreports
|
path: ./bin/testreports
|
||||||
|
|
||||||
|
test-unit:
|
||||||
|
runs-on: ${{ matrix.os }}
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
os:
|
||||||
|
- ubuntu-24.04
|
||||||
|
- macos-12
|
||||||
|
- windows-2022
|
||||||
|
env:
|
||||||
|
SKIP_INTEGRATION_TESTS: 1
|
||||||
|
steps:
|
||||||
|
-
|
||||||
|
name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
-
|
||||||
|
name: Set up Go
|
||||||
|
uses: actions/setup-go@v5
|
||||||
|
with:
|
||||||
|
go-version: "${{ env.GO_VERSION }}"
|
||||||
|
-
|
||||||
|
name: Prepare
|
||||||
|
run: |
|
||||||
|
testreportsName=${{ github.job }}--${{ matrix.os }}
|
||||||
|
testreportsBaseDir=./bin/testreports
|
||||||
|
testreportsDir=$testreportsBaseDir/$testreportsName
|
||||||
|
echo "TESTREPORTS_NAME=$testreportsName" >> $GITHUB_ENV
|
||||||
|
echo "TESTREPORTS_BASEDIR=$testreportsBaseDir" >> $GITHUB_ENV
|
||||||
|
echo "TESTREPORTS_DIR=$testreportsDir" >> $GITHUB_ENV
|
||||||
|
mkdir -p $testreportsDir
|
||||||
|
shell: bash
|
||||||
|
-
|
||||||
|
name: Install gotestsum
|
||||||
|
run: |
|
||||||
|
go install gotest.tools/gotestsum@${{ env.GOTESTSUM_VERSION }}
|
||||||
|
-
|
||||||
|
name: Test
|
||||||
|
env:
|
||||||
|
TMPDIR: ${{ runner.temp }}
|
||||||
|
run: |
|
||||||
|
gotestsum \
|
||||||
|
--jsonfile="${{ env.TESTREPORTS_DIR }}/go-test-report.json" \
|
||||||
|
--junitfile="${{ env.TESTREPORTS_DIR }}/junit-report.xml" \
|
||||||
|
--packages="./..." \
|
||||||
|
-- \
|
||||||
|
"-mod=vendor" \
|
||||||
|
"-coverprofile" "${{ env.TESTREPORTS_DIR }}/coverage.txt" \
|
||||||
|
"-covermode" "atomic" ${{ env.TESTFLAGS }}
|
||||||
|
shell: bash
|
||||||
|
-
|
||||||
|
name: Send to Codecov
|
||||||
|
if: always()
|
||||||
|
uses: codecov/codecov-action@v4
|
||||||
|
with:
|
||||||
|
directory: ${{ env.TESTREPORTS_DIR }}
|
||||||
|
env_vars: RUNNER_OS
|
||||||
|
flags: unit
|
||||||
|
token: ${{ secrets.CODECOV_TOKEN }}
|
||||||
|
disable_file_fixes: true
|
||||||
|
-
|
||||||
|
name: Generate annotations
|
||||||
|
if: always()
|
||||||
|
uses: crazy-max/.github/.github/actions/gotest-annotations@fa6141aedf23596fb8bdcceab9cce8dadaa31bd9
|
||||||
|
with:
|
||||||
|
directory: ${{ env.TESTREPORTS_DIR }}
|
||||||
|
-
|
||||||
|
name: Upload test reports
|
||||||
|
if: always()
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: test-reports-${{ env.TESTREPORTS_NAME }}
|
||||||
|
path: ${{ env.TESTREPORTS_BASEDIR }}
|
||||||
|
|
||||||
|
govulncheck:
|
||||||
|
runs-on: ubuntu-24.04
|
||||||
|
permissions:
|
||||||
|
# required to write sarif report
|
||||||
|
security-events: write
|
||||||
|
# required to check out the repository
|
||||||
|
contents: read
|
||||||
|
steps:
|
||||||
|
-
|
||||||
|
name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
-
|
||||||
|
name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
with:
|
||||||
|
version: ${{ env.BUILDX_VERSION }}
|
||||||
|
driver-opts: image=${{ env.BUILDKIT_IMAGE }}
|
||||||
|
buildkitd-flags: --debug
|
||||||
|
-
|
||||||
|
name: Run
|
||||||
|
uses: docker/bake-action@v5
|
||||||
|
with:
|
||||||
|
targets: govulncheck
|
||||||
|
env:
|
||||||
|
GOVULNCHECK_FORMAT: sarif
|
||||||
|
-
|
||||||
|
name: Upload SARIF report
|
||||||
|
if: ${{ github.ref == 'refs/heads/master' && github.repository == 'docker/buildx' }}
|
||||||
|
uses: github/codeql-action/upload-sarif@v3
|
||||||
|
with:
|
||||||
|
sarif_file: ${{ env.DESTDIR }}/govulncheck.out
|
||||||
|
|
||||||
prepare-binaries:
|
prepare-binaries:
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-24.04
|
||||||
outputs:
|
outputs:
|
||||||
matrix: ${{ steps.platforms.outputs.matrix }}
|
matrix: ${{ steps.platforms.outputs.matrix }}
|
||||||
steps:
|
steps:
|
||||||
-
|
-
|
||||||
name: Checkout
|
name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
-
|
-
|
||||||
name: Create matrix
|
name: Create matrix
|
||||||
id: platforms
|
id: platforms
|
||||||
@@ -144,7 +266,7 @@ jobs:
|
|||||||
echo ${{ steps.platforms.outputs.matrix }}
|
echo ${{ steps.platforms.outputs.matrix }}
|
||||||
|
|
||||||
binaries:
|
binaries:
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-24.04
|
||||||
needs:
|
needs:
|
||||||
- prepare-binaries
|
- prepare-binaries
|
||||||
strategy:
|
strategy:
|
||||||
@@ -159,13 +281,13 @@ jobs:
|
|||||||
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
|
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
|
||||||
-
|
-
|
||||||
name: Checkout
|
name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
-
|
-
|
||||||
name: Set up QEMU
|
name: Set up QEMU
|
||||||
uses: docker/setup-qemu-action@v2
|
uses: docker/setup-qemu-action@v3
|
||||||
-
|
-
|
||||||
name: Set up Docker Buildx
|
name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v2
|
uses: docker/setup-buildx-action@v3
|
||||||
with:
|
with:
|
||||||
version: ${{ env.BUILDX_VERSION }}
|
version: ${{ env.BUILDX_VERSION }}
|
||||||
driver-opts: image=${{ env.BUILDKIT_IMAGE }}
|
driver-opts: image=${{ env.BUILDKIT_IMAGE }}
|
||||||
@@ -180,27 +302,28 @@ jobs:
|
|||||||
CACHE_TO: type=gha,scope=binaries-${{ env.PLATFORM_PAIR }},mode=max
|
CACHE_TO: type=gha,scope=binaries-${{ env.PLATFORM_PAIR }},mode=max
|
||||||
-
|
-
|
||||||
name: Upload artifacts
|
name: Upload artifacts
|
||||||
uses: actions/upload-artifact@v3
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: buildx
|
name: buildx-${{ env.PLATFORM_PAIR }}
|
||||||
path: ${{ env.DESTDIR }}/*
|
path: ${{ env.DESTDIR }}/*
|
||||||
if-no-files-found: error
|
if-no-files-found: error
|
||||||
|
|
||||||
bin-image:
|
bin-image:
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-24.04
|
||||||
needs:
|
needs:
|
||||||
- test
|
- test-integration
|
||||||
|
- test-unit
|
||||||
if: ${{ github.event_name != 'pull_request' && github.repository == 'docker/buildx' }}
|
if: ${{ github.event_name != 'pull_request' && github.repository == 'docker/buildx' }}
|
||||||
steps:
|
steps:
|
||||||
-
|
-
|
||||||
name: Checkout
|
name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
-
|
-
|
||||||
name: Set up QEMU
|
name: Set up QEMU
|
||||||
uses: docker/setup-qemu-action@v2
|
uses: docker/setup-qemu-action@v3
|
||||||
-
|
-
|
||||||
name: Set up Docker Buildx
|
name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v2
|
uses: docker/setup-buildx-action@v3
|
||||||
with:
|
with:
|
||||||
version: ${{ env.BUILDX_VERSION }}
|
version: ${{ env.BUILDX_VERSION }}
|
||||||
driver-opts: image=${{ env.BUILDKIT_IMAGE }}
|
driver-opts: image=${{ env.BUILDKIT_IMAGE }}
|
||||||
@@ -208,7 +331,7 @@ jobs:
|
|||||||
-
|
-
|
||||||
name: Docker meta
|
name: Docker meta
|
||||||
id: meta
|
id: meta
|
||||||
uses: docker/metadata-action@v4
|
uses: docker/metadata-action@v5
|
||||||
with:
|
with:
|
||||||
images: |
|
images: |
|
||||||
${{ env.REPO_SLUG }}
|
${{ env.REPO_SLUG }}
|
||||||
@@ -220,13 +343,13 @@ jobs:
|
|||||||
-
|
-
|
||||||
name: Login to DockerHub
|
name: Login to DockerHub
|
||||||
if: github.event_name != 'pull_request'
|
if: github.event_name != 'pull_request'
|
||||||
uses: docker/login-action@v2
|
uses: docker/login-action@v3
|
||||||
with:
|
with:
|
||||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
username: ${{ vars.DOCKERPUBLICBOT_USERNAME }}
|
||||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
password: ${{ secrets.DOCKERPUBLICBOT_WRITE_PAT }}
|
||||||
-
|
-
|
||||||
name: Build and push image
|
name: Build and push image
|
||||||
uses: docker/bake-action@v3
|
uses: docker/bake-action@v5
|
||||||
with:
|
with:
|
||||||
files: |
|
files: |
|
||||||
./docker-bake.hcl
|
./docker-bake.hcl
|
||||||
@@ -238,21 +361,55 @@ jobs:
|
|||||||
*.cache-from=type=gha,scope=bin-image
|
*.cache-from=type=gha,scope=bin-image
|
||||||
*.cache-to=type=gha,scope=bin-image,mode=max
|
*.cache-to=type=gha,scope=bin-image,mode=max
|
||||||
|
|
||||||
release:
|
scout:
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-24.04
|
||||||
|
if: ${{ github.ref == 'refs/heads/master' && github.repository == 'docker/buildx' }}
|
||||||
|
permissions:
|
||||||
|
# required to write sarif report
|
||||||
|
security-events: write
|
||||||
needs:
|
needs:
|
||||||
- test
|
- bin-image
|
||||||
|
steps:
|
||||||
|
-
|
||||||
|
name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
-
|
||||||
|
name: Login to DockerHub
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
username: ${{ vars.DOCKERPUBLICBOT_USERNAME }}
|
||||||
|
password: ${{ secrets.DOCKERPUBLICBOT_WRITE_PAT }}
|
||||||
|
-
|
||||||
|
name: Scout
|
||||||
|
id: scout
|
||||||
|
uses: crazy-max/.github/.github/actions/docker-scout@ccae1c98f1237b5c19e4ef77ace44fa68b3bc7e4
|
||||||
|
with:
|
||||||
|
version: ${{ env.SCOUT_VERSION }}
|
||||||
|
format: sarif
|
||||||
|
image: registry://${{ env.REPO_SLUG }}:master
|
||||||
|
-
|
||||||
|
name: Upload SARIF report
|
||||||
|
uses: github/codeql-action/upload-sarif@v3
|
||||||
|
with:
|
||||||
|
sarif_file: ${{ steps.scout.outputs.result-file }}
|
||||||
|
|
||||||
|
release:
|
||||||
|
runs-on: ubuntu-24.04
|
||||||
|
needs:
|
||||||
|
- test-integration
|
||||||
|
- test-unit
|
||||||
- binaries
|
- binaries
|
||||||
steps:
|
steps:
|
||||||
-
|
-
|
||||||
name: Checkout
|
name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
-
|
-
|
||||||
name: Download binaries
|
name: Download binaries
|
||||||
uses: actions/download-artifact@v3
|
uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: buildx
|
|
||||||
path: ${{ env.DESTDIR }}
|
path: ${{ env.DESTDIR }}
|
||||||
|
pattern: buildx-*
|
||||||
|
merge-multiple: true
|
||||||
-
|
-
|
||||||
name: Create checksums
|
name: Create checksums
|
||||||
run: ./hack/hash-files
|
run: ./hack/hash-files
|
||||||
@@ -267,33 +424,9 @@ jobs:
|
|||||||
-
|
-
|
||||||
name: GitHub Release
|
name: GitHub Release
|
||||||
if: startsWith(github.ref, 'refs/tags/v')
|
if: startsWith(github.ref, 'refs/tags/v')
|
||||||
uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v0.1.15
|
uses: softprops/action-gh-release@c062e08bd532815e2082a85e87e3ef29c3e6d191 # v2.0.8
|
||||||
env:
|
env:
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
with:
|
with:
|
||||||
draft: true
|
draft: true
|
||||||
files: ${{ env.DESTDIR }}/*
|
files: ${{ env.DESTDIR }}/*
|
||||||
|
|
||||||
buildkit-edge:
|
|
||||||
runs-on: ubuntu-22.04
|
|
||||||
continue-on-error: true
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
-
|
|
||||||
name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v2
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v2
|
|
||||||
with:
|
|
||||||
version: ${{ env.BUILDX_VERSION }}
|
|
||||||
driver-opts: image=moby/buildkit:master
|
|
||||||
buildkitd-flags: --debug
|
|
||||||
-
|
|
||||||
# Just run a bake target to check eveything runs fine
|
|
||||||
name: Build
|
|
||||||
uses: docker/bake-action@v3
|
|
||||||
with:
|
|
||||||
targets: binaries
|
|
||||||
|
|||||||
42
.github/workflows/codeql.yml
vendored
Normal file
42
.github/workflows/codeql.yml
vendored
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
name: codeql
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- 'master'
|
||||||
|
- 'v[0-9]*'
|
||||||
|
pull_request:
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
actions: read
|
||||||
|
contents: read
|
||||||
|
security-events: write
|
||||||
|
|
||||||
|
env:
|
||||||
|
GO_VERSION: "1.22"
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
codeql:
|
||||||
|
runs-on: ubuntu-24.04
|
||||||
|
steps:
|
||||||
|
-
|
||||||
|
name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
-
|
||||||
|
name: Set up Go
|
||||||
|
uses: actions/setup-go@v5
|
||||||
|
with:
|
||||||
|
go-version: ${{ env.GO_VERSION }}
|
||||||
|
-
|
||||||
|
name: Initialize CodeQL
|
||||||
|
uses: github/codeql-action/init@v3
|
||||||
|
with:
|
||||||
|
languages: go
|
||||||
|
-
|
||||||
|
name: Autobuild
|
||||||
|
uses: github/codeql-action/autobuild@v3
|
||||||
|
-
|
||||||
|
name: Perform CodeQL Analysis
|
||||||
|
uses: github/codeql-action/analyze@v3
|
||||||
|
with:
|
||||||
|
category: "/language:go"
|
||||||
47
.github/workflows/docs-release.yml
vendored
47
.github/workflows/docs-release.yml
vendored
@@ -1,18 +1,23 @@
|
|||||||
name: docs-release
|
name: docs-release
|
||||||
|
|
||||||
on:
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
tag:
|
||||||
|
description: 'Git tag'
|
||||||
|
required: true
|
||||||
release:
|
release:
|
||||||
types:
|
types:
|
||||||
- released
|
- released
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
open-pr:
|
open-pr:
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-24.04
|
||||||
if: ${{ github.event.release.prerelease != true && github.repository == 'docker/buildx' }}
|
if: ${{ (github.event.release.prerelease != true || github.event.inputs.tag != '') && github.repository == 'docker/buildx' }}
|
||||||
steps:
|
steps:
|
||||||
-
|
-
|
||||||
name: Checkout docs repo
|
name: Checkout docs repo
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
token: ${{ secrets.GHPAT_DOCS_DISPATCH }}
|
token: ${{ secrets.GHPAT_DOCS_DISPATCH }}
|
||||||
repository: docker/docs
|
repository: docker/docs
|
||||||
@@ -20,39 +25,47 @@ jobs:
|
|||||||
-
|
-
|
||||||
name: Prepare
|
name: Prepare
|
||||||
run: |
|
run: |
|
||||||
rm -rf ./_data/buildx/*
|
rm -rf ./data/buildx/*
|
||||||
|
if [ -n "${{ github.event.inputs.tag }}" ]; then
|
||||||
|
echo "RELEASE_NAME=${{ github.event.inputs.tag }}" >> $GITHUB_ENV
|
||||||
|
else
|
||||||
|
echo "RELEASE_NAME=${{ github.event.release.name }}" >> $GITHUB_ENV
|
||||||
|
fi
|
||||||
-
|
-
|
||||||
name: Set up Docker Buildx
|
name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v2
|
uses: docker/setup-buildx-action@v3
|
||||||
-
|
-
|
||||||
name: Build docs
|
name: Generate yaml
|
||||||
uses: docker/bake-action@v3
|
uses: docker/bake-action@v5
|
||||||
with:
|
with:
|
||||||
source: ${{ github.server_url }}/${{ github.repository }}.git#${{ github.event.release.name }}
|
source: ${{ github.server_url }}/${{ github.repository }}.git#${{ env.RELEASE_NAME }}
|
||||||
targets: update-docs
|
targets: update-docs
|
||||||
|
provenance: false
|
||||||
set: |
|
set: |
|
||||||
*.output=/tmp/buildx-docs
|
*.output=/tmp/buildx-docs
|
||||||
env:
|
env:
|
||||||
DOCS_FORMATS: yaml
|
DOCS_FORMATS: yaml
|
||||||
-
|
-
|
||||||
name: Copy files
|
name: Copy yaml
|
||||||
run: |
|
run: |
|
||||||
cp /tmp/buildx-docs/out/reference/*.yaml ./_data/buildx/
|
cp /tmp/buildx-docs/out/reference/*.yaml ./data/buildx/
|
||||||
-
|
-
|
||||||
name: Commit changes
|
name: Update vendor
|
||||||
run: |
|
run: |
|
||||||
git add -A .
|
make vendor
|
||||||
|
env:
|
||||||
|
VENDOR_MODULE: github.com/docker/buildx@${{ env.RELEASE_NAME }}
|
||||||
-
|
-
|
||||||
name: Create PR on docs repo
|
name: Create PR on docs repo
|
||||||
uses: peter-evans/create-pull-request@284f54f989303d2699d373481a0cfa13ad5a6666
|
uses: peter-evans/create-pull-request@8867c4aba1b742c39f8d0ba35429c2dfa4b6cb20 # v7.0.1
|
||||||
with:
|
with:
|
||||||
token: ${{ secrets.GHPAT_DOCS_DISPATCH }}
|
token: ${{ secrets.GHPAT_DOCS_DISPATCH }}
|
||||||
push-to-fork: docker-tools-robot/docker.github.io
|
push-to-fork: docker-tools-robot/docker.github.io
|
||||||
commit-message: "build: update buildx reference to ${{ github.event.release.name }}"
|
commit-message: "vendor: github.com/docker/buildx ${{ env.RELEASE_NAME }}"
|
||||||
signoff: true
|
signoff: true
|
||||||
branch: dispatch/buildx-ref-${{ github.event.release.name }}
|
branch: dispatch/buildx-ref-${{ env.RELEASE_NAME }}
|
||||||
delete-branch: true
|
delete-branch: true
|
||||||
title: Update buildx reference to ${{ github.event.release.name }}
|
title: Update buildx reference to ${{ env.RELEASE_NAME }}
|
||||||
body: |
|
body: |
|
||||||
Update the buildx reference documentation to keep in sync with the latest release `${{ github.event.release.name }}`
|
Update the buildx reference documentation to keep in sync with the latest release `${{ env.RELEASE_NAME }}`
|
||||||
draft: false
|
draft: false
|
||||||
|
|||||||
17
.github/workflows/docs-upstream.yml
vendored
17
.github/workflows/docs-upstream.yml
vendored
@@ -22,21 +22,22 @@ on:
|
|||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
docs-yaml:
|
docs-yaml:
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-24.04
|
||||||
steps:
|
steps:
|
||||||
-
|
-
|
||||||
name: Checkout
|
name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
-
|
-
|
||||||
name: Set up Docker Buildx
|
name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v2
|
uses: docker/setup-buildx-action@v3
|
||||||
with:
|
with:
|
||||||
version: latest
|
version: latest
|
||||||
-
|
-
|
||||||
name: Build reference YAML docs
|
name: Build reference YAML docs
|
||||||
uses: docker/bake-action@v3
|
uses: docker/bake-action@v5
|
||||||
with:
|
with:
|
||||||
targets: update-docs
|
targets: update-docs
|
||||||
|
provenance: false
|
||||||
set: |
|
set: |
|
||||||
*.output=/tmp/buildx-docs
|
*.output=/tmp/buildx-docs
|
||||||
*.cache-from=type=gha,scope=docs-yaml
|
*.cache-from=type=gha,scope=docs-yaml
|
||||||
@@ -45,18 +46,18 @@ jobs:
|
|||||||
DOCS_FORMATS: yaml
|
DOCS_FORMATS: yaml
|
||||||
-
|
-
|
||||||
name: Upload reference YAML docs
|
name: Upload reference YAML docs
|
||||||
uses: actions/upload-artifact@v3
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: docs-yaml
|
name: docs-yaml
|
||||||
path: /tmp/buildx-docs/out/reference
|
path: /tmp/buildx-docs/out/reference
|
||||||
retention-days: 1
|
retention-days: 1
|
||||||
|
|
||||||
validate:
|
validate:
|
||||||
uses: docker/docs/.github/workflows/validate-upstream.yml@main
|
uses: docker/docs/.github/workflows/validate-upstream.yml@6b73b05acb21edf7995cc5b3c6672d8e314cee7a # pin for artifact v4 support: https://github.com/docker/docs/pull/19220
|
||||||
needs:
|
needs:
|
||||||
- docs-yaml
|
- docs-yaml
|
||||||
with:
|
with:
|
||||||
repo: https://github.com/${{ github.repository }}
|
module-name: docker/buildx
|
||||||
data-files-id: docs-yaml
|
data-files-id: docs-yaml
|
||||||
data-files-folder: buildx
|
data-files-folder: buildx
|
||||||
data-files-placeholder-folder: engine/reference/commandline
|
create-placeholder-stubs: true
|
||||||
|
|||||||
88
.github/workflows/e2e.yml
vendored
88
.github/workflows/e2e.yml
vendored
@@ -22,18 +22,18 @@ env:
|
|||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
build:
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-24.04
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
-
|
-
|
||||||
name: Set up Docker Buildx
|
name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v2
|
uses: docker/setup-buildx-action@v3
|
||||||
with:
|
with:
|
||||||
version: latest
|
version: latest
|
||||||
-
|
-
|
||||||
name: Build
|
name: Build
|
||||||
uses: docker/bake-action@v3
|
uses: docker/bake-action@v5
|
||||||
with:
|
with:
|
||||||
targets: binaries
|
targets: binaries
|
||||||
set: |
|
set: |
|
||||||
@@ -46,7 +46,7 @@ jobs:
|
|||||||
mv ${{ env.DESTDIR }}/build/buildx ${{ env.DESTDIR }}/build/docker-buildx
|
mv ${{ env.DESTDIR }}/build/buildx ${{ env.DESTDIR }}/build/docker-buildx
|
||||||
-
|
-
|
||||||
name: Upload artifacts
|
name: Upload artifacts
|
||||||
uses: actions/upload-artifact@v3
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: binary
|
name: binary
|
||||||
path: ${{ env.DESTDIR }}/build
|
path: ${{ env.DESTDIR }}/build
|
||||||
@@ -82,6 +82,10 @@ jobs:
|
|||||||
driver-opt: qemu.install=true
|
driver-opt: qemu.install=true
|
||||||
- driver: remote
|
- driver: remote
|
||||||
endpoint: tcp://localhost:1234
|
endpoint: tcp://localhost:1234
|
||||||
|
- driver: docker-container
|
||||||
|
metadata-provenance: max
|
||||||
|
- driver: docker-container
|
||||||
|
metadata-warnings: true
|
||||||
exclude:
|
exclude:
|
||||||
- driver: docker
|
- driver: docker
|
||||||
multi-node: mnode-true
|
multi-node: mnode-true
|
||||||
@@ -96,14 +100,14 @@ jobs:
|
|||||||
steps:
|
steps:
|
||||||
-
|
-
|
||||||
name: Checkout
|
name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
-
|
-
|
||||||
name: Set up QEMU
|
name: Set up QEMU
|
||||||
uses: docker/setup-qemu-action@v2
|
uses: docker/setup-qemu-action@v3
|
||||||
if: matrix.driver == 'docker' || matrix.driver == 'docker-container'
|
if: matrix.driver == 'docker' || matrix.driver == 'docker-container'
|
||||||
-
|
-
|
||||||
name: Install buildx
|
name: Install buildx
|
||||||
uses: actions/download-artifact@v3
|
uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: binary
|
name: binary
|
||||||
path: /home/runner/.docker/cli-plugins
|
path: /home/runner/.docker/cli-plugins
|
||||||
@@ -129,70 +133,18 @@ jobs:
|
|||||||
else
|
else
|
||||||
echo "MULTI_NODE=0" >> $GITHUB_ENV
|
echo "MULTI_NODE=0" >> $GITHUB_ENV
|
||||||
fi
|
fi
|
||||||
|
if [ -n "${{ matrix.metadata-provenance }}" ]; then
|
||||||
|
echo "BUILDX_METADATA_PROVENANCE=${{ matrix.metadata-provenance }}" >> $GITHUB_ENV
|
||||||
|
fi
|
||||||
|
if [ -n "${{ matrix.metadata-warnings }}" ]; then
|
||||||
|
echo "BUILDX_METADATA_WARNINGS=${{ matrix.metadata-warnings }}" >> $GITHUB_ENV
|
||||||
|
fi
|
||||||
-
|
-
|
||||||
name: Install k3s
|
name: Install k3s
|
||||||
if: matrix.driver == 'kubernetes'
|
if: matrix.driver == 'kubernetes'
|
||||||
uses: actions/github-script@v6
|
uses: crazy-max/.github/.github/actions/install-k3s@fa6141aedf23596fb8bdcceab9cce8dadaa31bd9
|
||||||
with:
|
with:
|
||||||
script: |
|
version: ${{ env.K3S_VERSION }}
|
||||||
const fs = require('fs');
|
|
||||||
|
|
||||||
let wait = function(milliseconds) {
|
|
||||||
return new Promise((resolve, reject) => {
|
|
||||||
if (typeof(milliseconds) !== 'number') {
|
|
||||||
throw new Error('milleseconds not a number');
|
|
||||||
}
|
|
||||||
setTimeout(() => resolve("done!"), milliseconds)
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
try {
|
|
||||||
const kubeconfig="/tmp/buildkit-k3s/kubeconfig.yaml";
|
|
||||||
core.info(`storing kubeconfig in ${kubeconfig}`);
|
|
||||||
|
|
||||||
await exec.exec('docker', ["run", "-d",
|
|
||||||
"--privileged",
|
|
||||||
"--name=buildkit-k3s",
|
|
||||||
"-e", "K3S_KUBECONFIG_OUTPUT="+kubeconfig,
|
|
||||||
"-e", "K3S_KUBECONFIG_MODE=666",
|
|
||||||
"-v", "/tmp/buildkit-k3s:/tmp/buildkit-k3s",
|
|
||||||
"-p", "6443:6443",
|
|
||||||
"-p", "80:80",
|
|
||||||
"-p", "443:443",
|
|
||||||
"-p", "8080:8080",
|
|
||||||
"rancher/k3s:${{ env.K3S_VERSION }}", "server"
|
|
||||||
]);
|
|
||||||
await wait(10000);
|
|
||||||
|
|
||||||
core.exportVariable('KUBECONFIG', kubeconfig);
|
|
||||||
|
|
||||||
let nodeName;
|
|
||||||
for (let count = 1; count <= 5; count++) {
|
|
||||||
try {
|
|
||||||
const nodeNameOutput = await exec.getExecOutput("kubectl get nodes --no-headers -oname");
|
|
||||||
nodeName = nodeNameOutput.stdout
|
|
||||||
} catch (error) {
|
|
||||||
core.info(`Unable to resolve node name (${error.message}). Attempt ${count} of 5.`)
|
|
||||||
} finally {
|
|
||||||
if (nodeName) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
await wait(5000);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (!nodeName) {
|
|
||||||
throw new Error(`Unable to resolve node name after 5 attempts.`);
|
|
||||||
}
|
|
||||||
|
|
||||||
await exec.exec(`kubectl wait --for=condition=Ready ${nodeName}`);
|
|
||||||
} catch (error) {
|
|
||||||
core.setFailed(error.message);
|
|
||||||
}
|
|
||||||
-
|
|
||||||
name: Print KUBECONFIG
|
|
||||||
if: matrix.driver == 'kubernetes'
|
|
||||||
run: |
|
|
||||||
yq ${{ env.KUBECONFIG }}
|
|
||||||
-
|
-
|
||||||
name: Launch remote buildkitd
|
name: Launch remote buildkitd
|
||||||
if: matrix.driver == 'remote'
|
if: matrix.driver == 'remote'
|
||||||
|
|||||||
21
.github/workflows/labeler.yml
vendored
Normal file
21
.github/workflows/labeler.yml
vendored
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
name: labeler
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
on:
|
||||||
|
pull_request_target:
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
labeler:
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
pull-requests: write
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
-
|
||||||
|
name: Run
|
||||||
|
uses: actions/labeler@v5
|
||||||
|
with:
|
||||||
|
sync-labels: true
|
||||||
86
.github/workflows/validate.yml
vendored
86
.github/workflows/validate.yml
vendored
@@ -17,26 +17,82 @@ on:
|
|||||||
- '.github/releases.json'
|
- '.github/releases.json'
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
validate:
|
prepare:
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-24.04
|
||||||
strategy:
|
outputs:
|
||||||
fail-fast: false
|
includes: ${{ steps.matrix.outputs.includes }}
|
||||||
matrix:
|
|
||||||
target:
|
|
||||||
- lint
|
|
||||||
- validate-vendor
|
|
||||||
- validate-docs
|
|
||||||
- validate-generated-files
|
|
||||||
steps:
|
steps:
|
||||||
-
|
-
|
||||||
name: Checkout
|
name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
|
-
|
||||||
|
name: Matrix
|
||||||
|
id: matrix
|
||||||
|
uses: actions/github-script@v7
|
||||||
|
with:
|
||||||
|
script: |
|
||||||
|
let def = {};
|
||||||
|
await core.group(`Parsing definition`, async () => {
|
||||||
|
const printEnv = Object.assign({}, process.env, {
|
||||||
|
GOLANGCI_LINT_MULTIPLATFORM: process.env.GITHUB_REPOSITORY === 'docker/buildx' ? '1' : ''
|
||||||
|
});
|
||||||
|
const resPrint = await exec.getExecOutput('docker', ['buildx', 'bake', 'validate', '--print'], {
|
||||||
|
ignoreReturnCode: true,
|
||||||
|
env: printEnv
|
||||||
|
});
|
||||||
|
if (resPrint.stderr.length > 0 && resPrint.exitCode != 0) {
|
||||||
|
throw new Error(res.stderr);
|
||||||
|
}
|
||||||
|
def = JSON.parse(resPrint.stdout.trim());
|
||||||
|
});
|
||||||
|
await core.group(`Generating matrix`, async () => {
|
||||||
|
const includes = [];
|
||||||
|
for (const targetName of Object.keys(def.target)) {
|
||||||
|
const target = def.target[targetName];
|
||||||
|
if (target.platforms && target.platforms.length > 0) {
|
||||||
|
target.platforms.forEach(platform => {
|
||||||
|
includes.push({
|
||||||
|
target: targetName,
|
||||||
|
platform: platform
|
||||||
|
});
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
includes.push({
|
||||||
|
target: targetName
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
core.info(JSON.stringify(includes, null, 2));
|
||||||
|
core.setOutput('includes', JSON.stringify(includes));
|
||||||
|
});
|
||||||
|
|
||||||
|
validate:
|
||||||
|
runs-on: ubuntu-24.04
|
||||||
|
needs:
|
||||||
|
- prepare
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
include: ${{ fromJson(needs.prepare.outputs.includes) }}
|
||||||
|
steps:
|
||||||
|
-
|
||||||
|
name: Prepare
|
||||||
|
run: |
|
||||||
|
if [ "$GITHUB_REPOSITORY" = "docker/buildx" ]; then
|
||||||
|
echo "GOLANGCI_LINT_MULTIPLATFORM=1" >> $GITHUB_ENV
|
||||||
|
fi
|
||||||
|
-
|
||||||
|
name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
-
|
-
|
||||||
name: Set up Docker Buildx
|
name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v2
|
uses: docker/setup-buildx-action@v3
|
||||||
with:
|
with:
|
||||||
version: latest
|
version: latest
|
||||||
-
|
-
|
||||||
name: Run
|
name: Validate
|
||||||
run: |
|
uses: docker/bake-action@v5
|
||||||
make ${{ matrix.target }}
|
with:
|
||||||
|
targets: ${{ matrix.target }}
|
||||||
|
set: |
|
||||||
|
*.platform=${{ matrix.platform }}
|
||||||
|
|||||||
@@ -1,12 +1,8 @@
|
|||||||
run:
|
run:
|
||||||
timeout: 10m
|
timeout: 30m
|
||||||
skip-files:
|
|
||||||
- ".*\\.pb\\.go$"
|
|
||||||
|
|
||||||
modules-download-mode: vendor
|
modules-download-mode: vendor
|
||||||
|
|
||||||
build-tags:
|
|
||||||
|
|
||||||
linters:
|
linters:
|
||||||
enable:
|
enable:
|
||||||
- gofmt
|
- gofmt
|
||||||
@@ -25,16 +21,30 @@ linters:
|
|||||||
disable-all: true
|
disable-all: true
|
||||||
|
|
||||||
linters-settings:
|
linters-settings:
|
||||||
|
govet:
|
||||||
|
enable:
|
||||||
|
- nilness
|
||||||
|
- unusedwrite
|
||||||
|
# enable-all: true
|
||||||
|
# disable:
|
||||||
|
# - fieldalignment
|
||||||
|
# - shadow
|
||||||
depguard:
|
depguard:
|
||||||
list-type: blacklist
|
rules:
|
||||||
include-go-root: true
|
main:
|
||||||
packages:
|
deny:
|
||||||
# The io/ioutil package has been deprecated.
|
- pkg: "github.com/containerd/containerd/errdefs"
|
||||||
# https://go.dev/doc/go1.16#ioutil
|
desc: The containerd errdefs package was migrated to a separate module. Use github.com/containerd/errdefs instead.
|
||||||
- io/ioutil
|
- pkg: "github.com/containerd/containerd/log"
|
||||||
|
desc: The containerd log package was migrated to a separate module. Use github.com/containerd/log instead.
|
||||||
|
- pkg: "github.com/containerd/containerd/platforms"
|
||||||
|
desc: The containerd platforms package was migrated to a separate module. Use github.com/containerd/platforms instead.
|
||||||
|
- pkg: "io/ioutil"
|
||||||
|
desc: The io/ioutil package has been deprecated.
|
||||||
forbidigo:
|
forbidigo:
|
||||||
forbid:
|
forbid:
|
||||||
- '^fmt\.Errorf(# use errors\.Errorf instead)?$'
|
- '^fmt\.Errorf(# use errors\.Errorf instead)?$'
|
||||||
|
- '^platforms\.DefaultString(# use platforms\.Format(platforms\.DefaultSpec()) instead\.)?$'
|
||||||
gosec:
|
gosec:
|
||||||
excludes:
|
excludes:
|
||||||
- G204 # Audit use of command execution
|
- G204 # Audit use of command execution
|
||||||
@@ -43,7 +53,28 @@ linters-settings:
|
|||||||
G306: "0644"
|
G306: "0644"
|
||||||
|
|
||||||
issues:
|
issues:
|
||||||
|
exclude-files:
|
||||||
|
- ".*\\.pb\\.go$"
|
||||||
exclude-rules:
|
exclude-rules:
|
||||||
- linters:
|
- linters:
|
||||||
- revive
|
- revive
|
||||||
text: "stutters"
|
text: "stutters"
|
||||||
|
- linters:
|
||||||
|
- revive
|
||||||
|
text: "empty-block"
|
||||||
|
- linters:
|
||||||
|
- revive
|
||||||
|
text: "superfluous-else"
|
||||||
|
- linters:
|
||||||
|
- revive
|
||||||
|
text: "unused-parameter"
|
||||||
|
- linters:
|
||||||
|
- revive
|
||||||
|
text: "redefines-builtin-id"
|
||||||
|
- linters:
|
||||||
|
- revive
|
||||||
|
text: "if-return"
|
||||||
|
|
||||||
|
# show all
|
||||||
|
max-issues-per-linter: 0
|
||||||
|
max-same-issues: 0
|
||||||
|
|||||||
14
.mailmap
14
.mailmap
@@ -1,11 +1,25 @@
|
|||||||
# This file lists all individuals having contributed content to the repository.
|
# This file lists all individuals having contributed content to the repository.
|
||||||
# For how it is generated, see hack/dockerfiles/authors.Dockerfile.
|
# For how it is generated, see hack/dockerfiles/authors.Dockerfile.
|
||||||
|
|
||||||
|
Batuhan Apaydın <batuhan.apaydin@trendyol.com>
|
||||||
|
Batuhan Apaydın <batuhan.apaydin@trendyol.com> <developerguy2@gmail.com>
|
||||||
CrazyMax <github@crazymax.dev>
|
CrazyMax <github@crazymax.dev>
|
||||||
CrazyMax <github@crazymax.dev> <1951866+crazy-max@users.noreply.github.com>
|
CrazyMax <github@crazymax.dev> <1951866+crazy-max@users.noreply.github.com>
|
||||||
CrazyMax <github@crazymax.dev> <crazy-max@users.noreply.github.com>
|
CrazyMax <github@crazymax.dev> <crazy-max@users.noreply.github.com>
|
||||||
|
David Karlsson <david.karlsson@docker.com>
|
||||||
|
David Karlsson <david.karlsson@docker.com> <35727626+dvdksn@users.noreply.github.com>
|
||||||
|
jaihwan104 <jaihwan104@woowahan.com>
|
||||||
|
jaihwan104 <jaihwan104@woowahan.com> <42341126+jaihwan104@users.noreply.github.com>
|
||||||
|
Kenyon Ralph <kenyon@kenyonralph.com>
|
||||||
|
Kenyon Ralph <kenyon@kenyonralph.com> <quic_kralph@quicinc.com>
|
||||||
Sebastiaan van Stijn <github@gone.nl>
|
Sebastiaan van Stijn <github@gone.nl>
|
||||||
Sebastiaan van Stijn <github@gone.nl> <thaJeztah@users.noreply.github.com>
|
Sebastiaan van Stijn <github@gone.nl> <thaJeztah@users.noreply.github.com>
|
||||||
|
Shaun Thompson <shaun.thompson@docker.com>
|
||||||
|
Shaun Thompson <shaun.thompson@docker.com> <shaun.b.thompson@gmail.com>
|
||||||
|
Silvin Lubecki <silvin.lubecki@docker.com>
|
||||||
|
Silvin Lubecki <silvin.lubecki@docker.com> <31478878+silvin-lubecki@users.noreply.github.com>
|
||||||
|
Talon Bowler <talon.bowler@docker.com>
|
||||||
|
Talon Bowler <talon.bowler@docker.com> <nolat301@gmail.com>
|
||||||
Tibor Vass <tibor@docker.com>
|
Tibor Vass <tibor@docker.com>
|
||||||
Tibor Vass <tibor@docker.com> <tiborvass@users.noreply.github.com>
|
Tibor Vass <tibor@docker.com> <tiborvass@users.noreply.github.com>
|
||||||
Tõnis Tiigi <tonistiigi@gmail.com>
|
Tõnis Tiigi <tonistiigi@gmail.com>
|
||||||
|
|||||||
69
AUTHORS
69
AUTHORS
@@ -1,45 +1,112 @@
|
|||||||
# This file lists all individuals having contributed content to the repository.
|
# This file lists all individuals having contributed content to the repository.
|
||||||
# For how it is generated, see hack/dockerfiles/authors.Dockerfile.
|
# For how it is generated, see hack/dockerfiles/authors.Dockerfile.
|
||||||
|
|
||||||
|
accetto <34798830+accetto@users.noreply.github.com>
|
||||||
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
|
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
|
||||||
|
Aleksa Sarai <cyphar@cyphar.com>
|
||||||
Alex Couture-Beil <alex@earthly.dev>
|
Alex Couture-Beil <alex@earthly.dev>
|
||||||
Andrew Haines <andrew.haines@zencargo.com>
|
Andrew Haines <andrew.haines@zencargo.com>
|
||||||
|
Andy Caldwell <andrew.caldwell@metaswitch.com>
|
||||||
Andy MacKinlay <admackin@users.noreply.github.com>
|
Andy MacKinlay <admackin@users.noreply.github.com>
|
||||||
Anthony Poschen <zanven42@gmail.com>
|
Anthony Poschen <zanven42@gmail.com>
|
||||||
|
Arnold Sobanski <arnold@l4g.dev>
|
||||||
Artur Klauser <Artur.Klauser@computer.org>
|
Artur Klauser <Artur.Klauser@computer.org>
|
||||||
Batuhan Apaydın <developerguy2@gmail.com>
|
Avi Deitcher <avi@deitcher.net>
|
||||||
|
Batuhan Apaydın <batuhan.apaydin@trendyol.com>
|
||||||
|
Ben Peachey <potherca@gmail.com>
|
||||||
|
Bertrand Paquet <bertrand.paquet@gmail.com>
|
||||||
Bin Du <bindu@microsoft.com>
|
Bin Du <bindu@microsoft.com>
|
||||||
Brandon Philips <brandon@ifup.org>
|
Brandon Philips <brandon@ifup.org>
|
||||||
Brian Goff <cpuguy83@gmail.com>
|
Brian Goff <cpuguy83@gmail.com>
|
||||||
|
Bryce Lampe <bryce@pulumi.com>
|
||||||
|
Cameron Adams <pnzreba@gmail.com>
|
||||||
|
Christian Dupuis <cd@atomist.com>
|
||||||
|
Cory Snider <csnider@mirantis.com>
|
||||||
CrazyMax <github@crazymax.dev>
|
CrazyMax <github@crazymax.dev>
|
||||||
|
David Gageot <david.gageot@docker.com>
|
||||||
|
David Karlsson <david.karlsson@docker.com>
|
||||||
|
David Scott <dave@recoil.org>
|
||||||
dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
|
dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
|
||||||
Devin Bayer <dev@doubly.so>
|
Devin Bayer <dev@doubly.so>
|
||||||
Djordje Lukic <djordje.lukic@docker.com>
|
Djordje Lukic <djordje.lukic@docker.com>
|
||||||
|
Dmitry Makovey <dmakovey@gitlab.com>
|
||||||
Dmytro Makovey <dmytro.makovey@docker.com>
|
Dmytro Makovey <dmytro.makovey@docker.com>
|
||||||
Donghui Wang <977675308@qq.com>
|
Donghui Wang <977675308@qq.com>
|
||||||
|
Doug Borg <dougborg@apple.com>
|
||||||
|
Edgar Lee <edgarl@netflix.com>
|
||||||
|
Eli Treuherz <et@arenko.group>
|
||||||
|
Eliott Wiener <eliottwiener@gmail.com>
|
||||||
|
Elran Shefer <elran.shefer@velocity.tech>
|
||||||
faust <faustin@fala.red>
|
faust <faustin@fala.red>
|
||||||
Felipe Santos <felipecassiors@gmail.com>
|
Felipe Santos <felipecassiors@gmail.com>
|
||||||
|
Felix de Souza <fdesouza@palantir.com>
|
||||||
Fernando Miguel <github@FernandoMiguel.net>
|
Fernando Miguel <github@FernandoMiguel.net>
|
||||||
gfrancesco <gfrancesco@users.noreply.github.com>
|
gfrancesco <gfrancesco@users.noreply.github.com>
|
||||||
gracenoah <gracenoahgh@gmail.com>
|
gracenoah <gracenoahgh@gmail.com>
|
||||||
|
Guillaume Lours <705411+glours@users.noreply.github.com>
|
||||||
|
guoguangwu <guoguangwu@magic-shield.com>
|
||||||
Hollow Man <hollowman@hollowman.ml>
|
Hollow Man <hollowman@hollowman.ml>
|
||||||
|
Ian King'ori <kingorim.ian@gmail.com>
|
||||||
|
idnandre <andre@idntimes.com>
|
||||||
Ilya Dmitrichenko <errordeveloper@gmail.com>
|
Ilya Dmitrichenko <errordeveloper@gmail.com>
|
||||||
|
Isaac Gaskin <isaac.gaskin@circle.com>
|
||||||
Jack Laxson <jackjrabbit@gmail.com>
|
Jack Laxson <jackjrabbit@gmail.com>
|
||||||
|
jaihwan104 <jaihwan104@woowahan.com>
|
||||||
Jean-Yves Gastaud <jygastaud@gmail.com>
|
Jean-Yves Gastaud <jygastaud@gmail.com>
|
||||||
|
Jhan S. Álvarez <51450231+yastanotheruser@users.noreply.github.com>
|
||||||
|
Jonathan A. Sternberg <jonathan.sternberg@docker.com>
|
||||||
|
Jonathan Piché <jpiche@coveo.com>
|
||||||
|
Justin Chadwell <me@jedevc.com>
|
||||||
|
Kenyon Ralph <kenyon@kenyonralph.com>
|
||||||
khs1994 <khs1994@khs1994.com>
|
khs1994 <khs1994@khs1994.com>
|
||||||
|
Kijima Daigo <norimaking777@gmail.com>
|
||||||
|
Kohei Tokunaga <ktokunaga.mail@gmail.com>
|
||||||
Kotaro Adachi <k33asby@gmail.com>
|
Kotaro Adachi <k33asby@gmail.com>
|
||||||
|
Kushagra Mansingh <12158241+kushmansingh@users.noreply.github.com>
|
||||||
l00397676 <lujingxiao@huawei.com>
|
l00397676 <lujingxiao@huawei.com>
|
||||||
|
Laura Brehm <laurabrehm@hey.com>
|
||||||
|
Laurent Goderre <laurent.goderre@docker.com>
|
||||||
|
Mark Hildreth <113933455+markhildreth-gravity@users.noreply.github.com>
|
||||||
|
Mayeul Blanzat <mayeul.blanzat@datadoghq.com>
|
||||||
Michal Augustyn <michal.augustyn@mail.com>
|
Michal Augustyn <michal.augustyn@mail.com>
|
||||||
|
Milas Bowman <milas.bowman@docker.com>
|
||||||
|
Mitsuru Kariya <mitsuru.kariya@nttdata.com>
|
||||||
|
Moleus <fafufuburr@gmail.com>
|
||||||
|
Nick Santos <nick.santos@docker.com>
|
||||||
|
Nick Sieger <nick@nicksieger.com>
|
||||||
|
Nicolas De Loof <nicolas.deloof@gmail.com>
|
||||||
|
Niklas Gehlen <niklas@namespacelabs.com>
|
||||||
Patrick Van Stee <patrick@vanstee.me>
|
Patrick Van Stee <patrick@vanstee.me>
|
||||||
|
Paweł Gronowski <pawel.gronowski@docker.com>
|
||||||
|
Phong Tran <tran.pho@northeastern.edu>
|
||||||
|
Qasim Sarfraz <qasimsarfraz@microsoft.com>
|
||||||
|
Rob Murray <rob.murray@docker.com>
|
||||||
|
robertlestak <robert.lestak@umusic.com>
|
||||||
Saul Shanabrook <s.shanabrook@gmail.com>
|
Saul Shanabrook <s.shanabrook@gmail.com>
|
||||||
|
Sean P. Kane <spkane00@gmail.com>
|
||||||
Sebastiaan van Stijn <github@gone.nl>
|
Sebastiaan van Stijn <github@gone.nl>
|
||||||
|
Shaun Thompson <shaun.thompson@docker.com>
|
||||||
SHIMA Tatsuya <ts1s1andn@gmail.com>
|
SHIMA Tatsuya <ts1s1andn@gmail.com>
|
||||||
Silvin Lubecki <silvin.lubecki@docker.com>
|
Silvin Lubecki <silvin.lubecki@docker.com>
|
||||||
|
Simon A. Eugster <simon.eu@gmail.com>
|
||||||
Solomon Hykes <sh.github.6811@hykes.org>
|
Solomon Hykes <sh.github.6811@hykes.org>
|
||||||
|
Sumner Warren <sumner.warren@gmail.com>
|
||||||
Sune Keller <absukl@almbrand.dk>
|
Sune Keller <absukl@almbrand.dk>
|
||||||
|
Talon Bowler <talon.bowler@docker.com>
|
||||||
|
Tianon Gravi <admwiggin@gmail.com>
|
||||||
Tibor Vass <tibor@docker.com>
|
Tibor Vass <tibor@docker.com>
|
||||||
|
Tim Smith <tismith@rvohealth.com>
|
||||||
|
Timofey Kirillov <timofey.kirillov@flant.com>
|
||||||
|
Tyler Smith <tylerlwsmith@gmail.com>
|
||||||
Tõnis Tiigi <tonistiigi@gmail.com>
|
Tõnis Tiigi <tonistiigi@gmail.com>
|
||||||
Ulysses Souza <ulyssessouza@gmail.com>
|
Ulysses Souza <ulyssessouza@gmail.com>
|
||||||
|
Usual Coder <34403413+Usual-Coder@users.noreply.github.com>
|
||||||
Wang Jinglei <morlay.null@gmail.com>
|
Wang Jinglei <morlay.null@gmail.com>
|
||||||
|
Wei <daviseago@gmail.com>
|
||||||
|
Wojciech M <wmiedzybrodzki@outlook.com>
|
||||||
Xiang Dai <764524258@qq.com>
|
Xiang Dai <764524258@qq.com>
|
||||||
|
Zachary Povey <zachary.povey@autotrader.co.uk>
|
||||||
zelahi <elahi.zuhayr@gmail.com>
|
zelahi <elahi.zuhayr@gmail.com>
|
||||||
|
Zero <tobewhatwewant@gmail.com>
|
||||||
|
zhyon404 <zhyong4@gmail.com>
|
||||||
|
Zsolt <zsolt.szeberenyi@figured.com>
|
||||||
|
|||||||
79
Dockerfile
79
Dockerfile
@@ -1,18 +1,23 @@
|
|||||||
# syntax=docker/dockerfile:1
|
# syntax=docker/dockerfile:1
|
||||||
|
|
||||||
ARG GO_VERSION=1.20
|
ARG GO_VERSION=1.22
|
||||||
ARG XX_VERSION=1.2.1
|
ARG XX_VERSION=1.4.0
|
||||||
ARG DOCKERD_VERSION=20.10.14
|
|
||||||
|
# for testing
|
||||||
|
ARG DOCKER_VERSION=27.1.1
|
||||||
|
ARG DOCKER_CLI_VERSION=${DOCKER_VERSION}
|
||||||
ARG GOTESTSUM_VERSION=v1.9.0
|
ARG GOTESTSUM_VERSION=v1.9.0
|
||||||
ARG REGISTRY_VERSION=2.8.0
|
ARG REGISTRY_VERSION=2.8.0
|
||||||
ARG BUILDKIT_VERSION=v0.11.6
|
ARG BUILDKIT_VERSION=v0.14.1
|
||||||
|
ARG UNDOCK_VERSION=0.7.0
|
||||||
|
|
||||||
FROM docker:$DOCKERD_VERSION AS dockerd-release
|
|
||||||
|
|
||||||
# xx is a helper for cross-compilation
|
|
||||||
FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
|
FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
|
||||||
|
|
||||||
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS golatest
|
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS golatest
|
||||||
|
FROM moby/moby-bin:$DOCKER_VERSION AS docker-engine
|
||||||
|
FROM dockereng/cli-bin:$DOCKER_CLI_VERSION AS docker-cli
|
||||||
|
FROM registry:$REGISTRY_VERSION AS registry
|
||||||
|
FROM moby/buildkit:$BUILDKIT_VERSION AS buildkit
|
||||||
|
FROM crazymax/undock:$UNDOCK_VERSION AS undock
|
||||||
|
|
||||||
FROM golatest AS gobase
|
FROM golatest AS gobase
|
||||||
COPY --from=xx / /
|
COPY --from=xx / /
|
||||||
@@ -21,16 +26,38 @@ ENV GOFLAGS=-mod=vendor
|
|||||||
ENV CGO_ENABLED=0
|
ENV CGO_ENABLED=0
|
||||||
WORKDIR /src
|
WORKDIR /src
|
||||||
|
|
||||||
FROM registry:$REGISTRY_VERSION AS registry
|
|
||||||
|
|
||||||
FROM moby/buildkit:$BUILDKIT_VERSION AS buildkit
|
|
||||||
|
|
||||||
FROM gobase AS gotestsum
|
FROM gobase AS gotestsum
|
||||||
ARG GOTESTSUM_VERSION
|
ARG GOTESTSUM_VERSION
|
||||||
ENV GOFLAGS=
|
ENV GOFLAGS=""
|
||||||
RUN --mount=target=/root/.cache,type=cache \
|
RUN --mount=target=/root/.cache,type=cache <<EOT
|
||||||
GOBIN=/out/ go install "gotest.tools/gotestsum@${GOTESTSUM_VERSION}" && \
|
set -ex
|
||||||
/out/gotestsum --version
|
go install "gotest.tools/gotestsum@${GOTESTSUM_VERSION}"
|
||||||
|
go install "github.com/wadey/gocovmerge@latest"
|
||||||
|
mkdir /out
|
||||||
|
/go/bin/gotestsum --version
|
||||||
|
mv /go/bin/gotestsum /out
|
||||||
|
mv /go/bin/gocovmerge /out
|
||||||
|
EOT
|
||||||
|
COPY --chmod=755 <<"EOF" /out/gotestsumandcover
|
||||||
|
#!/bin/sh
|
||||||
|
set -x
|
||||||
|
if [ -z "$GO_TEST_COVERPROFILE" ]; then
|
||||||
|
exec gotestsum "$@"
|
||||||
|
fi
|
||||||
|
coverdir="$(dirname "$GO_TEST_COVERPROFILE")"
|
||||||
|
mkdir -p "$coverdir/helpers"
|
||||||
|
gotestsum "$@" "-coverprofile=$GO_TEST_COVERPROFILE"
|
||||||
|
ecode=$?
|
||||||
|
go tool covdata textfmt -i=$coverdir/helpers -o=$coverdir/helpers-report.txt
|
||||||
|
gocovmerge "$coverdir/helpers-report.txt" "$GO_TEST_COVERPROFILE" > "$coverdir/merged-report.txt"
|
||||||
|
mv "$coverdir/merged-report.txt" "$GO_TEST_COVERPROFILE"
|
||||||
|
rm "$coverdir/helpers-report.txt"
|
||||||
|
for f in "$coverdir/helpers"/*; do
|
||||||
|
rm "$f"
|
||||||
|
done
|
||||||
|
rmdir "$coverdir/helpers"
|
||||||
|
exit $ecode
|
||||||
|
EOF
|
||||||
|
|
||||||
FROM gobase AS buildx-version
|
FROM gobase AS buildx-version
|
||||||
RUN --mount=type=bind,target=. <<EOT
|
RUN --mount=type=bind,target=. <<EOT
|
||||||
@@ -42,6 +69,7 @@ EOT
|
|||||||
|
|
||||||
FROM gobase AS buildx-build
|
FROM gobase AS buildx-build
|
||||||
ARG TARGETPLATFORM
|
ARG TARGETPLATFORM
|
||||||
|
ARG GO_EXTRA_FLAGS
|
||||||
RUN --mount=type=bind,target=. \
|
RUN --mount=type=bind,target=. \
|
||||||
--mount=type=cache,target=/root/.cache \
|
--mount=type=cache,target=/root/.cache \
|
||||||
--mount=type=cache,target=/go/pkg/mod \
|
--mount=type=cache,target=/go/pkg/mod \
|
||||||
@@ -77,11 +105,24 @@ FROM binaries-$TARGETOS AS binaries
|
|||||||
ARG BUILDKIT_SBOM_SCAN_STAGE=true
|
ARG BUILDKIT_SBOM_SCAN_STAGE=true
|
||||||
|
|
||||||
FROM gobase AS integration-test-base
|
FROM gobase AS integration-test-base
|
||||||
RUN apk add --no-cache docker runc containerd
|
# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies
|
||||||
COPY --link --from=gotestsum /out/gotestsum /usr/bin/
|
RUN apk add --no-cache \
|
||||||
|
btrfs-progs \
|
||||||
|
e2fsprogs \
|
||||||
|
e2fsprogs-extra \
|
||||||
|
ip6tables \
|
||||||
|
iptables \
|
||||||
|
openssl \
|
||||||
|
shadow-uidmap \
|
||||||
|
xfsprogs \
|
||||||
|
xz
|
||||||
|
COPY --link --from=gotestsum /out /usr/bin/
|
||||||
COPY --link --from=registry /bin/registry /usr/bin/
|
COPY --link --from=registry /bin/registry /usr/bin/
|
||||||
|
COPY --link --from=docker-engine / /usr/bin/
|
||||||
|
COPY --link --from=docker-cli / /usr/bin/
|
||||||
COPY --link --from=buildkit /usr/bin/buildkitd /usr/bin/
|
COPY --link --from=buildkit /usr/bin/buildkitd /usr/bin/
|
||||||
COPY --link --from=buildkit /usr/bin/buildctl /usr/bin/
|
COPY --link --from=buildkit /usr/bin/buildctl /usr/bin/
|
||||||
|
COPY --link --from=undock /usr/local/bin/undock /usr/bin/
|
||||||
COPY --link --from=binaries /buildx /usr/bin/
|
COPY --link --from=binaries /buildx /usr/bin/
|
||||||
|
|
||||||
FROM integration-test-base AS integration-test
|
FROM integration-test-base AS integration-test
|
||||||
@@ -102,7 +143,7 @@ FROM scratch AS release
|
|||||||
COPY --from=releaser /out/ /
|
COPY --from=releaser /out/ /
|
||||||
|
|
||||||
# Shell
|
# Shell
|
||||||
FROM docker:$DOCKERD_VERSION AS dockerd-release
|
FROM docker:$DOCKER_VERSION AS dockerd-release
|
||||||
FROM alpine AS shell
|
FROM alpine AS shell
|
||||||
RUN apk add --no-cache iptables tmux git vim less openssh
|
RUN apk add --no-cache iptables tmux git vim less openssh
|
||||||
RUN mkdir -p /usr/local/lib/docker/cli-plugins && ln -s /usr/local/bin/buildx /usr/local/lib/docker/cli-plugins/docker-buildx
|
RUN mkdir -p /usr/local/lib/docker/cli-plugins && ln -s /usr/local/bin/buildx /usr/local/lib/docker/cli-plugins/docker-buildx
|
||||||
|
|||||||
@@ -153,6 +153,7 @@ made through a pull request.
|
|||||||
"akihirosuda",
|
"akihirosuda",
|
||||||
"crazy-max",
|
"crazy-max",
|
||||||
"jedevc",
|
"jedevc",
|
||||||
|
"jsternberg",
|
||||||
"tiborvass",
|
"tiborvass",
|
||||||
"tonistiigi",
|
"tonistiigi",
|
||||||
]
|
]
|
||||||
@@ -194,6 +195,11 @@ made through a pull request.
|
|||||||
Email = "me@jedevc.com"
|
Email = "me@jedevc.com"
|
||||||
GitHub = "jedevc"
|
GitHub = "jedevc"
|
||||||
|
|
||||||
|
[people.jsternberg]
|
||||||
|
Name = "Jonathan Sternberg"
|
||||||
|
Email = "jonathan.sternberg@docker.com"
|
||||||
|
GitHub = "jsternberg"
|
||||||
|
|
||||||
[people.thajeztah]
|
[people.thajeztah]
|
||||||
Name = "Sebastiaan van Stijn"
|
Name = "Sebastiaan van Stijn"
|
||||||
Email = "github@gone.nl"
|
Email = "github@gone.nl"
|
||||||
|
|||||||
32
Makefile
32
Makefile
@@ -8,6 +8,8 @@ endif
|
|||||||
|
|
||||||
export BUILDX_CMD ?= docker buildx
|
export BUILDX_CMD ?= docker buildx
|
||||||
|
|
||||||
|
BAKE_TARGETS := binaries binaries-cross lint lint-gopls validate-vendor validate-docs validate-authors validate-generated-files
|
||||||
|
|
||||||
.PHONY: all
|
.PHONY: all
|
||||||
all: binaries
|
all: binaries
|
||||||
|
|
||||||
@@ -19,13 +21,9 @@ build:
|
|||||||
shell:
|
shell:
|
||||||
./hack/shell
|
./hack/shell
|
||||||
|
|
||||||
.PHONY: binaries
|
.PHONY: $(BAKE_TARGETS)
|
||||||
binaries:
|
$(BAKE_TARGETS):
|
||||||
$(BUILDX_CMD) bake binaries
|
$(BUILDX_CMD) bake $@
|
||||||
|
|
||||||
.PHONY: binaries-cross
|
|
||||||
binaries-cross:
|
|
||||||
$(BUILDX_CMD) bake binaries-cross
|
|
||||||
|
|
||||||
.PHONY: install
|
.PHONY: install
|
||||||
install: binaries
|
install: binaries
|
||||||
@@ -39,10 +37,6 @@ release:
|
|||||||
.PHONY: validate-all
|
.PHONY: validate-all
|
||||||
validate-all: lint test validate-vendor validate-docs validate-generated-files
|
validate-all: lint test validate-vendor validate-docs validate-generated-files
|
||||||
|
|
||||||
.PHONY: lint
|
|
||||||
lint:
|
|
||||||
$(BUILDX_CMD) bake lint
|
|
||||||
|
|
||||||
.PHONY: test
|
.PHONY: test
|
||||||
test:
|
test:
|
||||||
./hack/test
|
./hack/test
|
||||||
@@ -55,22 +49,6 @@ test-unit:
|
|||||||
test-integration:
|
test-integration:
|
||||||
TESTPKGS=./tests ./hack/test
|
TESTPKGS=./tests ./hack/test
|
||||||
|
|
||||||
.PHONY: validate-vendor
|
|
||||||
validate-vendor:
|
|
||||||
$(BUILDX_CMD) bake validate-vendor
|
|
||||||
|
|
||||||
.PHONY: validate-docs
|
|
||||||
validate-docs:
|
|
||||||
$(BUILDX_CMD) bake validate-docs
|
|
||||||
|
|
||||||
.PHONY: validate-authors
|
|
||||||
validate-authors:
|
|
||||||
$(BUILDX_CMD) bake validate-authors
|
|
||||||
|
|
||||||
.PHONY: validate-generated-files
|
|
||||||
validate-generated-files:
|
|
||||||
$(BUILDX_CMD) bake validate-generated-files
|
|
||||||
|
|
||||||
.PHONY: test-driver
|
.PHONY: test-driver
|
||||||
test-driver:
|
test-driver:
|
||||||
./hack/test-driver
|
./hack/test-driver
|
||||||
|
|||||||
453
PROJECT.md
Normal file
453
PROJECT.md
Normal file
@@ -0,0 +1,453 @@
|
|||||||
|
# Project processing guide <!-- omit from toc -->
|
||||||
|
|
||||||
|
- [Project scope](#project-scope)
|
||||||
|
- [Labels](#labels)
|
||||||
|
- [Global](#global)
|
||||||
|
- [`area/`](#area)
|
||||||
|
- [`exp/`](#exp)
|
||||||
|
- [`impact/`](#impact)
|
||||||
|
- [`kind/`](#kind)
|
||||||
|
- [`needs/`](#needs)
|
||||||
|
- [`priority/`](#priority)
|
||||||
|
- [`status/`](#status)
|
||||||
|
- [Types of releases](#types-of-releases)
|
||||||
|
- [Feature releases](#feature-releases)
|
||||||
|
- [Release Candidates](#release-candidates)
|
||||||
|
- [Support Policy](#support-policy)
|
||||||
|
- [Contributing to Releases](#contributing-to-releases)
|
||||||
|
- [Patch releases](#patch-releases)
|
||||||
|
- [Milestones](#milestones)
|
||||||
|
- [Triage process](#triage-process)
|
||||||
|
- [Verify essential information](#verify-essential-information)
|
||||||
|
- [Classify the issue](#classify-the-issue)
|
||||||
|
- [Prioritization guidelines for `kind/bug`](#prioritization-guidelines-for-kindbug)
|
||||||
|
- [Issue lifecyle](#issue-lifecyle)
|
||||||
|
- [Examples](#examples)
|
||||||
|
- [Submitting a bug](#submitting-a-bug)
|
||||||
|
- [Pull request review process](#pull-request-review-process)
|
||||||
|
- [Handling stalled issues and pull requests](#handling-stalled-issues-and-pull-requests)
|
||||||
|
- [Moving to a discussion](#moving-to-a-discussion)
|
||||||
|
- [Workflow automation](#workflow-automation)
|
||||||
|
- [Exempting an issue/PR from stale bot processing](#exempting-an-issuepr-from-stale-bot-processing)
|
||||||
|
- [Updating dependencies](#updating-dependencies)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Project scope
|
||||||
|
|
||||||
|
**Docker Buildx** is a Docker CLI plugin designed to extend build capabilities using BuildKit. It provides advanced features for building container images, supporting multiple builder instances, multi-node builds, and high-level build constructs. Buildx enhances the Docker build process, making it more efficient and flexible, and is compatible with both Docker and Kubernetes environments. Key features include:
|
||||||
|
|
||||||
|
- **Familiar user experience:** Buildx offers a user experience similar to legacy docker build, ensuring a smooth transition from legacy commands
|
||||||
|
- **Full BuildKit capabilities:** Leverage the full feature set of [`moby/buildkit`](https://github.com/moby/buildkit) when using the container driver
|
||||||
|
- **Multiple builder instances:** Supports the use of multiple builder instances, allowing concurrent builds and effective management and monitoring of these builders.
|
||||||
|
- **Multi-node builds:** Use multiple nodes to build cross-platform images
|
||||||
|
- **Compose integration:** Build complex, multi-services files as defined in compose
|
||||||
|
- **High-level build constructs via `bake`:** Introduces high-level build constructs for more complex build workflows
|
||||||
|
- **In-container driver support:** Support in-container drivers for both Docker and Kubernetes environments to support isolation/security.
|
||||||
|
|
||||||
|
## Labels
|
||||||
|
|
||||||
|
Below are common groups, labels, and their intended usage to support issues, pull requests, and discussion processing.
|
||||||
|
|
||||||
|
### Global
|
||||||
|
|
||||||
|
General attributes that can apply to nearly any issue or pull request.
|
||||||
|
|
||||||
|
| Label | Applies to | Description |
|
||||||
|
| ------------------- | ----------- | ------------------------------------------------------------------------- |
|
||||||
|
| `bot` | Issues, PRs | Created by a bot |
|
||||||
|
| `good first issue ` | Issues | Suitable for first-time contributors |
|
||||||
|
| `help wanted` | Issues, PRs | Assistance requested |
|
||||||
|
| `lgtm` | PRs | “Looks good to me” approval |
|
||||||
|
| `stale` | Issues, PRs | The issue/PR has not had activity for a while |
|
||||||
|
| `rotten` | Issues, PRs | The issue/PR has not had activity since being marked stale and was closed |
|
||||||
|
| `frozen` | Issues, PRs | The issue/PR should be skipped by the stale-bot |
|
||||||
|
| `dco/no` | PRs | The PR is missing a developer certificate of origin sign-off |
|
||||||
|
|
||||||
|
### `area/`
|
||||||
|
|
||||||
|
Area or component of the project affected. Please note that the table below may not be inclusive of all current options.
|
||||||
|
|
||||||
|
| Label | Applies to | Description |
|
||||||
|
| ------------------------------ | ---------- | -------------------------- |
|
||||||
|
| `area/bake` | Any | `bake` |
|
||||||
|
| `area/bake/compose` | Any | `bake/compose` |
|
||||||
|
| `area/build` | Any | `build` |
|
||||||
|
| `area/builder` | Any | `builder` |
|
||||||
|
| `area/buildkit` | Any | Relates to `moby/buildkit` |
|
||||||
|
| `area/cache` | Any | `cache` |
|
||||||
|
| `area/checks` | Any | `checks` |
|
||||||
|
| `area/ci` | Any | Project CI |
|
||||||
|
| `area/cli` | Any | `cli` |
|
||||||
|
| `area/controller` | Any | `controller` |
|
||||||
|
| `area/debug` | Any | `debug` |
|
||||||
|
| `area/dependencies` | Any | Project dependencies |
|
||||||
|
| `area/dockerfile` | Any | `dockerfile` |
|
||||||
|
| `area/docs` | Any | `docs` |
|
||||||
|
| `area/driver` | Any | `driver` |
|
||||||
|
| `area/driver/docker` | Any | `driver/docker` |
|
||||||
|
| `area/driver/docker-container` | Any | `driver/docker-container` |
|
||||||
|
| `area/driver/kubernetes` | Any | `driver/kubernetes` |
|
||||||
|
| `area/driver/remote` | Any | `driver/remote` |
|
||||||
|
| `area/feature-parity` | Any | `feature-parity` |
|
||||||
|
| `area/github-actions` | Any | `github-actions` |
|
||||||
|
| `area/hack` | Any | Project hack/support |
|
||||||
|
| `area/imagetools` | Any | `imagetools` |
|
||||||
|
| `area/metrics` | Any | `metrics` |
|
||||||
|
| `area/moby` | Any | Relates to `moby/moby` |
|
||||||
|
| `area/project` | Any | Project support |
|
||||||
|
| `area/qemu` | Any | `qemu` |
|
||||||
|
| `area/tests` | Any | Project testing |
|
||||||
|
| `area/windows` | Any | `windows` |
|
||||||
|
|
||||||
|
### `exp/`
|
||||||
|
|
||||||
|
Estimated experience level to complete the item
|
||||||
|
|
||||||
|
| Label | Applies to | Description |
|
||||||
|
| ------------------ | ---------- | ------------------------------------------------------------------------------- |
|
||||||
|
| `exp/beginner` | Issue | Suitable for contributors new to the project or technology stack |
|
||||||
|
| `exp/intermediate` | Issue | Requires some familiarity with the project and technology |
|
||||||
|
| `exp/expert` | Issue | Requires deep understanding and advanced skills with the project and technology |
|
||||||
|
|
||||||
|
### `impact/`
|
||||||
|
|
||||||
|
Potential impact areas of the issue or pull request.
|
||||||
|
|
||||||
|
| Label | Applies to | Description |
|
||||||
|
| -------------------- | ---------- | -------------------------------------------------- |
|
||||||
|
| `impact/breaking` | PR | Change is API-breaking |
|
||||||
|
| `impact/changelog` | PR | When complete, the item should be in the changelog |
|
||||||
|
| `impact/deprecation` | PR | Change is a deprecation of a feature |
|
||||||
|
|
||||||
|
|
||||||
|
### `kind/`
|
||||||
|
|
||||||
|
The type of issue, pull request or discussion
|
||||||
|
|
||||||
|
| Label | Applies to | Description |
|
||||||
|
| ------------------ | ----------------- | ------------------------------------------------------- |
|
||||||
|
| `kind/bug` | Issue, PR | Confirmed bug |
|
||||||
|
| `kind/chore` | Issue, PR | Project support tasks |
|
||||||
|
| `kind/docs` | Issue, PR | Additions or modifications to the documentation |
|
||||||
|
| `kind/duplicate` | Any | Duplicate of another item |
|
||||||
|
| `kind/enhancement` | Any | Enhancement of an existing feature |
|
||||||
|
| `kind/feature` | Any | A brand new feature |
|
||||||
|
| `kind/maybe-bug` | Issue, PR | Unconfirmed bug, turns into kind/bug when confirmed |
|
||||||
|
| `kind/proposal` | Issue, Discussion | A proposed major change |
|
||||||
|
| `kind/refactor` | Issue, PR | Refactor of existing code |
|
||||||
|
| `kind/support` | Any | A question, discussion, or other user support item |
|
||||||
|
| `kind/tests` | Issue, PR | Additions or modifications to the project testing suite |
|
||||||
|
|
||||||
|
### `needs/`
|
||||||
|
|
||||||
|
Actions or missing requirements needed by the issue or pull request.
|
||||||
|
|
||||||
|
| Label | Applies to | Description |
|
||||||
|
| --------------------------- | ---------- | ----------------------------------------------------- |
|
||||||
|
| `needs/assignee` | Issue, PR | Needs an assignee |
|
||||||
|
| `needs/code-review` | PR | Needs review of code |
|
||||||
|
| `needs/design-review` | Issue, PR | Needs review of design |
|
||||||
|
| `needs/docs-review` | Issue, PR | Needs review by the documentation team |
|
||||||
|
| `needs/docs-update` | Issue, PR | Needs an update to the docs |
|
||||||
|
| `needs/follow-on-work` | Issue, PR | Needs follow-on work/PR |
|
||||||
|
| `needs/issue` | PR | Needs an issue |
|
||||||
|
| `needs/maintainer-decision` | Issue, PR | Needs maintainer discussion/decision before advancing |
|
||||||
|
| `needs/milestone` | Issue, PR | Needs milestone assignment |
|
||||||
|
| `needs/more-info` | Any | Needs more information from the author |
|
||||||
|
| `needs/more-investigation` | Issue, PR | Needs further investigation |
|
||||||
|
| `needs/priority` | Issue, PR | Needs priority assignment |
|
||||||
|
| `needs/pull-request` | Issue | Needs a pull request |
|
||||||
|
| `needs/rebase` | PR | Needs rebase to target branch |
|
||||||
|
| `needs/reproduction` | Issue, PR | Needs reproduction steps |
|
||||||
|
|
||||||
|
### `priority/`
|
||||||
|
|
||||||
|
Level of urgency of a `kind/bug` issue or pull request.
|
||||||
|
|
||||||
|
| Label | Applies to | Description |
|
||||||
|
| ------------- | ---------- | ----------------------------------------------------------------------- |
|
||||||
|
| `priority/P0` | Issue, PR | Urgent: Security, critical bugs, blocking issues. |
|
||||||
|
| `priority/P1` | Issue, PR | Important: This is a top priority and a must-have for the next release. |
|
||||||
|
| `priority/P2` | Issue, PR | Normal: Default priority |
|
||||||
|
|
||||||
|
### `status/`
|
||||||
|
|
||||||
|
Current lifecycle state of the issue or pull request.
|
||||||
|
|
||||||
|
| Label | Applies to | Description |
|
||||||
|
| --------------------- | ---------- | ---------------------------------------------------------------------- |
|
||||||
|
| `status/accepted` | Issue, PR | The issue has been reviewed and accepted for implementation |
|
||||||
|
| `status/active` | PR | The PR is actively being worked on by a maintainer or community member |
|
||||||
|
| `status/blocked` | Issue, PR | The issue/PR is blocked from advancing to another status |
|
||||||
|
| `status/do-not-merge` | PR | Should not be merged pending further review or changes |
|
||||||
|
| `status/transfer` | Any | Transferred to another project |
|
||||||
|
| `status/triage` | Any | The item needs to be sorted by maintainers |
|
||||||
|
| `status/wontfix` | Issue, PR | The issue/PR will not be fixed or addressed as described |
|
||||||
|
|
||||||
|
## Types of releases
|
||||||
|
|
||||||
|
This project has feature releases, patch releases, and security releases.
|
||||||
|
|
||||||
|
### Feature releases
|
||||||
|
|
||||||
|
Feature releases are made from the development branch, followed by cutting a release branch for future patch releases, which may also occur during the code freeze period.
|
||||||
|
|
||||||
|
#### Release Candidates
|
||||||
|
|
||||||
|
Users can expect 2-3 release candidate (RC) test releases prior to a feature release. The first RC is typically released about one to two weeks before the final release.
|
||||||
|
|
||||||
|
#### Support Policy
|
||||||
|
|
||||||
|
Once a new feature release is cut, support for the previous feature release is discontinued. An exception may be made for urgent security releases that occur shortly after a new feature release. Buildx does not offer LTS (Long-Term Support) releases.
|
||||||
|
|
||||||
|
#### Contributing to Releases
|
||||||
|
|
||||||
|
Anyone can request that an issue or PR be included in the next feature or patch release milestone, provided it meets the necessary requirements.
|
||||||
|
|
||||||
|
### Patch releases
|
||||||
|
|
||||||
|
Patch releases should only include the most critical patches. Stability is vital, so everyone should always use the latest patch release.
|
||||||
|
|
||||||
|
If a fix is needed but does not qualify for a patch release because of its code size or other criteria that make it too unpredictable, we will prioritize cutting a new feature release sooner rather than making an exception for backporting.
|
||||||
|
|
||||||
|
The following PRs are included in patch releases:
|
||||||
|
|
||||||
|
- `priority/P0` fixes
|
||||||
|
- `priority/P1` fixes, assuming maintainers don’t object because of the patch size
|
||||||
|
- `priority/P2` fixes, only if (both required)
|
||||||
|
- proposed by maintainer
|
||||||
|
- the patch is trivial and self-contained
|
||||||
|
- Documentation-only patches
|
||||||
|
- Vendored dependency updates, only if:
|
||||||
|
- Fixing (qualifying) bug or security issue in Buildx
|
||||||
|
- The patch is small, else a forked version of the dependency with only the patches required
|
||||||
|
|
||||||
|
New features do not qualify for patch release.
|
||||||
|
|
||||||
|
## Milestones
|
||||||
|
|
||||||
|
Milestones are used to help identify what releases a contribution will be in.
|
||||||
|
|
||||||
|
- The `v0.next` milestone collects unblocked items planned for the next 2-3 feature releases but not yet assigned to a specific version milestone.
|
||||||
|
- The `v0.backlog` milestone gathers all triaged items considered for the long-term (beyond the next 3 feature releases) or currently unfit for a future release due to certain conditions. These items may be blocked and need to be unblocked before progressing.
|
||||||
|
|
||||||
|
## Triage process
|
||||||
|
|
||||||
|
Triage provides an important way to contribute to an open-source project. This process also applies to pull requests submitted without a linked issue. Triage helps ensure work items are resolved quickly by:
|
||||||
|
|
||||||
|
- Ensuring the issue's intent and purpose are described precisely. This is necessary because it can be difficult for an issue to explain how an end user experiences a problem and what actions they took to arrive at the problem.
|
||||||
|
- Giving a contributor the information they need before they commit to resolving an issue.
|
||||||
|
- Lowering the issue count by preventing duplicate issues.
|
||||||
|
- Streamlining the development process by preventing duplicate discussions.
|
||||||
|
|
||||||
|
If you don't have time to code, consider helping with triage. The community will thank you for saving them time by spending some of yours. The same basic process should be applied upon receipt of a new issue.
|
||||||
|
|
||||||
|
1. Verify essential information
|
||||||
|
2. Classify the issue
|
||||||
|
3. Prioritize the issue
|
||||||
|
|
||||||
|
### Verify essential information
|
||||||
|
|
||||||
|
Before advancing the triage process, ensure the issue contains all necessary information to be properly understood and assessed. The required information may vary by issue type, but typically includes the system environment, version numbers, reproduction steps, expected outcomes, and actual results.
|
||||||
|
|
||||||
|
- **Exercising Judgment**: Use your best judgment to assess the issue description’s completeness.
|
||||||
|
- **Communicating Needs**: If the information provided is insufficient, kindly request additional details from the author. Explain that this information is crucial for clarity and resolution of the issue, and apply the `needs/more-information` label to indicate a response from the author is required.
|
||||||
|
|
||||||
|
### Classify the issue
|
||||||
|
|
||||||
|
An issue will typically have multiple labels. These are used to help communicate key information about context, requirements, and status. At a minimum, a properly classified issue should have:
|
||||||
|
|
||||||
|
- (Required) One or more [`area/*`](#area) labels
|
||||||
|
- (Required) One [`kind/*`](#kind) label to indicate the type of issue
|
||||||
|
- (Required if `kind/bug`) A [`priority/*`](#priority) label
|
||||||
|
|
||||||
|
When assigning a decision the following labels should be present:
|
||||||
|
|
||||||
|
- (Required) One [`status/*`](#status) label to indicate lifecycle status
|
||||||
|
|
||||||
|
Additional labels can provide more clarity:
|
||||||
|
|
||||||
|
- Zero or more [`needs/*`](#needs) labels to indicate missing items
|
||||||
|
- Zero or more [`impact/*`](#impact) labels
|
||||||
|
- One [`exp/*`](#exp) label
|
||||||
|
|
||||||
|
## Prioritization guidelines for `kind/bug`
|
||||||
|
|
||||||
|
When an issue or pull request of `kind/bug` is correctly categorized and attached to a milestone, the labels indicate the urgency with which it should be completed.
|
||||||
|
|
||||||
|
**priority/P0**
|
||||||
|
|
||||||
|
Fixing this item is the highest priority. A patch release will follow as soon as a patch is available and verified. This level is used exclusively for bugs.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
|
||||||
|
- Regression in a critical code path
|
||||||
|
- Panic in a critical code path
|
||||||
|
- Corruption in a critical code path or the rest of the system
|
||||||
|
- Leaked zero-day critical security vulnerability
|
||||||
|
|
||||||
|
**priority/P1**
|
||||||
|
|
||||||
|
Items with this label should be fixed with high priority and almost always included in a patch release. Unless waiting for another issue, patch releases should happen within a week. This level is not used for features or enhancements.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
|
||||||
|
- Any regression, panic
|
||||||
|
- Measurable performance regression
|
||||||
|
- A major bug in a new feature in the latest release
|
||||||
|
- Incompatibility with upgraded external dependency
|
||||||
|
|
||||||
|
**priority/P2**
|
||||||
|
|
||||||
|
This is the default priority and is implied in the absence of a `priority/` label. Bugs with this priority should be included in the next feature release but may land in a patch release if they are ready and unlikely to impact other functionality adversely. Non-bug issues with this priority should also be included in the next feature release if they are available and ready.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
|
||||||
|
- Confirmed bugs
|
||||||
|
- Bugs in non-default configurations
|
||||||
|
- Most enhancements
|
||||||
|
|
||||||
|
## Issue lifecycle
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
flowchart LR
|
||||||
|
create([New issue]) --> triage
|
||||||
|
subgraph triage[Triage Loop]
|
||||||
|
review[Review]
|
||||||
|
end
|
||||||
|
subgraph decision[Decision]
|
||||||
|
accept[Accept]
|
||||||
|
close[Close]
|
||||||
|
end
|
||||||
|
triage -- if accepted --> accept[Assign status, milestone]
|
||||||
|
triage -- if rejected --> close[Assign status, close issue]
|
||||||
|
```
|
||||||
|
|
||||||
|
### Examples
|
||||||
|
|
||||||
|
#### Submitting a bug
|
||||||
|
|
||||||
|
To help illustrate the issue life cycle let’s walk through submitting an issue as a potential bug in CI that enters a feedback loop and is eventually accepted as P2 priority and placed on the backlog.
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
flowchart LR
|
||||||
|
|
||||||
|
new([New issue])
|
||||||
|
|
||||||
|
subgraph triage[Triage]
|
||||||
|
direction LR
|
||||||
|
|
||||||
|
create["Action: Submit issue via Bug form\nLabels: kind/maybe-bug, status/triage"]
|
||||||
|
style create text-align:left
|
||||||
|
|
||||||
|
subgraph review[Review]
|
||||||
|
direction TB
|
||||||
|
classify["Action: Maintainer reviews issue, requests more info\nLabels: kind/maybe-bug, status/triage, needs/more-info, area/*"]
|
||||||
|
style classify text-align:left
|
||||||
|
|
||||||
|
update["Action: Author updates issue\nLabels: kind/maybe-bug, status/triage, needs/more-info, area/*"]
|
||||||
|
style update text-align:left
|
||||||
|
|
||||||
|
classify --> update
|
||||||
|
update --> classify
|
||||||
|
end
|
||||||
|
|
||||||
|
create --> review
|
||||||
|
end
|
||||||
|
|
||||||
|
subgraph decision[Decision]
|
||||||
|
accept["Action: Maintainer reviews updates, accepts, assigns milestone\nLabels: kind/bug, priority/P2, status/accepted, area/*, impact/*"]
|
||||||
|
style accept text-align: left
|
||||||
|
end
|
||||||
|
|
||||||
|
new --> triage
|
||||||
|
triage --> decision
|
||||||
|
```
|
||||||
|
|
||||||
|
## Pull request review process
|
||||||
|
|
||||||
|
A thorough and timely review process for pull requests (PRs) is crucial for maintaining the integrity and quality of the project while fostering a collaborative environment.
|
||||||
|
|
||||||
|
- **Labeling**: Most labels should be inherited from a linked issue. If no issue is linked an extended review process may be required.
|
||||||
|
- **Continuous Integration**: With few exceptions, it is crucial that all Continuous Integration (CI) workflows pass successfully.
|
||||||
|
- **Draft Status**: Incomplete or long-running PRs should be placed in "Draft" status. They may revert to "Draft" status upon initial review if significant rework is required.
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
flowchart LR
|
||||||
|
triage([Triage])
|
||||||
|
draft[Draft PR]
|
||||||
|
review[PR Review]
|
||||||
|
closed{{Close PR}}
|
||||||
|
merge{{Merge PR}}
|
||||||
|
|
||||||
|
subgraph feedback1[Feedback Loop]
|
||||||
|
draft
|
||||||
|
end
|
||||||
|
subgraph feedback2[Feedback Loop]
|
||||||
|
review
|
||||||
|
end
|
||||||
|
|
||||||
|
triage --> draft
|
||||||
|
draft --> review
|
||||||
|
review --> closed
|
||||||
|
review --> draft
|
||||||
|
review --> merge
|
||||||
|
```
|
||||||
|
|
||||||
|
## Handling stalled issues and pull requests
|
||||||
|
|
||||||
|
Unfortunately, some issues or pull requests can remain inactive for extended periods. To mitigate this, automation is employed to prompt both the author and maintainers, ensuring that all contributions receive appropriate attention.
|
||||||
|
|
||||||
|
**For Authors:**
|
||||||
|
|
||||||
|
- **Closure of Inactive Items**: If your issue or PR becomes irrelevant or is no longer needed, please close it to help keep the project clean.
|
||||||
|
- **Prompt Responses**: If additional information is requested, please respond promptly to facilitate progress.
|
||||||
|
|
||||||
|
**For Maintainers:**
|
||||||
|
|
||||||
|
- **Timely Responses**: Endeavor to address issues and PRs within a reasonable timeframe to keep the community actively engaged.
|
||||||
|
- **Engagement with Stale Issues**: If an issue becomes stale due to maintainer inaction, re-engage with the author to reassess and revitalize the discussion.
|
||||||
|
|
||||||
|
**Stale and Rotten Policy:**
|
||||||
|
|
||||||
|
- An issue or PR will be labeled as **`stale`** after 14 calendar days of inactivity. If it remains inactive for another 30 days, it will be labeled as **`rotten`** and closed.
|
||||||
|
- Authors whose issues or PRs have been closed are welcome to re-open them or create new ones and link to the original.
|
||||||
|
|
||||||
|
**Skipping Stale Processing:**
|
||||||
|
|
||||||
|
- To prevent an issue or PR from being marked as stale, label it as **`frozen`**.
|
||||||
|
|
||||||
|
**Exceptions to Stale Processing:**
|
||||||
|
|
||||||
|
- Issues or PRs marked as **`frozen`**.
|
||||||
|
- Issues or PRs assigned to a milestone.
|
||||||
|
|
||||||
|
## Moving to a discussion
|
||||||
|
|
||||||
|
Sometimes, an issue or pull request may not be the appropriate medium for what is essentially a discussion. In such cases, the issue or PR will either be converted to a discussion or a new discussion will be created. The original item will then be labeled appropriately (**`kind/discussion`** or **`kind/question`**) and closed.
|
||||||
|
|
||||||
|
If you believe this conversion was made in error, please express your concerns in the new discussion thread. If necessary, a reversal to the original issue or PR format can be facilitated.
|
||||||
|
|
||||||
|
## Workflow automation
|
||||||
|
|
||||||
|
To help expedite common operations, avoid errors and reduce toil some workflow automation is used by the project. This can include:
|
||||||
|
|
||||||
|
- Stale issue or pull request processing
|
||||||
|
- Auto-labeling actions
|
||||||
|
- Auto-response actions
|
||||||
|
- Label carry over from issue to pull request
|
||||||
|
|
||||||
|
### Exempting an issue/PR from stale bot processing
|
||||||
|
|
||||||
|
The stale item handling is configured in the repository's workflow configuration. To exempt an issue or PR from stale processing you can:
|
||||||
|
|
||||||
|
- Add the item to a milestone
|
||||||
|
- Add the `frozen` label to the item
|
||||||
|
|
||||||
|
## Updating dependencies
|
||||||
|
|
||||||
|
- **Runtime Dependencies**: Use the latest stable release available when the first Release Candidate (RC) of a new feature release is cut. For patch releases, update to the latest corresponding patch release of the dependency.
|
||||||
|
- **Other Dependencies**: Always permitted to update to the latest patch release in the development branch. Updates to a new feature release require justification, unless the dependency is outdated. Prefer tagged versions of dependencies unless a specific untagged commit is needed. Go modules should specify the lowest compatible version; there is no requirement to update all dependencies to their latest versions before cutting a new Buildx feature release.
|
||||||
|
- **Patch Releases**: Vendored dependency updates are considered for patch releases, except in the rare cases specified previously.
|
||||||
|
- **Security Considerations**: A security scanner report indicating a non-exploitable issue via Buildx does not justify backports.
|
||||||
26
README.md
26
README.md
@@ -41,12 +41,10 @@ Key features:
|
|||||||
- [`buildx imagetools create`](docs/reference/buildx_imagetools_create.md)
|
- [`buildx imagetools create`](docs/reference/buildx_imagetools_create.md)
|
||||||
- [`buildx imagetools inspect`](docs/reference/buildx_imagetools_inspect.md)
|
- [`buildx imagetools inspect`](docs/reference/buildx_imagetools_inspect.md)
|
||||||
- [`buildx inspect`](docs/reference/buildx_inspect.md)
|
- [`buildx inspect`](docs/reference/buildx_inspect.md)
|
||||||
- [`buildx install`](docs/reference/buildx_install.md)
|
|
||||||
- [`buildx ls`](docs/reference/buildx_ls.md)
|
- [`buildx ls`](docs/reference/buildx_ls.md)
|
||||||
- [`buildx prune`](docs/reference/buildx_prune.md)
|
- [`buildx prune`](docs/reference/buildx_prune.md)
|
||||||
- [`buildx rm`](docs/reference/buildx_rm.md)
|
- [`buildx rm`](docs/reference/buildx_rm.md)
|
||||||
- [`buildx stop`](docs/reference/buildx_stop.md)
|
- [`buildx stop`](docs/reference/buildx_stop.md)
|
||||||
- [`buildx uninstall`](docs/reference/buildx_uninstall.md)
|
|
||||||
- [`buildx use`](docs/reference/buildx_use.md)
|
- [`buildx use`](docs/reference/buildx_use.md)
|
||||||
- [`buildx version`](docs/reference/buildx_version.md)
|
- [`buildx version`](docs/reference/buildx_version.md)
|
||||||
- [Contributing](#contributing)
|
- [Contributing](#contributing)
|
||||||
@@ -58,8 +56,7 @@ For more information on how to use Buildx, see
|
|||||||
|
|
||||||
Using `buildx` with Docker requires Docker engine 19.03 or newer.
|
Using `buildx` with Docker requires Docker engine 19.03 or newer.
|
||||||
|
|
||||||
> **Warning**
|
> [!WARNING]
|
||||||
>
|
|
||||||
> Using an incompatible version of Docker may result in unexpected behavior,
|
> Using an incompatible version of Docker may result in unexpected behavior,
|
||||||
> and will likely cause issues, especially when using Buildx builders with more
|
> and will likely cause issues, especially when using Buildx builders with more
|
||||||
> recent versions of BuildKit.
|
> recent versions of BuildKit.
|
||||||
@@ -71,13 +68,13 @@ for Windows and macOS.
|
|||||||
|
|
||||||
## Linux packages
|
## Linux packages
|
||||||
|
|
||||||
Docker Linux packages also include Docker Buildx when installed using the
|
Docker Engine package repositories contain Docker Buildx packages when installed according to the
|
||||||
[DEB or RPM packages](https://docs.docker.com/engine/install/).
|
[Docker Engine install documentation](https://docs.docker.com/engine/install/). Install the
|
||||||
|
`docker-buildx-plugin` package to install the Buildx plugin.
|
||||||
|
|
||||||
## Manual download
|
## Manual download
|
||||||
|
|
||||||
> **Important**
|
> [!IMPORTANT]
|
||||||
>
|
|
||||||
> This section is for unattended installation of the buildx component. These
|
> This section is for unattended installation of the buildx component. These
|
||||||
> instructions are mostly suitable for testing purposes. We do not recommend
|
> instructions are mostly suitable for testing purposes. We do not recommend
|
||||||
> installing buildx using manual download in production environments as they
|
> installing buildx using manual download in production environments as they
|
||||||
@@ -108,8 +105,7 @@ On Windows:
|
|||||||
* `C:\ProgramData\Docker\cli-plugins`
|
* `C:\ProgramData\Docker\cli-plugins`
|
||||||
* `C:\Program Files\Docker\cli-plugins`
|
* `C:\Program Files\Docker\cli-plugins`
|
||||||
|
|
||||||
> **Note**
|
> [!NOTE]
|
||||||
>
|
|
||||||
> On Unix environments, it may also be necessary to make it executable with `chmod +x`:
|
> On Unix environments, it may also be necessary to make it executable with `chmod +x`:
|
||||||
> ```shell
|
> ```shell
|
||||||
> $ chmod +x ~/.docker/cli-plugins/docker-buildx
|
> $ chmod +x ~/.docker/cli-plugins/docker-buildx
|
||||||
@@ -188,12 +184,12 @@ through various "drivers". Each driver defines how and where a build should
|
|||||||
run, and have different feature sets.
|
run, and have different feature sets.
|
||||||
|
|
||||||
We currently support the following drivers:
|
We currently support the following drivers:
|
||||||
- The `docker` driver ([guide](docs/manuals/drivers/docker.md), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
|
- The `docker` driver ([guide](https://docs.docker.com/build/drivers/docker/), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
|
||||||
- The `docker-container` driver ([guide](docs/manuals/drivers/docker-container.md), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
|
- The `docker-container` driver ([guide](https://docs.docker.com/build/drivers/docker-container/), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
|
||||||
- The `kubernetes` driver ([guide](docs/manuals/drivers/kubernetes.md), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
|
- The `kubernetes` driver ([guide](https://docs.docker.com/build/drivers/kubernetes/), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
|
||||||
- The `remote` driver ([guide](docs/manuals/drivers/remote.md))
|
- The `remote` driver ([guide](https://docs.docker.com/build/drivers/remote/))
|
||||||
|
|
||||||
For more information on drivers, see the [drivers guide](docs/manuals/drivers/index.md).
|
For more information on drivers, see the [drivers guide](https://docs.docker.com/build/drivers/).
|
||||||
|
|
||||||
## Working with builder instances
|
## Working with builder instances
|
||||||
|
|
||||||
|
|||||||
370
bake/bake.go
370
bake/bake.go
@@ -2,7 +2,6 @@ package bake
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"encoding/csv"
|
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
@@ -11,19 +10,24 @@ import (
|
|||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
composecli "github.com/compose-spec/compose-go/cli"
|
composecli "github.com/compose-spec/compose-go/v2/cli"
|
||||||
"github.com/docker/buildx/bake/hclparser"
|
"github.com/docker/buildx/bake/hclparser"
|
||||||
"github.com/docker/buildx/build"
|
"github.com/docker/buildx/build"
|
||||||
controllerapi "github.com/docker/buildx/controller/pb"
|
controllerapi "github.com/docker/buildx/controller/pb"
|
||||||
"github.com/docker/buildx/util/buildflags"
|
"github.com/docker/buildx/util/buildflags"
|
||||||
"github.com/docker/buildx/util/platformutil"
|
"github.com/docker/buildx/util/platformutil"
|
||||||
|
"github.com/docker/buildx/util/progress"
|
||||||
"github.com/docker/cli/cli/config"
|
"github.com/docker/cli/cli/config"
|
||||||
|
dockeropts "github.com/docker/cli/opts"
|
||||||
hcl "github.com/hashicorp/hcl/v2"
|
hcl "github.com/hashicorp/hcl/v2"
|
||||||
|
"github.com/moby/buildkit/client"
|
||||||
"github.com/moby/buildkit/client/llb"
|
"github.com/moby/buildkit/client/llb"
|
||||||
"github.com/moby/buildkit/session/auth/authprovider"
|
"github.com/moby/buildkit/session/auth/authprovider"
|
||||||
|
"github.com/moby/buildkit/util/entitlements"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/tonistiigi/go-csvvalue"
|
||||||
"github.com/zclconf/go-cty/cty"
|
"github.com/zclconf/go-cty/cty"
|
||||||
"github.com/zclconf/go-cty/cty/convert"
|
"github.com/zclconf/go-cty/cty/convert"
|
||||||
)
|
)
|
||||||
@@ -55,7 +59,7 @@ func defaultFilenames() []string {
|
|||||||
return names
|
return names
|
||||||
}
|
}
|
||||||
|
|
||||||
func ReadLocalFiles(names []string, stdin io.Reader) ([]File, error) {
|
func ReadLocalFiles(names []string, stdin io.Reader, l progress.SubLogger) ([]File, error) {
|
||||||
isDefault := false
|
isDefault := false
|
||||||
if len(names) == 0 {
|
if len(names) == 0 {
|
||||||
isDefault = true
|
isDefault = true
|
||||||
@@ -63,20 +67,26 @@ func ReadLocalFiles(names []string, stdin io.Reader) ([]File, error) {
|
|||||||
}
|
}
|
||||||
out := make([]File, 0, len(names))
|
out := make([]File, 0, len(names))
|
||||||
|
|
||||||
|
setStatus := func(st *client.VertexStatus) {
|
||||||
|
if l != nil {
|
||||||
|
l.SetStatus(st)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
for _, n := range names {
|
for _, n := range names {
|
||||||
var dt []byte
|
var dt []byte
|
||||||
var err error
|
var err error
|
||||||
if n == "-" {
|
if n == "-" {
|
||||||
dt, err = io.ReadAll(stdin)
|
dt, err = readWithProgress(stdin, setStatus)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
dt, err = os.ReadFile(n)
|
dt, err = readFileWithProgress(n, isDefault, setStatus)
|
||||||
if err != nil {
|
if dt == nil && err == nil {
|
||||||
if isDefault && errors.Is(err, os.ErrNotExist) {
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -85,8 +95,90 @@ func ReadLocalFiles(names []string, stdin io.Reader) ([]File, error) {
|
|||||||
return out, nil
|
return out, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func readFileWithProgress(fname string, isDefault bool, setStatus func(st *client.VertexStatus)) (dt []byte, err error) {
|
||||||
|
st := &client.VertexStatus{
|
||||||
|
ID: "reading " + fname,
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
now := time.Now()
|
||||||
|
st.Completed = &now
|
||||||
|
if dt != nil || err != nil {
|
||||||
|
setStatus(st)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
now := time.Now()
|
||||||
|
st.Started = &now
|
||||||
|
|
||||||
|
f, err := os.Open(fname)
|
||||||
|
if err != nil {
|
||||||
|
if isDefault && errors.Is(err, os.ErrNotExist) {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
setStatus(st)
|
||||||
|
|
||||||
|
info, err := f.Stat()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
st.Total = info.Size()
|
||||||
|
setStatus(st)
|
||||||
|
|
||||||
|
buf := make([]byte, 1024)
|
||||||
|
for {
|
||||||
|
n, err := f.Read(buf)
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
dt = append(dt, buf[:n]...)
|
||||||
|
st.Current += int64(n)
|
||||||
|
setStatus(st)
|
||||||
|
}
|
||||||
|
|
||||||
|
return dt, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func readWithProgress(r io.Reader, setStatus func(st *client.VertexStatus)) (dt []byte, err error) {
|
||||||
|
st := &client.VertexStatus{
|
||||||
|
ID: "reading from stdin",
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
now := time.Now()
|
||||||
|
st.Completed = &now
|
||||||
|
setStatus(st)
|
||||||
|
}()
|
||||||
|
|
||||||
|
now := time.Now()
|
||||||
|
st.Started = &now
|
||||||
|
setStatus(st)
|
||||||
|
|
||||||
|
buf := make([]byte, 1024)
|
||||||
|
for {
|
||||||
|
n, err := r.Read(buf)
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
dt = append(dt, buf[:n]...)
|
||||||
|
st.Current += int64(n)
|
||||||
|
setStatus(st)
|
||||||
|
}
|
||||||
|
|
||||||
|
return dt, nil
|
||||||
|
}
|
||||||
|
|
||||||
func ListTargets(files []File) ([]string, error) {
|
func ListTargets(files []File) ([]string, error) {
|
||||||
c, err := ParseFiles(files, nil)
|
c, _, err := ParseFiles(files, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -101,7 +193,7 @@ func ListTargets(files []File) ([]string, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func ReadTargets(ctx context.Context, files []File, targets, overrides []string, defaults map[string]string) (map[string]*Target, map[string]*Group, error) {
|
func ReadTargets(ctx context.Context, files []File, targets, overrides []string, defaults map[string]string) (map[string]*Target, map[string]*Group, error) {
|
||||||
c, err := ParseFiles(files, defaults)
|
c, _, err := ParseFiles(files, defaults)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
@@ -207,7 +299,7 @@ func sliceToMap(env []string) (res map[string]string) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error) {
|
func ParseFiles(files []File, defaults map[string]string) (_ *Config, _ *hclparser.ParseMeta, err error) {
|
||||||
defer func() {
|
defer func() {
|
||||||
err = formatHCLError(err, files)
|
err = formatHCLError(err, files)
|
||||||
}()
|
}()
|
||||||
@@ -219,7 +311,7 @@ func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error)
|
|||||||
isCompose, composeErr := validateComposeFile(f.Data, f.Name)
|
isCompose, composeErr := validateComposeFile(f.Data, f.Name)
|
||||||
if isCompose {
|
if isCompose {
|
||||||
if composeErr != nil {
|
if composeErr != nil {
|
||||||
return nil, composeErr
|
return nil, nil, composeErr
|
||||||
}
|
}
|
||||||
composeFiles = append(composeFiles, f)
|
composeFiles = append(composeFiles, f)
|
||||||
}
|
}
|
||||||
@@ -227,13 +319,13 @@ func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error)
|
|||||||
hf, isHCL, err := ParseHCLFile(f.Data, f.Name)
|
hf, isHCL, err := ParseHCLFile(f.Data, f.Name)
|
||||||
if isHCL {
|
if isHCL {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
hclFiles = append(hclFiles, hf)
|
hclFiles = append(hclFiles, hf)
|
||||||
} else if composeErr != nil {
|
} else if composeErr != nil {
|
||||||
return nil, errors.Wrapf(err, "failed to parse %s: parsing yaml: %v, parsing hcl", f.Name, composeErr)
|
return nil, nil, errors.Wrapf(err, "failed to parse %s: parsing yaml: %v, parsing hcl", f.Name, composeErr)
|
||||||
} else {
|
} else {
|
||||||
return nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -241,23 +333,24 @@ func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error)
|
|||||||
if len(composeFiles) > 0 {
|
if len(composeFiles) > 0 {
|
||||||
cfg, cmperr := ParseComposeFiles(composeFiles)
|
cfg, cmperr := ParseComposeFiles(composeFiles)
|
||||||
if cmperr != nil {
|
if cmperr != nil {
|
||||||
return nil, errors.Wrap(cmperr, "failed to parse compose file")
|
return nil, nil, errors.Wrap(cmperr, "failed to parse compose file")
|
||||||
}
|
}
|
||||||
c = mergeConfig(c, *cfg)
|
c = mergeConfig(c, *cfg)
|
||||||
c = dedupeConfig(c)
|
c = dedupeConfig(c)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var pm hclparser.ParseMeta
|
||||||
if len(hclFiles) > 0 {
|
if len(hclFiles) > 0 {
|
||||||
renamed, err := hclparser.Parse(hcl.MergeFiles(hclFiles), hclparser.Opt{
|
res, err := hclparser.Parse(hclparser.MergeFiles(hclFiles), hclparser.Opt{
|
||||||
LookupVar: os.LookupEnv,
|
LookupVar: os.LookupEnv,
|
||||||
Vars: defaults,
|
Vars: defaults,
|
||||||
ValidateLabel: validateTargetName,
|
ValidateLabel: validateTargetName,
|
||||||
}, &c)
|
}, &c)
|
||||||
if err.HasErrors() {
|
if err.HasErrors() {
|
||||||
return nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, renamed := range renamed {
|
for _, renamed := range res.Renamed {
|
||||||
for oldName, newNames := range renamed {
|
for oldName, newNames := range renamed {
|
||||||
newNames = dedupSlice(newNames)
|
newNames = dedupSlice(newNames)
|
||||||
if len(newNames) == 1 && oldName == newNames[0] {
|
if len(newNames) == 1 && oldName == newNames[0] {
|
||||||
@@ -270,9 +363,10 @@ func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
c = dedupeConfig(c)
|
c = dedupeConfig(c)
|
||||||
|
pm = *res
|
||||||
}
|
}
|
||||||
|
|
||||||
return &c, nil
|
return &c, &pm, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func dedupeConfig(c Config) Config {
|
func dedupeConfig(c Config) Config {
|
||||||
@@ -297,7 +391,8 @@ func dedupeConfig(c Config) Config {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func ParseFile(dt []byte, fn string) (*Config, error) {
|
func ParseFile(dt []byte, fn string) (*Config, error) {
|
||||||
return ParseFiles([]File{{Data: dt, Name: fn}}, nil)
|
c, _, err := ParseFiles([]File{{Data: dt, Name: fn}}, nil)
|
||||||
|
return c, err
|
||||||
}
|
}
|
||||||
|
|
||||||
type Config struct {
|
type Config struct {
|
||||||
@@ -400,7 +495,7 @@ func (c Config) loadLinks(name string, t *Target, m map[string]*Target, o map[st
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
t2.Outputs = nil
|
t2.Outputs = []string{"type=cacheonly"}
|
||||||
t2.linked = true
|
t2.linked = true
|
||||||
m[target] = t2
|
m[target] = t2
|
||||||
}
|
}
|
||||||
@@ -448,7 +543,7 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
|
|||||||
o := t[kk[1]]
|
o := t[kk[1]]
|
||||||
|
|
||||||
switch keys[1] {
|
switch keys[1] {
|
||||||
case "output", "cache-to", "cache-from", "tags", "platform", "secrets", "ssh", "attest":
|
case "output", "cache-to", "cache-from", "tags", "platform", "secrets", "ssh", "attest", "entitlements", "network":
|
||||||
if len(parts) == 2 {
|
if len(parts) == 2 {
|
||||||
o.ArrValue = append(o.ArrValue, parts[1])
|
o.ArrValue = append(o.ArrValue, parts[1])
|
||||||
}
|
}
|
||||||
@@ -579,17 +674,20 @@ func (c Config) target(name string, visited map[string]*Target, overrides map[st
|
|||||||
|
|
||||||
type Group struct {
|
type Group struct {
|
||||||
Name string `json:"-" hcl:"name,label" cty:"name"`
|
Name string `json:"-" hcl:"name,label" cty:"name"`
|
||||||
|
Description string `json:"description,omitempty" hcl:"description,optional" cty:"description"`
|
||||||
Targets []string `json:"targets" hcl:"targets" cty:"targets"`
|
Targets []string `json:"targets" hcl:"targets" cty:"targets"`
|
||||||
// Target // TODO?
|
// Target // TODO?
|
||||||
}
|
}
|
||||||
|
|
||||||
type Target struct {
|
type Target struct {
|
||||||
Name string `json:"-" hcl:"name,label" cty:"name"`
|
Name string `json:"-" hcl:"name,label" cty:"name"`
|
||||||
|
Description string `json:"description,omitempty" hcl:"description,optional" cty:"description"`
|
||||||
|
|
||||||
// Inherits is the only field that cannot be overridden with --set
|
// Inherits is the only field that cannot be overridden with --set
|
||||||
Attest []string `json:"attest,omitempty" hcl:"attest,optional" cty:"attest"`
|
|
||||||
Inherits []string `json:"inherits,omitempty" hcl:"inherits,optional" cty:"inherits"`
|
Inherits []string `json:"inherits,omitempty" hcl:"inherits,optional" cty:"inherits"`
|
||||||
|
|
||||||
|
Annotations []string `json:"annotations,omitempty" hcl:"annotations,optional" cty:"annotations"`
|
||||||
|
Attest []string `json:"attest,omitempty" hcl:"attest,optional" cty:"attest"`
|
||||||
Context *string `json:"context,omitempty" hcl:"context,optional" cty:"context"`
|
Context *string `json:"context,omitempty" hcl:"context,optional" cty:"context"`
|
||||||
Contexts map[string]string `json:"contexts,omitempty" hcl:"contexts,optional" cty:"contexts"`
|
Contexts map[string]string `json:"contexts,omitempty" hcl:"contexts,optional" cty:"contexts"`
|
||||||
Dockerfile *string `json:"dockerfile,omitempty" hcl:"dockerfile,optional" cty:"dockerfile"`
|
Dockerfile *string `json:"dockerfile,omitempty" hcl:"dockerfile,optional" cty:"dockerfile"`
|
||||||
@@ -606,9 +704,13 @@ type Target struct {
|
|||||||
Outputs []string `json:"output,omitempty" hcl:"output,optional" cty:"output"`
|
Outputs []string `json:"output,omitempty" hcl:"output,optional" cty:"output"`
|
||||||
Pull *bool `json:"pull,omitempty" hcl:"pull,optional" cty:"pull"`
|
Pull *bool `json:"pull,omitempty" hcl:"pull,optional" cty:"pull"`
|
||||||
NoCache *bool `json:"no-cache,omitempty" hcl:"no-cache,optional" cty:"no-cache"`
|
NoCache *bool `json:"no-cache,omitempty" hcl:"no-cache,optional" cty:"no-cache"`
|
||||||
NetworkMode *string `json:"-" hcl:"-" cty:"-"`
|
NetworkMode *string `json:"network,omitempty" hcl:"network,optional" cty:"network"`
|
||||||
NoCacheFilter []string `json:"no-cache-filter,omitempty" hcl:"no-cache-filter,optional" cty:"no-cache-filter"`
|
NoCacheFilter []string `json:"no-cache-filter,omitempty" hcl:"no-cache-filter,optional" cty:"no-cache-filter"`
|
||||||
// IMPORTANT: if you add more fields here, do not forget to update newOverrides and docs/bake-reference.md.
|
ShmSize *string `json:"shm-size,omitempty" hcl:"shm-size,optional"`
|
||||||
|
Ulimits []string `json:"ulimits,omitempty" hcl:"ulimits,optional"`
|
||||||
|
Call *string `json:"call,omitempty" hcl:"call,optional" cty:"call"`
|
||||||
|
Entitlements []string `json:"entitlements,omitempty" hcl:"entitlements,optional" cty:"entitlements"`
|
||||||
|
// IMPORTANT: if you add more fields here, do not forget to update newOverrides/AddOverrides and docs/bake-reference.md.
|
||||||
|
|
||||||
// linked is a private field to mark a target used as a linked one
|
// linked is a private field to mark a target used as a linked one
|
||||||
linked bool
|
linked bool
|
||||||
@@ -620,6 +722,7 @@ var _ hclparser.WithEvalContexts = &Group{}
|
|||||||
var _ hclparser.WithGetName = &Group{}
|
var _ hclparser.WithGetName = &Group{}
|
||||||
|
|
||||||
func (t *Target) normalize() {
|
func (t *Target) normalize() {
|
||||||
|
t.Annotations = removeDupes(t.Annotations)
|
||||||
t.Attest = removeAttestDupes(t.Attest)
|
t.Attest = removeAttestDupes(t.Attest)
|
||||||
t.Tags = removeDupes(t.Tags)
|
t.Tags = removeDupes(t.Tags)
|
||||||
t.Secrets = removeDupes(t.Secrets)
|
t.Secrets = removeDupes(t.Secrets)
|
||||||
@@ -629,6 +732,13 @@ func (t *Target) normalize() {
|
|||||||
t.CacheTo = removeDupes(t.CacheTo)
|
t.CacheTo = removeDupes(t.CacheTo)
|
||||||
t.Outputs = removeDupes(t.Outputs)
|
t.Outputs = removeDupes(t.Outputs)
|
||||||
t.NoCacheFilter = removeDupes(t.NoCacheFilter)
|
t.NoCacheFilter = removeDupes(t.NoCacheFilter)
|
||||||
|
t.Ulimits = removeDupes(t.Ulimits)
|
||||||
|
|
||||||
|
if t.NetworkMode != nil && *t.NetworkMode == "host" {
|
||||||
|
t.Entitlements = append(t.Entitlements, "network.host")
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Entitlements = removeDupes(t.Entitlements)
|
||||||
|
|
||||||
for k, v := range t.Contexts {
|
for k, v := range t.Contexts {
|
||||||
if v == "" {
|
if v == "" {
|
||||||
@@ -680,6 +790,12 @@ func (t *Target) Merge(t2 *Target) {
|
|||||||
if t2.Target != nil {
|
if t2.Target != nil {
|
||||||
t.Target = t2.Target
|
t.Target = t2.Target
|
||||||
}
|
}
|
||||||
|
if t2.Call != nil {
|
||||||
|
t.Call = t2.Call
|
||||||
|
}
|
||||||
|
if t2.Annotations != nil { // merge
|
||||||
|
t.Annotations = append(t.Annotations, t2.Annotations...)
|
||||||
|
}
|
||||||
if t2.Attest != nil { // merge
|
if t2.Attest != nil { // merge
|
||||||
t.Attest = append(t.Attest, t2.Attest...)
|
t.Attest = append(t.Attest, t2.Attest...)
|
||||||
t.Attest = removeAttestDupes(t.Attest)
|
t.Attest = removeAttestDupes(t.Attest)
|
||||||
@@ -714,6 +830,18 @@ func (t *Target) Merge(t2 *Target) {
|
|||||||
if t2.NoCacheFilter != nil { // merge
|
if t2.NoCacheFilter != nil { // merge
|
||||||
t.NoCacheFilter = append(t.NoCacheFilter, t2.NoCacheFilter...)
|
t.NoCacheFilter = append(t.NoCacheFilter, t2.NoCacheFilter...)
|
||||||
}
|
}
|
||||||
|
if t2.ShmSize != nil { // no merge
|
||||||
|
t.ShmSize = t2.ShmSize
|
||||||
|
}
|
||||||
|
if t2.Ulimits != nil { // merge
|
||||||
|
t.Ulimits = append(t.Ulimits, t2.Ulimits...)
|
||||||
|
}
|
||||||
|
if t2.Description != "" {
|
||||||
|
t.Description = t2.Description
|
||||||
|
}
|
||||||
|
if t2.Entitlements != nil { // merge
|
||||||
|
t.Entitlements = append(t.Entitlements, t2.Entitlements...)
|
||||||
|
}
|
||||||
t.Inherits = append(t.Inherits, t2.Inherits...)
|
t.Inherits = append(t.Inherits, t2.Inherits...)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -758,6 +886,8 @@ func (t *Target) AddOverrides(overrides map[string]Override) error {
|
|||||||
t.CacheTo = o.ArrValue
|
t.CacheTo = o.ArrValue
|
||||||
case "target":
|
case "target":
|
||||||
t.Target = &value
|
t.Target = &value
|
||||||
|
case "call":
|
||||||
|
t.Call = &value
|
||||||
case "secrets":
|
case "secrets":
|
||||||
t.Secrets = o.ArrValue
|
t.Secrets = o.ArrValue
|
||||||
case "ssh":
|
case "ssh":
|
||||||
@@ -766,6 +896,10 @@ func (t *Target) AddOverrides(overrides map[string]Override) error {
|
|||||||
t.Platforms = o.ArrValue
|
t.Platforms = o.ArrValue
|
||||||
case "output":
|
case "output":
|
||||||
t.Outputs = o.ArrValue
|
t.Outputs = o.ArrValue
|
||||||
|
case "entitlements":
|
||||||
|
t.Entitlements = append(t.Entitlements, o.ArrValue...)
|
||||||
|
case "annotations":
|
||||||
|
t.Annotations = append(t.Annotations, o.ArrValue...)
|
||||||
case "attest":
|
case "attest":
|
||||||
t.Attest = append(t.Attest, o.ArrValue...)
|
t.Attest = append(t.Attest, o.ArrValue...)
|
||||||
case "no-cache":
|
case "no-cache":
|
||||||
@@ -776,6 +910,12 @@ func (t *Target) AddOverrides(overrides map[string]Override) error {
|
|||||||
t.NoCache = &noCache
|
t.NoCache = &noCache
|
||||||
case "no-cache-filter":
|
case "no-cache-filter":
|
||||||
t.NoCacheFilter = o.ArrValue
|
t.NoCacheFilter = o.ArrValue
|
||||||
|
case "shm-size":
|
||||||
|
t.ShmSize = &value
|
||||||
|
case "ulimits":
|
||||||
|
t.Ulimits = o.ArrValue
|
||||||
|
case "network":
|
||||||
|
t.NetworkMode = &value
|
||||||
case "pull":
|
case "pull":
|
||||||
pull, err := strconv.ParseBool(value)
|
pull, err := strconv.ParseBool(value)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -783,19 +923,17 @@ func (t *Target) AddOverrides(overrides map[string]Override) error {
|
|||||||
}
|
}
|
||||||
t.Pull = &pull
|
t.Pull = &pull
|
||||||
case "push":
|
case "push":
|
||||||
_, err := strconv.ParseBool(value)
|
push, err := strconv.ParseBool(value)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Errorf("invalid value %s for boolean key push", value)
|
return errors.Errorf("invalid value %s for boolean key push", value)
|
||||||
}
|
}
|
||||||
if len(t.Outputs) == 0 {
|
t.Outputs = setPushOverride(t.Outputs, push)
|
||||||
t.Outputs = append(t.Outputs, "type=image,push=true")
|
case "load":
|
||||||
} else {
|
load, err := strconv.ParseBool(value)
|
||||||
for i, output := range t.Outputs {
|
if err != nil {
|
||||||
if typ := parseOutputType(output); typ == "image" || typ == "registry" {
|
return errors.Errorf("invalid value %s for boolean key load", value)
|
||||||
t.Outputs[i] = t.Outputs[i] + ",push=" + value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
t.Outputs = setLoadOverride(t.Outputs, load)
|
||||||
default:
|
default:
|
||||||
return errors.Errorf("unknown key: %s", keys[0])
|
return errors.Errorf("unknown key: %s", keys[0])
|
||||||
}
|
}
|
||||||
@@ -852,9 +990,11 @@ func (t *Target) GetEvalContexts(ectx *hcl.EvalContext, block *hcl.Block, loadDe
|
|||||||
for _, e := range ectxs {
|
for _, e := range ectxs {
|
||||||
e2 := ectx.NewChild()
|
e2 := ectx.NewChild()
|
||||||
e2.Variables = make(map[string]cty.Value)
|
e2.Variables = make(map[string]cty.Value)
|
||||||
|
if e != ectx {
|
||||||
for k, v := range e.Variables {
|
for k, v := range e.Variables {
|
||||||
e2.Variables[k] = v
|
e2.Variables[k] = v
|
||||||
}
|
}
|
||||||
|
}
|
||||||
e2.Variables[k] = v
|
e2.Variables[k] = v
|
||||||
ectxs2 = append(ectxs2, e2)
|
ectxs2 = append(ectxs2, e2)
|
||||||
}
|
}
|
||||||
@@ -912,12 +1052,17 @@ func (t *Target) GetName(ectx *hcl.EvalContext, block *hcl.Block, loadDeps func(
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TargetsToBuildOpt(m map[string]*Target, inp *Input) (map[string]build.Options, error) {
|
func TargetsToBuildOpt(m map[string]*Target, inp *Input) (map[string]build.Options, error) {
|
||||||
|
// make sure local credentials are loaded multiple times for different targets
|
||||||
|
dockerConfig := config.LoadDefaultConfigFile(os.Stderr)
|
||||||
|
authProvider := authprovider.NewDockerAuthProvider(dockerConfig, nil)
|
||||||
|
|
||||||
m2 := make(map[string]build.Options, len(m))
|
m2 := make(map[string]build.Options, len(m))
|
||||||
for k, v := range m {
|
for k, v := range m {
|
||||||
bo, err := toBuildOpt(v, inp)
|
bo, err := toBuildOpt(v, inp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
bo.Session = append(bo.Session, authProvider)
|
||||||
m2[k] = *bo
|
m2[k] = *bo
|
||||||
}
|
}
|
||||||
return m2, nil
|
return m2, nil
|
||||||
@@ -1038,6 +1183,9 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
|||||||
if t.Dockerfile != nil {
|
if t.Dockerfile != nil {
|
||||||
dockerfilePath = *t.Dockerfile
|
dockerfilePath = *t.Dockerfile
|
||||||
}
|
}
|
||||||
|
if !strings.HasPrefix(dockerfilePath, "cwd://") {
|
||||||
|
dockerfilePath = path.Clean(dockerfilePath)
|
||||||
|
}
|
||||||
|
|
||||||
bi := build.Inputs{
|
bi := build.Inputs{
|
||||||
ContextPath: contextPath,
|
ContextPath: contextPath,
|
||||||
@@ -1048,12 +1196,44 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
|||||||
bi.DockerfileInline = *t.DockerfileInline
|
bi.DockerfileInline = *t.DockerfileInline
|
||||||
}
|
}
|
||||||
updateContext(&bi, inp)
|
updateContext(&bi, inp)
|
||||||
if !build.IsRemoteURL(bi.ContextPath) && bi.ContextState == nil && !path.IsAbs(bi.DockerfilePath) {
|
if strings.HasPrefix(bi.DockerfilePath, "cwd://") {
|
||||||
bi.DockerfilePath = path.Join(bi.ContextPath, bi.DockerfilePath)
|
// If Dockerfile is local for a remote invocation, we first check if
|
||||||
|
// it's not outside the working directory and then resolve it to an
|
||||||
|
// absolute path.
|
||||||
|
bi.DockerfilePath = path.Clean(strings.TrimPrefix(bi.DockerfilePath, "cwd://"))
|
||||||
|
if err := checkPath(bi.DockerfilePath); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var err error
|
||||||
|
bi.DockerfilePath, err = filepath.Abs(bi.DockerfilePath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
} else if !build.IsRemoteURL(bi.DockerfilePath) && strings.HasPrefix(bi.ContextPath, "cwd://") && (inp != nil && build.IsRemoteURL(inp.URL)) {
|
||||||
|
// We don't currently support reading a remote Dockerfile with a local
|
||||||
|
// context when doing a remote invocation because we automatically
|
||||||
|
// derive the dockerfile from the context atm:
|
||||||
|
//
|
||||||
|
// target "default" {
|
||||||
|
// context = BAKE_CMD_CONTEXT
|
||||||
|
// dockerfile = "Dockerfile.app"
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// > docker buildx bake https://github.com/foo/bar.git
|
||||||
|
// failed to solve: failed to read dockerfile: open /var/lib/docker/tmp/buildkit-mount3004544897/Dockerfile.app: no such file or directory
|
||||||
|
//
|
||||||
|
// To avoid mistakenly reading a local Dockerfile, we check if the
|
||||||
|
// Dockerfile exists locally and if so, we error out.
|
||||||
|
if _, err := os.Stat(filepath.Join(path.Clean(strings.TrimPrefix(bi.ContextPath, "cwd://")), bi.DockerfilePath)); err == nil {
|
||||||
|
return nil, errors.Errorf("reading a dockerfile for a remote build invocation is currently not supported")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if strings.HasPrefix(bi.ContextPath, "cwd://") {
|
if strings.HasPrefix(bi.ContextPath, "cwd://") {
|
||||||
bi.ContextPath = path.Clean(strings.TrimPrefix(bi.ContextPath, "cwd://"))
|
bi.ContextPath = path.Clean(strings.TrimPrefix(bi.ContextPath, "cwd://"))
|
||||||
}
|
}
|
||||||
|
if !build.IsRemoteURL(bi.ContextPath) && bi.ContextState == nil && !path.IsAbs(bi.DockerfilePath) {
|
||||||
|
bi.DockerfilePath = path.Join(bi.ContextPath, bi.DockerfilePath)
|
||||||
|
}
|
||||||
for k, v := range bi.NamedContexts {
|
for k, v := range bi.NamedContexts {
|
||||||
if strings.HasPrefix(v.Path, "cwd://") {
|
if strings.HasPrefix(v.Path, "cwd://") {
|
||||||
bi.NamedContexts[k] = build.NamedContext{Path: path.Clean(strings.TrimPrefix(v.Path, "cwd://"))}
|
bi.NamedContexts[k] = build.NamedContext{Path: path.Clean(strings.TrimPrefix(v.Path, "cwd://"))}
|
||||||
@@ -1094,6 +1274,12 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
|||||||
if t.NetworkMode != nil {
|
if t.NetworkMode != nil {
|
||||||
networkMode = *t.NetworkMode
|
networkMode = *t.NetworkMode
|
||||||
}
|
}
|
||||||
|
shmSize := new(dockeropts.MemBytes)
|
||||||
|
if t.ShmSize != nil {
|
||||||
|
if err := shmSize.Set(*t.ShmSize); err != nil {
|
||||||
|
return nil, errors.Errorf("invalid value %s for membytes key shm-size", *t.ShmSize)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
bo := &build.Options{
|
bo := &build.Options{
|
||||||
Inputs: bi,
|
Inputs: bi,
|
||||||
@@ -1105,6 +1291,7 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
|||||||
Pull: pull,
|
Pull: pull,
|
||||||
NetworkMode: networkMode,
|
NetworkMode: networkMode,
|
||||||
Linked: t.linked,
|
Linked: t.linked,
|
||||||
|
ShmSize: *shmSize,
|
||||||
}
|
}
|
||||||
|
|
||||||
platforms, err := platformutil.Parse(t.Platforms)
|
platforms, err := platformutil.Parse(t.Platforms)
|
||||||
@@ -1113,9 +1300,6 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
|||||||
}
|
}
|
||||||
bo.Platforms = platforms
|
bo.Platforms = platforms
|
||||||
|
|
||||||
dockerConfig := config.LoadDefaultConfigFile(os.Stderr)
|
|
||||||
bo.Session = append(bo.Session, authprovider.NewDockerAuthProvider(dockerConfig))
|
|
||||||
|
|
||||||
secrets, err := buildflags.ParseSecretSpecs(t.Secrets)
|
secrets, err := buildflags.ParseSecretSpecs(t.Secrets)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -1143,6 +1327,12 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
|||||||
bo.Target = *t.Target
|
bo.Target = *t.Target
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if t.Call != nil {
|
||||||
|
bo.CallFunc = &build.CallFunc{
|
||||||
|
Name: *t.Call,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
cacheImports, err := buildflags.ParseCacheEntry(t.CacheFrom)
|
cacheImports, err := buildflags.ParseCacheEntry(t.CacheFrom)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -1164,6 +1354,16 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
annotations, err := buildflags.ParseAnnotations(t.Annotations)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
for _, e := range bo.Exports {
|
||||||
|
for k, v := range annotations {
|
||||||
|
e.Attrs[k.String()] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
attests, err := buildflags.ParseAttests(t.Attest)
|
attests, err := buildflags.ParseAttests(t.Attest)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -1175,6 +1375,18 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ulimits := dockeropts.NewUlimitOpt(nil)
|
||||||
|
for _, field := range t.Ulimits {
|
||||||
|
if err := ulimits.Set(field); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
bo.Ulimits = ulimits
|
||||||
|
|
||||||
|
for _, ent := range t.Entitlements {
|
||||||
|
bo.Allow = append(bo.Allow, entitlements.Entitlement(ent))
|
||||||
|
}
|
||||||
|
|
||||||
return bo, nil
|
return bo, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1219,23 +1431,89 @@ func removeAttestDupes(s []string) []string {
|
|||||||
return res
|
return res
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseOutputType(str string) string {
|
func parseOutput(str string) map[string]string {
|
||||||
csvReader := csv.NewReader(strings.NewReader(str))
|
fields, err := csvvalue.Fields(str, nil)
|
||||||
fields, err := csvReader.Read()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return ""
|
return nil
|
||||||
}
|
}
|
||||||
|
res := map[string]string{}
|
||||||
for _, field := range fields {
|
for _, field := range fields {
|
||||||
parts := strings.SplitN(field, "=", 2)
|
parts := strings.SplitN(field, "=", 2)
|
||||||
if len(parts) == 2 {
|
if len(parts) == 2 {
|
||||||
if parts[0] == "type" {
|
res[parts[0]] = parts[1]
|
||||||
return parts[1]
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseOutputType(str string) string {
|
||||||
|
if out := parseOutput(str); out != nil {
|
||||||
|
if v, ok := out["type"]; ok {
|
||||||
|
return v
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func setPushOverride(outputs []string, push bool) []string {
|
||||||
|
var out []string
|
||||||
|
setPush := true
|
||||||
|
for _, output := range outputs {
|
||||||
|
typ := parseOutputType(output)
|
||||||
|
if typ == "image" || typ == "registry" {
|
||||||
|
// no need to set push if image or registry types already defined
|
||||||
|
setPush = false
|
||||||
|
if typ == "registry" {
|
||||||
|
if !push {
|
||||||
|
// don't set registry output if "push" is false
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// no need to set "push" attribute to true for registry
|
||||||
|
out = append(out, output)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
out = append(out, output+",push="+strconv.FormatBool(push))
|
||||||
|
} else {
|
||||||
|
if typ != "docker" {
|
||||||
|
// if there is any output that is not docker, don't set "push"
|
||||||
|
setPush = false
|
||||||
|
}
|
||||||
|
out = append(out, output)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if push && setPush {
|
||||||
|
out = append(out, "type=image,push=true")
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func setLoadOverride(outputs []string, load bool) []string {
|
||||||
|
if !load {
|
||||||
|
return outputs
|
||||||
|
}
|
||||||
|
setLoad := true
|
||||||
|
for _, output := range outputs {
|
||||||
|
if typ := parseOutputType(output); typ == "docker" {
|
||||||
|
if v := parseOutput(output); v != nil {
|
||||||
|
// dest set means we want to output as tar so don't set load
|
||||||
|
if _, ok := v["dest"]; !ok {
|
||||||
|
setLoad = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if typ != "image" && typ != "registry" && typ != "oci" {
|
||||||
|
// if there is any output that is not an image, registry
|
||||||
|
// or oci, don't set "load" similar to push override
|
||||||
|
setLoad = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if setLoad {
|
||||||
|
outputs = append(outputs, "type=docker")
|
||||||
|
}
|
||||||
|
return outputs
|
||||||
|
}
|
||||||
|
|
||||||
func validateTargetName(name string) error {
|
func validateTargetName(name string) error {
|
||||||
if !targetNamePattern.MatchString(name) {
|
if !targetNamePattern.MatchString(name) {
|
||||||
return errors.Errorf("only %q are allowed", validTargetNameChars)
|
return errors.Errorf("only %q are allowed", validTargetNameChars)
|
||||||
|
|||||||
@@ -3,10 +3,13 @@ package bake
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"os"
|
"os"
|
||||||
|
"path/filepath"
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/moby/buildkit/util/entitlements"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -20,6 +23,8 @@ target "webDEP" {
|
|||||||
VAR_BOTH = "webDEP"
|
VAR_BOTH = "webDEP"
|
||||||
}
|
}
|
||||||
no-cache = true
|
no-cache = true
|
||||||
|
shm-size = "128m"
|
||||||
|
ulimits = ["nofile=1024:1024"]
|
||||||
}
|
}
|
||||||
|
|
||||||
target "webapp" {
|
target "webapp" {
|
||||||
@@ -43,6 +48,8 @@ target "webapp" {
|
|||||||
require.Equal(t, ".", *m["webapp"].Context)
|
require.Equal(t, ".", *m["webapp"].Context)
|
||||||
require.Equal(t, ptrstr("webDEP"), m["webapp"].Args["VAR_INHERITED"])
|
require.Equal(t, ptrstr("webDEP"), m["webapp"].Args["VAR_INHERITED"])
|
||||||
require.Equal(t, true, *m["webapp"].NoCache)
|
require.Equal(t, true, *m["webapp"].NoCache)
|
||||||
|
require.Equal(t, "128m", *m["webapp"].ShmSize)
|
||||||
|
require.Equal(t, []string{"nofile=1024:1024"}, m["webapp"].Ulimits)
|
||||||
require.Nil(t, m["webapp"].Pull)
|
require.Nil(t, m["webapp"].Pull)
|
||||||
|
|
||||||
require.Equal(t, 1, len(g))
|
require.Equal(t, 1, len(g))
|
||||||
@@ -127,6 +134,12 @@ target "webapp" {
|
|||||||
require.Equal(t, []string{"webapp"}, g["default"].Targets)
|
require.Equal(t, []string{"webapp"}, g["default"].Targets)
|
||||||
})
|
})
|
||||||
|
|
||||||
|
t.Run("ShmSizeOverride", func(t *testing.T) {
|
||||||
|
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.shm-size=256m"}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, "256m", *m["webapp"].ShmSize)
|
||||||
|
})
|
||||||
|
|
||||||
t.Run("PullOverride", func(t *testing.T) {
|
t.Run("PullOverride", func(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
m, g, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.pull=false"}, nil)
|
m, g, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.pull=false"}, nil)
|
||||||
@@ -205,8 +218,20 @@ target "webapp" {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestPushOverride(t *testing.T) {
|
func TestPushOverride(t *testing.T) {
|
||||||
t.Parallel()
|
t.Run("empty output", func(t *testing.T) {
|
||||||
|
fp := File{
|
||||||
|
Name: "docker-bake.hcl",
|
||||||
|
Data: []byte(
|
||||||
|
`target "app" {
|
||||||
|
}`),
|
||||||
|
}
|
||||||
|
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.push=true"}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, 1, len(m["app"].Outputs))
|
||||||
|
require.Equal(t, "type=image,push=true", m["app"].Outputs[0])
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("type image", func(t *testing.T) {
|
||||||
fp := File{
|
fp := File{
|
||||||
Name: "docker-bake.hcl",
|
Name: "docker-bake.hcl",
|
||||||
Data: []byte(
|
Data: []byte(
|
||||||
@@ -214,39 +239,231 @@ func TestPushOverride(t *testing.T) {
|
|||||||
output = ["type=image,compression=zstd"]
|
output = ["type=image,compression=zstd"]
|
||||||
}`),
|
}`),
|
||||||
}
|
}
|
||||||
ctx := context.TODO()
|
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.push=true"}, nil)
|
||||||
m, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, []string{"*.push=true"}, nil)
|
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 1, len(m["app"].Outputs))
|
require.Equal(t, 1, len(m["app"].Outputs))
|
||||||
require.Equal(t, "type=image,compression=zstd,push=true", m["app"].Outputs[0])
|
require.Equal(t, "type=image,compression=zstd,push=true", m["app"].Outputs[0])
|
||||||
|
})
|
||||||
|
|
||||||
fp = File{
|
t.Run("type image push false", func(t *testing.T) {
|
||||||
|
fp := File{
|
||||||
Name: "docker-bake.hcl",
|
Name: "docker-bake.hcl",
|
||||||
Data: []byte(
|
Data: []byte(
|
||||||
`target "app" {
|
`target "app" {
|
||||||
output = ["type=image,compression=zstd"]
|
output = ["type=image,compression=zstd"]
|
||||||
}`),
|
}`),
|
||||||
}
|
}
|
||||||
ctx = context.TODO()
|
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.push=false"}, nil)
|
||||||
m, _, err = ReadTargets(ctx, []File{fp}, []string{"app"}, []string{"*.push=false"}, nil)
|
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 1, len(m["app"].Outputs))
|
require.Equal(t, 1, len(m["app"].Outputs))
|
||||||
require.Equal(t, "type=image,compression=zstd,push=false", m["app"].Outputs[0])
|
require.Equal(t, "type=image,compression=zstd,push=false", m["app"].Outputs[0])
|
||||||
|
})
|
||||||
|
|
||||||
fp = File{
|
t.Run("type registry", func(t *testing.T) {
|
||||||
|
fp := File{
|
||||||
|
Name: "docker-bake.hcl",
|
||||||
|
Data: []byte(
|
||||||
|
`target "app" {
|
||||||
|
output = ["type=registry"]
|
||||||
|
}`),
|
||||||
|
}
|
||||||
|
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.push=true"}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, 1, len(m["app"].Outputs))
|
||||||
|
require.Equal(t, "type=registry", m["app"].Outputs[0])
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("type registry push false", func(t *testing.T) {
|
||||||
|
fp := File{
|
||||||
|
Name: "docker-bake.hcl",
|
||||||
|
Data: []byte(
|
||||||
|
`target "app" {
|
||||||
|
output = ["type=registry"]
|
||||||
|
}`),
|
||||||
|
}
|
||||||
|
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.push=false"}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, 0, len(m["app"].Outputs))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("type local and empty target", func(t *testing.T) {
|
||||||
|
fp := File{
|
||||||
|
Name: "docker-bake.hcl",
|
||||||
|
Data: []byte(
|
||||||
|
`target "foo" {
|
||||||
|
output = [ "type=local,dest=out" ]
|
||||||
|
}
|
||||||
|
target "bar" {
|
||||||
|
}`),
|
||||||
|
}
|
||||||
|
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"foo", "bar"}, []string{"*.push=true"}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, 2, len(m))
|
||||||
|
require.Equal(t, 1, len(m["foo"].Outputs))
|
||||||
|
require.Equal(t, []string{"type=local,dest=out"}, m["foo"].Outputs)
|
||||||
|
require.Equal(t, 1, len(m["bar"].Outputs))
|
||||||
|
require.Equal(t, []string{"type=image,push=true"}, m["bar"].Outputs)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLoadOverride(t *testing.T) {
|
||||||
|
t.Run("empty output", func(t *testing.T) {
|
||||||
|
fp := File{
|
||||||
Name: "docker-bake.hcl",
|
Name: "docker-bake.hcl",
|
||||||
Data: []byte(
|
Data: []byte(
|
||||||
`target "app" {
|
`target "app" {
|
||||||
}`),
|
}`),
|
||||||
}
|
}
|
||||||
ctx = context.TODO()
|
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil)
|
||||||
m, _, err = ReadTargets(ctx, []File{fp}, []string{"app"}, []string{"*.push=true"}, nil)
|
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 1, len(m["app"].Outputs))
|
require.Equal(t, 1, len(m["app"].Outputs))
|
||||||
require.Equal(t, "type=image,push=true", m["app"].Outputs[0])
|
require.Equal(t, "type=docker", m["app"].Outputs[0])
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("type docker", func(t *testing.T) {
|
||||||
|
fp := File{
|
||||||
|
Name: "docker-bake.hcl",
|
||||||
|
Data: []byte(
|
||||||
|
`target "app" {
|
||||||
|
output = ["type=docker"]
|
||||||
|
}`),
|
||||||
|
}
|
||||||
|
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, 1, len(m["app"].Outputs))
|
||||||
|
require.Equal(t, []string{"type=docker"}, m["app"].Outputs)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("type image", func(t *testing.T) {
|
||||||
|
fp := File{
|
||||||
|
Name: "docker-bake.hcl",
|
||||||
|
Data: []byte(
|
||||||
|
`target "app" {
|
||||||
|
output = ["type=image"]
|
||||||
|
}`),
|
||||||
|
}
|
||||||
|
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, 2, len(m["app"].Outputs))
|
||||||
|
require.Equal(t, []string{"type=image", "type=docker"}, m["app"].Outputs)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("type image load false", func(t *testing.T) {
|
||||||
|
fp := File{
|
||||||
|
Name: "docker-bake.hcl",
|
||||||
|
Data: []byte(
|
||||||
|
`target "app" {
|
||||||
|
output = ["type=image"]
|
||||||
|
}`),
|
||||||
|
}
|
||||||
|
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=false"}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, 1, len(m["app"].Outputs))
|
||||||
|
require.Equal(t, []string{"type=image"}, m["app"].Outputs)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("type registry", func(t *testing.T) {
|
||||||
|
fp := File{
|
||||||
|
Name: "docker-bake.hcl",
|
||||||
|
Data: []byte(
|
||||||
|
`target "app" {
|
||||||
|
output = ["type=registry"]
|
||||||
|
}`),
|
||||||
|
}
|
||||||
|
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, 2, len(m["app"].Outputs))
|
||||||
|
require.Equal(t, []string{"type=registry", "type=docker"}, m["app"].Outputs)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("type oci", func(t *testing.T) {
|
||||||
|
fp := File{
|
||||||
|
Name: "docker-bake.hcl",
|
||||||
|
Data: []byte(
|
||||||
|
`target "app" {
|
||||||
|
output = ["type=oci,dest=out"]
|
||||||
|
}`),
|
||||||
|
}
|
||||||
|
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, 2, len(m["app"].Outputs))
|
||||||
|
require.Equal(t, []string{"type=oci,dest=out", "type=docker"}, m["app"].Outputs)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("type docker with dest", func(t *testing.T) {
|
||||||
|
fp := File{
|
||||||
|
Name: "docker-bake.hcl",
|
||||||
|
Data: []byte(
|
||||||
|
`target "app" {
|
||||||
|
output = ["type=docker,dest=out"]
|
||||||
|
}`),
|
||||||
|
}
|
||||||
|
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, 2, len(m["app"].Outputs))
|
||||||
|
require.Equal(t, []string{"type=docker,dest=out", "type=docker"}, m["app"].Outputs)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("type local and empty target", func(t *testing.T) {
|
||||||
|
fp := File{
|
||||||
|
Name: "docker-bake.hcl",
|
||||||
|
Data: []byte(
|
||||||
|
`target "foo" {
|
||||||
|
output = [ "type=local,dest=out" ]
|
||||||
|
}
|
||||||
|
target "bar" {
|
||||||
|
}`),
|
||||||
|
}
|
||||||
|
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"foo", "bar"}, []string{"*.load=true"}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, 2, len(m))
|
||||||
|
require.Equal(t, 1, len(m["foo"].Outputs))
|
||||||
|
require.Equal(t, []string{"type=local,dest=out"}, m["foo"].Outputs)
|
||||||
|
require.Equal(t, 1, len(m["bar"].Outputs))
|
||||||
|
require.Equal(t, []string{"type=docker"}, m["bar"].Outputs)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLoadAndPushOverride(t *testing.T) {
|
||||||
|
t.Run("type local and empty target", func(t *testing.T) {
|
||||||
|
fp := File{
|
||||||
|
Name: "docker-bake.hcl",
|
||||||
|
Data: []byte(
|
||||||
|
`target "foo" {
|
||||||
|
output = [ "type=local,dest=out" ]
|
||||||
|
}
|
||||||
|
target "bar" {
|
||||||
|
}`),
|
||||||
|
}
|
||||||
|
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"foo", "bar"}, []string{"*.load=true", "*.push=true"}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, 2, len(m))
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(m["foo"].Outputs))
|
||||||
|
sort.Strings(m["foo"].Outputs)
|
||||||
|
require.Equal(t, []string{"type=local,dest=out"}, m["foo"].Outputs)
|
||||||
|
|
||||||
|
require.Equal(t, 2, len(m["bar"].Outputs))
|
||||||
|
sort.Strings(m["bar"].Outputs)
|
||||||
|
require.Equal(t, []string{"type=docker", "type=image,push=true"}, m["bar"].Outputs)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("type registry", func(t *testing.T) {
|
||||||
|
fp := File{
|
||||||
|
Name: "docker-bake.hcl",
|
||||||
|
Data: []byte(
|
||||||
|
`target "foo" {
|
||||||
|
output = [ "type=registry" ]
|
||||||
|
}`),
|
||||||
|
}
|
||||||
|
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"foo"}, []string{"*.load=true", "*.push=true"}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, 1, len(m))
|
||||||
|
|
||||||
|
require.Equal(t, 2, len(m["foo"].Outputs))
|
||||||
|
sort.Strings(m["foo"].Outputs)
|
||||||
|
require.Equal(t, []string{"type=docker", "type=registry"}, m["foo"].Outputs)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestReadTargetsCompose(t *testing.T) {
|
func TestReadTargetsCompose(t *testing.T) {
|
||||||
@@ -373,7 +590,7 @@ services:
|
|||||||
require.Equal(t, []string{"web_app"}, g["default"].Targets)
|
require.Equal(t, []string{"web_app"}, g["default"].Targets)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestHCLCwdPrefix(t *testing.T) {
|
func TestHCLContextCwdPrefix(t *testing.T) {
|
||||||
fp := File{
|
fp := File{
|
||||||
Name: "docker-bake.hcl",
|
Name: "docker-bake.hcl",
|
||||||
Data: []byte(
|
Data: []byte(
|
||||||
@@ -386,18 +603,49 @@ func TestHCLCwdPrefix(t *testing.T) {
|
|||||||
m, g, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil)
|
m, g, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 1, len(m))
|
bo, err := TargetsToBuildOpt(m, &Input{})
|
||||||
_, ok := m["app"]
|
|
||||||
require.True(t, ok)
|
|
||||||
|
|
||||||
_, err = TargetsToBuildOpt(m, &Input{})
|
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, "test", *m["app"].Dockerfile)
|
|
||||||
require.Equal(t, "foo", *m["app"].Context)
|
|
||||||
|
|
||||||
require.Equal(t, 1, len(g))
|
require.Equal(t, 1, len(g))
|
||||||
require.Equal(t, []string{"app"}, g["default"].Targets)
|
require.Equal(t, []string{"app"}, g["default"].Targets)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(m))
|
||||||
|
require.Contains(t, m, "app")
|
||||||
|
assert.Equal(t, "test", *m["app"].Dockerfile)
|
||||||
|
assert.Equal(t, "foo", *m["app"].Context)
|
||||||
|
assert.Equal(t, "foo/test", bo["app"].Inputs.DockerfilePath)
|
||||||
|
assert.Equal(t, "foo", bo["app"].Inputs.ContextPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHCLDockerfileCwdPrefix(t *testing.T) {
|
||||||
|
fp := File{
|
||||||
|
Name: "docker-bake.hcl",
|
||||||
|
Data: []byte(
|
||||||
|
`target "app" {
|
||||||
|
context = "."
|
||||||
|
dockerfile = "cwd://Dockerfile.app"
|
||||||
|
}`),
|
||||||
|
}
|
||||||
|
ctx := context.TODO()
|
||||||
|
|
||||||
|
cwd, err := os.Getwd()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
m, g, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
bo, err := TargetsToBuildOpt(m, &Input{})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(g))
|
||||||
|
require.Equal(t, []string{"app"}, g["default"].Targets)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(m))
|
||||||
|
require.Contains(t, m, "app")
|
||||||
|
assert.Equal(t, "cwd://Dockerfile.app", *m["app"].Dockerfile)
|
||||||
|
assert.Equal(t, ".", *m["app"].Context)
|
||||||
|
assert.Equal(t, filepath.Join(cwd, "Dockerfile.app"), bo["app"].Inputs.DockerfilePath)
|
||||||
|
assert.Equal(t, ".", bo["app"].Inputs.ContextPath)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestOverrideMerge(t *testing.T) {
|
func TestOverrideMerge(t *testing.T) {
|
||||||
@@ -591,7 +839,8 @@ func TestReadContextFromTargetChain(t *testing.T) {
|
|||||||
|
|
||||||
mid, ok := m["mid"]
|
mid, ok := m["mid"]
|
||||||
require.True(t, ok)
|
require.True(t, ok)
|
||||||
require.Equal(t, 0, len(mid.Outputs))
|
require.Equal(t, 1, len(mid.Outputs))
|
||||||
|
require.Equal(t, "type=cacheonly", mid.Outputs[0])
|
||||||
require.Equal(t, 1, len(mid.Contexts))
|
require.Equal(t, 1, len(mid.Contexts))
|
||||||
|
|
||||||
base, ok := m["base"]
|
base, ok := m["base"]
|
||||||
@@ -1281,7 +1530,7 @@ services:
|
|||||||
v2: "bar"
|
v2: "bar"
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseFiles([]File{
|
c, _, err := ParseFiles([]File{
|
||||||
{Data: dt, Name: "c1.foo"},
|
{Data: dt, Name: "c1.foo"},
|
||||||
{Data: dt2, Name: "c2.bar"},
|
{Data: dt2, Name: "c2.bar"},
|
||||||
}, nil)
|
}, nil)
|
||||||
@@ -1398,7 +1647,7 @@ func TestReadLocalFilesDefault(t *testing.T) {
|
|||||||
for _, tf := range tt.filenames {
|
for _, tf := range tt.filenames {
|
||||||
require.NoError(t, os.WriteFile(tf, []byte(tf), 0644))
|
require.NoError(t, os.WriteFile(tf, []byte(tf), 0644))
|
||||||
}
|
}
|
||||||
files, err := ReadLocalFiles(nil, nil)
|
files, err := ReadLocalFiles(nil, nil, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
if len(files) == 0 {
|
if len(files) == 0 {
|
||||||
require.Equal(t, len(tt.expected), len(files))
|
require.Equal(t, len(tt.expected), len(files))
|
||||||
@@ -1450,3 +1699,160 @@ func TestAttestDuplicates(t *testing.T) {
|
|||||||
"provenance": ptrstr("type=provenance,mode=max"),
|
"provenance": ptrstr("type=provenance,mode=max"),
|
||||||
}, opts["default"].Attests)
|
}, opts["default"].Attests)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAnnotations(t *testing.T) {
|
||||||
|
fp := File{
|
||||||
|
Name: "docker-bake.hcl",
|
||||||
|
Data: []byte(
|
||||||
|
`target "app" {
|
||||||
|
output = ["type=image,name=foo"]
|
||||||
|
annotations = ["manifest[linux/amd64]:foo=bar"]
|
||||||
|
}`),
|
||||||
|
}
|
||||||
|
ctx := context.TODO()
|
||||||
|
m, g, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
bo, err := TargetsToBuildOpt(m, &Input{})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(g))
|
||||||
|
require.Equal(t, []string{"app"}, g["default"].Targets)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(m))
|
||||||
|
require.Contains(t, m, "app")
|
||||||
|
require.Equal(t, "type=image,name=foo", m["app"].Outputs[0])
|
||||||
|
require.Equal(t, "manifest[linux/amd64]:foo=bar", m["app"].Annotations[0])
|
||||||
|
|
||||||
|
require.Len(t, bo["app"].Exports, 1)
|
||||||
|
require.Equal(t, "bar", bo["app"].Exports[0].Attrs["annotation-manifest[linux/amd64].foo"])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHCLEntitlements(t *testing.T) {
|
||||||
|
fp := File{
|
||||||
|
Name: "docker-bake.hcl",
|
||||||
|
Data: []byte(
|
||||||
|
`target "app" {
|
||||||
|
entitlements = ["security.insecure", "network.host"]
|
||||||
|
}`),
|
||||||
|
}
|
||||||
|
ctx := context.TODO()
|
||||||
|
m, g, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
bo, err := TargetsToBuildOpt(m, &Input{})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(g))
|
||||||
|
require.Equal(t, []string{"app"}, g["default"].Targets)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(m))
|
||||||
|
require.Contains(t, m, "app")
|
||||||
|
require.Len(t, m["app"].Entitlements, 2)
|
||||||
|
require.Equal(t, "security.insecure", m["app"].Entitlements[0])
|
||||||
|
require.Equal(t, "network.host", m["app"].Entitlements[1])
|
||||||
|
|
||||||
|
require.Len(t, bo["app"].Allow, 2)
|
||||||
|
require.Equal(t, entitlements.EntitlementSecurityInsecure, bo["app"].Allow[0])
|
||||||
|
require.Equal(t, entitlements.EntitlementNetworkHost, bo["app"].Allow[1])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEntitlementsForNetHostCompose(t *testing.T) {
|
||||||
|
fp := File{
|
||||||
|
Name: "docker-bake.hcl",
|
||||||
|
Data: []byte(
|
||||||
|
`target "app" {
|
||||||
|
dockerfile = "app.Dockerfile"
|
||||||
|
}`),
|
||||||
|
}
|
||||||
|
|
||||||
|
fp2 := File{
|
||||||
|
Name: "docker-compose.yml",
|
||||||
|
Data: []byte(
|
||||||
|
`services:
|
||||||
|
app:
|
||||||
|
build:
|
||||||
|
network: "host"
|
||||||
|
`),
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.TODO()
|
||||||
|
m, g, err := ReadTargets(ctx, []File{fp, fp2}, []string{"app"}, nil, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
bo, err := TargetsToBuildOpt(m, &Input{})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(g))
|
||||||
|
require.Equal(t, []string{"app"}, g["default"].Targets)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(m))
|
||||||
|
require.Contains(t, m, "app")
|
||||||
|
require.Len(t, m["app"].Entitlements, 1)
|
||||||
|
require.Equal(t, "network.host", m["app"].Entitlements[0])
|
||||||
|
require.Equal(t, "host", *m["app"].NetworkMode)
|
||||||
|
|
||||||
|
require.Len(t, bo["app"].Allow, 1)
|
||||||
|
require.Equal(t, entitlements.EntitlementNetworkHost, bo["app"].Allow[0])
|
||||||
|
require.Equal(t, "host", bo["app"].NetworkMode)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEntitlementsForNetHost(t *testing.T) {
|
||||||
|
fp := File{
|
||||||
|
Name: "docker-bake.hcl",
|
||||||
|
Data: []byte(
|
||||||
|
`target "app" {
|
||||||
|
dockerfile = "app.Dockerfile"
|
||||||
|
network = "host"
|
||||||
|
}`),
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.TODO()
|
||||||
|
m, g, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
bo, err := TargetsToBuildOpt(m, &Input{})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(g))
|
||||||
|
require.Equal(t, []string{"app"}, g["default"].Targets)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(m))
|
||||||
|
require.Contains(t, m, "app")
|
||||||
|
require.Len(t, m["app"].Entitlements, 1)
|
||||||
|
require.Equal(t, "network.host", m["app"].Entitlements[0])
|
||||||
|
require.Equal(t, "host", *m["app"].NetworkMode)
|
||||||
|
|
||||||
|
require.Len(t, bo["app"].Allow, 1)
|
||||||
|
require.Equal(t, entitlements.EntitlementNetworkHost, bo["app"].Allow[0])
|
||||||
|
require.Equal(t, "host", bo["app"].NetworkMode)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNetNone(t *testing.T) {
|
||||||
|
fp := File{
|
||||||
|
Name: "docker-bake.hcl",
|
||||||
|
Data: []byte(
|
||||||
|
`target "app" {
|
||||||
|
dockerfile = "app.Dockerfile"
|
||||||
|
network = "none"
|
||||||
|
}`),
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.TODO()
|
||||||
|
m, g, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
bo, err := TargetsToBuildOpt(m, &Input{})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(g))
|
||||||
|
require.Equal(t, []string{"app"}, g["default"].Targets)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(m))
|
||||||
|
require.Contains(t, m, "app")
|
||||||
|
require.Len(t, m["app"].Entitlements, 0)
|
||||||
|
require.Equal(t, "none", *m["app"].NetworkMode)
|
||||||
|
|
||||||
|
require.Len(t, bo["app"].Allow, 0)
|
||||||
|
require.Equal(t, "none", bo["app"].NetworkMode)
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,13 +1,19 @@
|
|||||||
package bake
|
package bake
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/compose-spec/compose-go/dotenv"
|
"github.com/compose-spec/compose-go/v2/consts"
|
||||||
"github.com/compose-spec/compose-go/loader"
|
"github.com/compose-spec/compose-go/v2/dotenv"
|
||||||
compose "github.com/compose-spec/compose-go/types"
|
"github.com/compose-spec/compose-go/v2/loader"
|
||||||
|
composetypes "github.com/compose-spec/compose-go/v2/types"
|
||||||
|
dockeropts "github.com/docker/cli/opts"
|
||||||
|
"github.com/docker/go-units"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"gopkg.in/yaml.v3"
|
"gopkg.in/yaml.v3"
|
||||||
)
|
)
|
||||||
@@ -17,9 +23,9 @@ func ParseComposeFiles(fs []File) (*Config, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
var cfgs []compose.ConfigFile
|
var cfgs []composetypes.ConfigFile
|
||||||
for _, f := range fs {
|
for _, f := range fs {
|
||||||
cfgs = append(cfgs, compose.ConfigFile{
|
cfgs = append(cfgs, composetypes.ConfigFile{
|
||||||
Filename: f.Name,
|
Filename: f.Name,
|
||||||
Content: f.Data,
|
Content: f.Data,
|
||||||
})
|
})
|
||||||
@@ -27,16 +33,21 @@ func ParseComposeFiles(fs []File) (*Config, error) {
|
|||||||
return ParseCompose(cfgs, envs)
|
return ParseCompose(cfgs, envs)
|
||||||
}
|
}
|
||||||
|
|
||||||
func ParseCompose(cfgs []compose.ConfigFile, envs map[string]string) (*Config, error) {
|
func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Config, error) {
|
||||||
if envs == nil {
|
if envs == nil {
|
||||||
envs = make(map[string]string)
|
envs = make(map[string]string)
|
||||||
}
|
}
|
||||||
cfg, err := loader.Load(compose.ConfigDetails{
|
cfg, err := loader.LoadWithContext(context.Background(), composetypes.ConfigDetails{
|
||||||
ConfigFiles: cfgs,
|
ConfigFiles: cfgs,
|
||||||
Environment: envs,
|
Environment: envs,
|
||||||
}, func(options *loader.Options) {
|
}, func(options *loader.Options) {
|
||||||
options.SetProjectName("bake", false)
|
projectName := "bake"
|
||||||
|
if v, ok := envs[consts.ComposeProjectName]; ok && v != "" {
|
||||||
|
projectName = v
|
||||||
|
}
|
||||||
|
options.SetProjectName(projectName, false)
|
||||||
options.SkipNormalization = true
|
options.SkipNormalization = true
|
||||||
|
options.Profiles = []string{"*"}
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -50,6 +61,7 @@ func ParseCompose(cfgs []compose.ConfigFile, envs map[string]string) (*Config, e
|
|||||||
g := &Group{Name: "default"}
|
g := &Group{Name: "default"}
|
||||||
|
|
||||||
for _, s := range cfg.Services {
|
for _, s := range cfg.Services {
|
||||||
|
s := s
|
||||||
if s.Build == nil {
|
if s.Build == nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -83,6 +95,31 @@ func ParseCompose(cfgs []compose.ConfigFile, envs map[string]string) (*Config, e
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var shmSize *string
|
||||||
|
if s.Build.ShmSize > 0 {
|
||||||
|
shmSizeBytes := dockeropts.MemBytes(s.Build.ShmSize)
|
||||||
|
shmSizeStr := shmSizeBytes.String()
|
||||||
|
shmSize = &shmSizeStr
|
||||||
|
}
|
||||||
|
|
||||||
|
var ulimits []string
|
||||||
|
if s.Build.Ulimits != nil {
|
||||||
|
for n, u := range s.Build.Ulimits {
|
||||||
|
ulimit, err := units.ParseUlimit(fmt.Sprintf("%s=%d:%d", n, u.Soft, u.Hard))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
ulimits = append(ulimits, ulimit.String())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var ssh []string
|
||||||
|
for _, bkey := range s.Build.SSH {
|
||||||
|
sshkey := composeToBuildkitSSH(bkey)
|
||||||
|
ssh = append(ssh, sshkey)
|
||||||
|
}
|
||||||
|
sort.Strings(ssh)
|
||||||
|
|
||||||
var secrets []string
|
var secrets []string
|
||||||
for _, bs := range s.Build.Secrets {
|
for _, bs := range s.Build.Secrets {
|
||||||
secret, err := composeToBuildkitSecret(bs, cfg.Secrets[bs.Source])
|
secret, err := composeToBuildkitSecret(bs, cfg.Secrets[bs.Source])
|
||||||
@@ -118,7 +155,10 @@ func ParseCompose(cfgs []compose.ConfigFile, envs map[string]string) (*Config, e
|
|||||||
CacheFrom: s.Build.CacheFrom,
|
CacheFrom: s.Build.CacheFrom,
|
||||||
CacheTo: s.Build.CacheTo,
|
CacheTo: s.Build.CacheTo,
|
||||||
NetworkMode: &s.Build.Network,
|
NetworkMode: &s.Build.Network,
|
||||||
|
SSH: ssh,
|
||||||
Secrets: secrets,
|
Secrets: secrets,
|
||||||
|
ShmSize: shmSize,
|
||||||
|
Ulimits: ulimits,
|
||||||
}
|
}
|
||||||
if err = t.composeExtTarget(s.Build.Extensions); err != nil {
|
if err = t.composeExtTarget(s.Build.Extensions); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -156,8 +196,8 @@ func validateComposeFile(dt []byte, fn string) (bool, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func validateCompose(dt []byte, envs map[string]string) error {
|
func validateCompose(dt []byte, envs map[string]string) error {
|
||||||
_, err := loader.Load(compose.ConfigDetails{
|
_, err := loader.Load(composetypes.ConfigDetails{
|
||||||
ConfigFiles: []compose.ConfigFile{
|
ConfigFiles: []composetypes.ConfigFile{
|
||||||
{
|
{
|
||||||
Content: dt,
|
Content: dt,
|
||||||
},
|
},
|
||||||
@@ -220,7 +260,7 @@ func loadDotEnv(curenv map[string]string, workingDir string) (map[string]string,
|
|||||||
return curenv, nil
|
return curenv, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func flatten(in compose.MappingWithEquals) map[string]*string {
|
func flatten(in composetypes.MappingWithEquals) map[string]*string {
|
||||||
if len(in) == 0 {
|
if len(in) == 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -249,7 +289,7 @@ type xbake struct {
|
|||||||
NoCacheFilter stringArray `yaml:"no-cache-filter,omitempty"`
|
NoCacheFilter stringArray `yaml:"no-cache-filter,omitempty"`
|
||||||
Contexts stringMap `yaml:"contexts,omitempty"`
|
Contexts stringMap `yaml:"contexts,omitempty"`
|
||||||
// don't forget to update documentation if you add a new field:
|
// don't forget to update documentation if you add a new field:
|
||||||
// docs/manuals/bake/compose-file.md#extension-field-with-x-bake
|
// https://github.com/docker/docs/blob/main/content/build/bake/compose-file.md#extension-field-with-x-bake
|
||||||
}
|
}
|
||||||
|
|
||||||
type stringMap map[string]string
|
type stringMap map[string]string
|
||||||
@@ -299,6 +339,7 @@ func (t *Target) composeExtTarget(exts map[string]interface{}) error {
|
|||||||
}
|
}
|
||||||
if len(xb.SSH) > 0 {
|
if len(xb.SSH) > 0 {
|
||||||
t.SSH = dedupSlice(append(t.SSH, xb.SSH...))
|
t.SSH = dedupSlice(append(t.SSH, xb.SSH...))
|
||||||
|
sort.Strings(t.SSH)
|
||||||
}
|
}
|
||||||
if len(xb.Platforms) > 0 {
|
if len(xb.Platforms) > 0 {
|
||||||
t.Platforms = dedupSlice(append(t.Platforms, xb.Platforms...))
|
t.Platforms = dedupSlice(append(t.Platforms, xb.Platforms...))
|
||||||
@@ -324,8 +365,8 @@ func (t *Target) composeExtTarget(exts map[string]interface{}) error {
|
|||||||
|
|
||||||
// composeToBuildkitSecret converts secret from compose format to buildkit's
|
// composeToBuildkitSecret converts secret from compose format to buildkit's
|
||||||
// csv format.
|
// csv format.
|
||||||
func composeToBuildkitSecret(inp compose.ServiceSecretConfig, psecret compose.SecretConfig) (string, error) {
|
func composeToBuildkitSecret(inp composetypes.ServiceSecretConfig, psecret composetypes.SecretConfig) (string, error) {
|
||||||
if psecret.External.External {
|
if psecret.External {
|
||||||
return "", errors.Errorf("unsupported external secret %s", psecret.Name)
|
return "", errors.Errorf("unsupported external secret %s", psecret.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -342,3 +383,17 @@ func composeToBuildkitSecret(inp compose.ServiceSecretConfig, psecret compose.Se
|
|||||||
|
|
||||||
return strings.Join(bkattrs, ","), nil
|
return strings.Join(bkattrs, ","), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// composeToBuildkitSSH converts secret from compose format to buildkit's
|
||||||
|
// csv format.
|
||||||
|
func composeToBuildkitSSH(sshKey composetypes.SSHKey) string {
|
||||||
|
var bkattrs []string
|
||||||
|
|
||||||
|
bkattrs = append(bkattrs, sshKey.ID)
|
||||||
|
|
||||||
|
if sshKey.Path != "" {
|
||||||
|
bkattrs = append(bkattrs, sshKey.Path)
|
||||||
|
}
|
||||||
|
|
||||||
|
return strings.Join(bkattrs, "=")
|
||||||
|
}
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ import (
|
|||||||
"sort"
|
"sort"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
compose "github.com/compose-spec/compose-go/types"
|
composetypes "github.com/compose-spec/compose-go/v2/types"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
@@ -22,7 +22,7 @@ services:
|
|||||||
build:
|
build:
|
||||||
context: ./dir
|
context: ./dir
|
||||||
additional_contexts:
|
additional_contexts:
|
||||||
foo: /bar
|
foo: ./bar
|
||||||
dockerfile: Dockerfile-alternate
|
dockerfile: Dockerfile-alternate
|
||||||
network:
|
network:
|
||||||
none
|
none
|
||||||
@@ -32,10 +32,15 @@ services:
|
|||||||
- type=local,src=path/to/cache
|
- type=local,src=path/to/cache
|
||||||
cache_to:
|
cache_to:
|
||||||
- type=local,dest=path/to/cache
|
- type=local,dest=path/to/cache
|
||||||
|
ssh:
|
||||||
|
- key=path/to/key
|
||||||
|
- default
|
||||||
secrets:
|
secrets:
|
||||||
- token
|
- token
|
||||||
- aws
|
- aws
|
||||||
webapp2:
|
webapp2:
|
||||||
|
profiles:
|
||||||
|
- test
|
||||||
build:
|
build:
|
||||||
context: ./dir
|
context: ./dir
|
||||||
dockerfile_inline: |
|
dockerfile_inline: |
|
||||||
@@ -47,7 +52,7 @@ secrets:
|
|||||||
file: /root/.aws/credentials
|
file: /root/.aws/credentials
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
|
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Groups))
|
require.Equal(t, 1, len(c.Groups))
|
||||||
@@ -60,25 +65,26 @@ secrets:
|
|||||||
return c.Targets[i].Name < c.Targets[j].Name
|
return c.Targets[i].Name < c.Targets[j].Name
|
||||||
})
|
})
|
||||||
require.Equal(t, "db", c.Targets[0].Name)
|
require.Equal(t, "db", c.Targets[0].Name)
|
||||||
require.Equal(t, "./db", *c.Targets[0].Context)
|
require.Equal(t, "db", *c.Targets[0].Context)
|
||||||
require.Equal(t, []string{"docker.io/tonistiigi/db"}, c.Targets[0].Tags)
|
require.Equal(t, []string{"docker.io/tonistiigi/db"}, c.Targets[0].Tags)
|
||||||
|
|
||||||
require.Equal(t, "webapp", c.Targets[1].Name)
|
require.Equal(t, "webapp", c.Targets[1].Name)
|
||||||
require.Equal(t, "./dir", *c.Targets[1].Context)
|
require.Equal(t, "dir", *c.Targets[1].Context)
|
||||||
require.Equal(t, map[string]string{"foo": "/bar"}, c.Targets[1].Contexts)
|
require.Equal(t, map[string]string{"foo": "bar"}, c.Targets[1].Contexts)
|
||||||
require.Equal(t, "Dockerfile-alternate", *c.Targets[1].Dockerfile)
|
require.Equal(t, "Dockerfile-alternate", *c.Targets[1].Dockerfile)
|
||||||
require.Equal(t, 1, len(c.Targets[1].Args))
|
require.Equal(t, 1, len(c.Targets[1].Args))
|
||||||
require.Equal(t, ptrstr("123"), c.Targets[1].Args["buildno"])
|
require.Equal(t, ptrstr("123"), c.Targets[1].Args["buildno"])
|
||||||
require.Equal(t, []string{"type=local,src=path/to/cache"}, c.Targets[1].CacheFrom)
|
require.Equal(t, []string{"type=local,src=path/to/cache"}, c.Targets[1].CacheFrom)
|
||||||
require.Equal(t, []string{"type=local,dest=path/to/cache"}, c.Targets[1].CacheTo)
|
require.Equal(t, []string{"type=local,dest=path/to/cache"}, c.Targets[1].CacheTo)
|
||||||
require.Equal(t, "none", *c.Targets[1].NetworkMode)
|
require.Equal(t, "none", *c.Targets[1].NetworkMode)
|
||||||
|
require.Equal(t, []string{"default", "key=path/to/key"}, c.Targets[1].SSH)
|
||||||
require.Equal(t, []string{
|
require.Equal(t, []string{
|
||||||
"id=token,env=ENV_TOKEN",
|
"id=token,env=ENV_TOKEN",
|
||||||
"id=aws,src=/root/.aws/credentials",
|
"id=aws,src=/root/.aws/credentials",
|
||||||
}, c.Targets[1].Secrets)
|
}, c.Targets[1].Secrets)
|
||||||
|
|
||||||
require.Equal(t, "webapp2", c.Targets[2].Name)
|
require.Equal(t, "webapp2", c.Targets[2].Name)
|
||||||
require.Equal(t, "./dir", *c.Targets[2].Context)
|
require.Equal(t, "dir", *c.Targets[2].Context)
|
||||||
require.Equal(t, "FROM alpine\n", *c.Targets[2].DockerfileInline)
|
require.Equal(t, "FROM alpine\n", *c.Targets[2].DockerfileInline)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -90,7 +96,7 @@ services:
|
|||||||
webapp:
|
webapp:
|
||||||
build: ./db
|
build: ./db
|
||||||
`)
|
`)
|
||||||
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
|
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 1, len(c.Groups))
|
require.Equal(t, 1, len(c.Groups))
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
@@ -109,7 +115,7 @@ services:
|
|||||||
target: webapp
|
target: webapp
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
|
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 2, len(c.Targets))
|
require.Equal(t, 2, len(c.Targets))
|
||||||
@@ -134,7 +140,7 @@ services:
|
|||||||
target: webapp
|
target: webapp
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
|
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 2, len(c.Targets))
|
require.Equal(t, 2, len(c.Targets))
|
||||||
sort.Slice(c.Targets, func(i, j int) bool {
|
sort.Slice(c.Targets, func(i, j int) bool {
|
||||||
@@ -165,7 +171,7 @@ services:
|
|||||||
t.Setenv("BAR", "foo")
|
t.Setenv("BAR", "foo")
|
||||||
t.Setenv("ZZZ_BAR", "zzz_foo")
|
t.Setenv("ZZZ_BAR", "zzz_foo")
|
||||||
|
|
||||||
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, sliceToMap(os.Environ()))
|
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, sliceToMap(os.Environ()))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, ptrstr("bar"), c.Targets[0].Args["FOO"])
|
require.Equal(t, ptrstr("bar"), c.Targets[0].Args["FOO"])
|
||||||
require.Equal(t, ptrstr("zzz_foo"), c.Targets[0].Args["BAR"])
|
require.Equal(t, ptrstr("zzz_foo"), c.Targets[0].Args["BAR"])
|
||||||
@@ -179,7 +185,7 @@ services:
|
|||||||
entrypoint: echo 1
|
entrypoint: echo 1
|
||||||
`)
|
`)
|
||||||
|
|
||||||
_, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
|
_, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -204,7 +210,7 @@ networks:
|
|||||||
gateway: 10.5.0.254
|
gateway: 10.5.0.254
|
||||||
`)
|
`)
|
||||||
|
|
||||||
_, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
|
_, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -221,7 +227,7 @@ services:
|
|||||||
- bar
|
- bar
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
|
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, []string{"foo", "bar"}, c.Targets[0].Tags)
|
require.Equal(t, []string{"foo", "bar"}, c.Targets[0].Tags)
|
||||||
}
|
}
|
||||||
@@ -258,7 +264,7 @@ networks:
|
|||||||
name: test-net
|
name: test-net
|
||||||
`)
|
`)
|
||||||
|
|
||||||
_, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
|
_, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -276,6 +282,8 @@ services:
|
|||||||
- user/app:cache
|
- user/app:cache
|
||||||
tags:
|
tags:
|
||||||
- ct-addon:baz
|
- ct-addon:baz
|
||||||
|
ssh:
|
||||||
|
key: path/to/key
|
||||||
args:
|
args:
|
||||||
CT_ECR: foo
|
CT_ECR: foo
|
||||||
CT_TAG: bar
|
CT_TAG: bar
|
||||||
@@ -285,6 +293,9 @@ services:
|
|||||||
tags:
|
tags:
|
||||||
- ct-addon:foo
|
- ct-addon:foo
|
||||||
- ct-addon:alp
|
- ct-addon:alp
|
||||||
|
ssh:
|
||||||
|
- default
|
||||||
|
- other=path/to/otherkey
|
||||||
platforms:
|
platforms:
|
||||||
- linux/amd64
|
- linux/amd64
|
||||||
- linux/arm64
|
- linux/arm64
|
||||||
@@ -301,6 +312,11 @@ services:
|
|||||||
args:
|
args:
|
||||||
CT_ECR: foo
|
CT_ECR: foo
|
||||||
CT_TAG: bar
|
CT_TAG: bar
|
||||||
|
shm_size: 128m
|
||||||
|
ulimits:
|
||||||
|
nofile:
|
||||||
|
soft: 1024
|
||||||
|
hard: 1024
|
||||||
x-bake:
|
x-bake:
|
||||||
secret:
|
secret:
|
||||||
- id=mysecret,src=/local/secret
|
- id=mysecret,src=/local/secret
|
||||||
@@ -311,7 +327,7 @@ services:
|
|||||||
no-cache: true
|
no-cache: true
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
|
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 2, len(c.Targets))
|
require.Equal(t, 2, len(c.Targets))
|
||||||
sort.Slice(c.Targets, func(i, j int) bool {
|
sort.Slice(c.Targets, func(i, j int) bool {
|
||||||
@@ -322,6 +338,7 @@ services:
|
|||||||
require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[0].Platforms)
|
require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[0].Platforms)
|
||||||
require.Equal(t, []string{"user/app:cache", "type=local,src=path/to/cache"}, c.Targets[0].CacheFrom)
|
require.Equal(t, []string{"user/app:cache", "type=local,src=path/to/cache"}, c.Targets[0].CacheFrom)
|
||||||
require.Equal(t, []string{"user/app:cache", "type=local,dest=path/to/cache"}, c.Targets[0].CacheTo)
|
require.Equal(t, []string{"user/app:cache", "type=local,dest=path/to/cache"}, c.Targets[0].CacheTo)
|
||||||
|
require.Equal(t, []string{"default", "key=path/to/key", "other=path/to/otherkey"}, c.Targets[0].SSH)
|
||||||
require.Equal(t, newBool(true), c.Targets[0].Pull)
|
require.Equal(t, newBool(true), c.Targets[0].Pull)
|
||||||
require.Equal(t, map[string]string{"alpine": "docker-image://alpine:3.13"}, c.Targets[0].Contexts)
|
require.Equal(t, map[string]string{"alpine": "docker-image://alpine:3.13"}, c.Targets[0].Contexts)
|
||||||
require.Equal(t, []string{"ct-fake-aws:bar"}, c.Targets[1].Tags)
|
require.Equal(t, []string{"ct-fake-aws:bar"}, c.Targets[1].Tags)
|
||||||
@@ -330,6 +347,8 @@ services:
|
|||||||
require.Equal(t, []string{"linux/arm64"}, c.Targets[1].Platforms)
|
require.Equal(t, []string{"linux/arm64"}, c.Targets[1].Platforms)
|
||||||
require.Equal(t, []string{"type=docker"}, c.Targets[1].Outputs)
|
require.Equal(t, []string{"type=docker"}, c.Targets[1].Outputs)
|
||||||
require.Equal(t, newBool(true), c.Targets[1].NoCache)
|
require.Equal(t, newBool(true), c.Targets[1].NoCache)
|
||||||
|
require.Equal(t, ptrstr("128MiB"), c.Targets[1].ShmSize)
|
||||||
|
require.Equal(t, []string{"nofile=1024:1024"}, c.Targets[1].Ulimits)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestComposeExtDedup(t *testing.T) {
|
func TestComposeExtDedup(t *testing.T) {
|
||||||
@@ -344,6 +363,8 @@ services:
|
|||||||
- user/app:cache
|
- user/app:cache
|
||||||
tags:
|
tags:
|
||||||
- ct-addon:foo
|
- ct-addon:foo
|
||||||
|
ssh:
|
||||||
|
- default
|
||||||
x-bake:
|
x-bake:
|
||||||
tags:
|
tags:
|
||||||
- ct-addon:foo
|
- ct-addon:foo
|
||||||
@@ -353,14 +374,18 @@ services:
|
|||||||
- type=local,src=path/to/cache
|
- type=local,src=path/to/cache
|
||||||
cache-to:
|
cache-to:
|
||||||
- type=local,dest=path/to/cache
|
- type=local,dest=path/to/cache
|
||||||
|
ssh:
|
||||||
|
- default
|
||||||
|
- key=path/to/key
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
|
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, []string{"ct-addon:foo", "ct-addon:baz"}, c.Targets[0].Tags)
|
require.Equal(t, []string{"ct-addon:foo", "ct-addon:baz"}, c.Targets[0].Tags)
|
||||||
require.Equal(t, []string{"user/app:cache", "type=local,src=path/to/cache"}, c.Targets[0].CacheFrom)
|
require.Equal(t, []string{"user/app:cache", "type=local,src=path/to/cache"}, c.Targets[0].CacheFrom)
|
||||||
require.Equal(t, []string{"user/app:cache", "type=local,dest=path/to/cache"}, c.Targets[0].CacheTo)
|
require.Equal(t, []string{"user/app:cache", "type=local,dest=path/to/cache"}, c.Targets[0].CacheTo)
|
||||||
|
require.Equal(t, []string{"default", "key=path/to/key"}, c.Targets[0].SSH)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestEnv(t *testing.T) {
|
func TestEnv(t *testing.T) {
|
||||||
@@ -388,7 +413,7 @@ services:
|
|||||||
- ` + envf.Name() + `
|
- ` + envf.Name() + `
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
|
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, map[string]*string{"CT_ECR": ptrstr("foo"), "FOO": ptrstr("bsdf -csdf"), "NODE_ENV": ptrstr("test")}, c.Targets[0].Args)
|
require.Equal(t, map[string]*string{"CT_ECR": ptrstr("foo"), "FOO": ptrstr("bsdf -csdf"), "NODE_ENV": ptrstr("test")}, c.Targets[0].Args)
|
||||||
}
|
}
|
||||||
@@ -434,7 +459,7 @@ services:
|
|||||||
published: "3306"
|
published: "3306"
|
||||||
protocol: tcp
|
protocol: tcp
|
||||||
`)
|
`)
|
||||||
_, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
|
_, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -480,7 +505,7 @@ func TestServiceName(t *testing.T) {
|
|||||||
for _, tt := range cases {
|
for _, tt := range cases {
|
||||||
tt := tt
|
tt := tt
|
||||||
t.Run(tt.svc, func(t *testing.T) {
|
t.Run(tt.svc, func(t *testing.T) {
|
||||||
_, err := ParseCompose([]compose.ConfigFile{{Content: []byte(`
|
_, err := ParseCompose([]composetypes.ConfigFile{{Content: []byte(`
|
||||||
services:
|
services:
|
||||||
` + tt.svc + `:
|
` + tt.svc + `:
|
||||||
build:
|
build:
|
||||||
@@ -551,7 +576,7 @@ services:
|
|||||||
for _, tt := range cases {
|
for _, tt := range cases {
|
||||||
tt := tt
|
tt := tt
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
_, err := ParseCompose([]compose.ConfigFile{{Content: tt.dt}}, nil)
|
_, err := ParseCompose([]composetypes.ConfigFile{{Content: tt.dt}}, nil)
|
||||||
if tt.wantErr {
|
if tt.wantErr {
|
||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
} else {
|
} else {
|
||||||
@@ -649,11 +674,130 @@ services:
|
|||||||
bar: "baz"
|
bar: "baz"
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
|
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, map[string]*string{"bar": ptrstr("baz")}, c.Targets[0].Args)
|
require.Equal(t, map[string]*string{"bar": ptrstr("baz")}, c.Targets[0].Args)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestDependsOn(t *testing.T) {
|
||||||
|
var dt = []byte(`
|
||||||
|
services:
|
||||||
|
foo:
|
||||||
|
build:
|
||||||
|
context: .
|
||||||
|
ports:
|
||||||
|
- 3306:3306
|
||||||
|
depends_on:
|
||||||
|
- bar
|
||||||
|
bar:
|
||||||
|
build:
|
||||||
|
context: .
|
||||||
|
`)
|
||||||
|
_, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInclude(t *testing.T) {
|
||||||
|
tmpdir := t.TempDir()
|
||||||
|
|
||||||
|
err := os.WriteFile(filepath.Join(tmpdir, "compose-foo.yml"), []byte(`
|
||||||
|
services:
|
||||||
|
foo:
|
||||||
|
build:
|
||||||
|
context: .
|
||||||
|
target: buildfoo
|
||||||
|
ports:
|
||||||
|
- 3306:3306
|
||||||
|
`), 0644)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
var dt = []byte(`
|
||||||
|
include:
|
||||||
|
- compose-foo.yml
|
||||||
|
|
||||||
|
services:
|
||||||
|
bar:
|
||||||
|
build:
|
||||||
|
context: .
|
||||||
|
target: buildbar
|
||||||
|
`)
|
||||||
|
|
||||||
|
chdir(t, tmpdir)
|
||||||
|
c, err := ParseComposeFiles([]File{{
|
||||||
|
Name: "composetypes.yml",
|
||||||
|
Data: dt,
|
||||||
|
}})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 2, len(c.Targets))
|
||||||
|
sort.Slice(c.Targets, func(i, j int) bool {
|
||||||
|
return c.Targets[i].Name < c.Targets[j].Name
|
||||||
|
})
|
||||||
|
require.Equal(t, "bar", c.Targets[0].Name)
|
||||||
|
require.Equal(t, "buildbar", *c.Targets[0].Target)
|
||||||
|
require.Equal(t, "foo", c.Targets[1].Name)
|
||||||
|
require.Equal(t, "buildfoo", *c.Targets[1].Target)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDevelop(t *testing.T) {
|
||||||
|
var dt = []byte(`
|
||||||
|
services:
|
||||||
|
scratch:
|
||||||
|
build:
|
||||||
|
context: ./webapp
|
||||||
|
develop:
|
||||||
|
watch:
|
||||||
|
- path: ./webapp/html
|
||||||
|
action: sync
|
||||||
|
target: /var/www
|
||||||
|
ignore:
|
||||||
|
- node_modules/
|
||||||
|
`)
|
||||||
|
|
||||||
|
_, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCgroup(t *testing.T) {
|
||||||
|
var dt = []byte(`
|
||||||
|
services:
|
||||||
|
scratch:
|
||||||
|
build:
|
||||||
|
context: ./webapp
|
||||||
|
cgroup: private
|
||||||
|
`)
|
||||||
|
|
||||||
|
_, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProjectName(t *testing.T) {
|
||||||
|
var dt = []byte(`
|
||||||
|
services:
|
||||||
|
scratch:
|
||||||
|
build:
|
||||||
|
context: ./webapp
|
||||||
|
args:
|
||||||
|
PROJECT_NAME: ${COMPOSE_PROJECT_NAME}
|
||||||
|
`)
|
||||||
|
|
||||||
|
t.Run("default", func(t *testing.T) {
|
||||||
|
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Len(t, c.Targets, 1)
|
||||||
|
require.Len(t, c.Targets[0].Args, 1)
|
||||||
|
require.Equal(t, map[string]*string{"PROJECT_NAME": ptrstr("bake")}, c.Targets[0].Args)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("env", func(t *testing.T) {
|
||||||
|
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, map[string]string{"COMPOSE_PROJECT_NAME": "foo"})
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Len(t, c.Targets, 1)
|
||||||
|
require.Len(t, c.Targets[0].Args, 1)
|
||||||
|
require.Equal(t, map[string]*string{"PROJECT_NAME": ptrstr("foo")}, c.Targets[0].Args)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// chdir changes the current working directory to the named directory,
|
// chdir changes the current working directory to the named directory,
|
||||||
// and then restore the original working directory at the end of the test.
|
// and then restore the original working directory at the end of the test.
|
||||||
func chdir(t *testing.T, dir string) {
|
func chdir(t *testing.T, dir string) {
|
||||||
|
|||||||
175
bake/entitlements.go
Normal file
175
bake/entitlements.go
Normal file
@@ -0,0 +1,175 @@
|
|||||||
|
package bake
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"slices"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/containerd/console"
|
||||||
|
"github.com/docker/buildx/build"
|
||||||
|
"github.com/moby/buildkit/util/entitlements"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
type EntitlementKey string
|
||||||
|
|
||||||
|
const (
|
||||||
|
EntitlementKeyNetworkHost EntitlementKey = "network.host"
|
||||||
|
EntitlementKeySecurityInsecure EntitlementKey = "security.insecure"
|
||||||
|
EntitlementKeyFSRead EntitlementKey = "fs.read"
|
||||||
|
EntitlementKeyFSWrite EntitlementKey = "fs.write"
|
||||||
|
EntitlementKeyFS EntitlementKey = "fs"
|
||||||
|
EntitlementKeyImagePush EntitlementKey = "image.push"
|
||||||
|
EntitlementKeyImageLoad EntitlementKey = "image.load"
|
||||||
|
EntitlementKeyImage EntitlementKey = "image"
|
||||||
|
EntitlementKeySSH EntitlementKey = "ssh"
|
||||||
|
)
|
||||||
|
|
||||||
|
type EntitlementConf struct {
|
||||||
|
NetworkHost bool
|
||||||
|
SecurityInsecure bool
|
||||||
|
FSRead []string
|
||||||
|
FSWrite []string
|
||||||
|
ImagePush []string
|
||||||
|
ImageLoad []string
|
||||||
|
SSH bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func ParseEntitlements(in []string) (EntitlementConf, error) {
|
||||||
|
var conf EntitlementConf
|
||||||
|
for _, e := range in {
|
||||||
|
switch e {
|
||||||
|
case string(EntitlementKeyNetworkHost):
|
||||||
|
conf.NetworkHost = true
|
||||||
|
case string(EntitlementKeySecurityInsecure):
|
||||||
|
conf.SecurityInsecure = true
|
||||||
|
case string(EntitlementKeySSH):
|
||||||
|
conf.SSH = true
|
||||||
|
default:
|
||||||
|
k, v, _ := strings.Cut(e, "=")
|
||||||
|
switch k {
|
||||||
|
case string(EntitlementKeyFSRead):
|
||||||
|
conf.FSRead = append(conf.FSRead, v)
|
||||||
|
case string(EntitlementKeyFSWrite):
|
||||||
|
conf.FSWrite = append(conf.FSWrite, v)
|
||||||
|
case string(EntitlementKeyFS):
|
||||||
|
conf.FSRead = append(conf.FSRead, v)
|
||||||
|
conf.FSWrite = append(conf.FSWrite, v)
|
||||||
|
case string(EntitlementKeyImagePush):
|
||||||
|
conf.ImagePush = append(conf.ImagePush, v)
|
||||||
|
case string(EntitlementKeyImageLoad):
|
||||||
|
conf.ImageLoad = append(conf.ImageLoad, v)
|
||||||
|
case string(EntitlementKeyImage):
|
||||||
|
conf.ImagePush = append(conf.ImagePush, v)
|
||||||
|
conf.ImageLoad = append(conf.ImageLoad, v)
|
||||||
|
default:
|
||||||
|
return conf, errors.Errorf("uknown entitlement key %q", k)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: dedupe slices and parent paths
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return conf, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c EntitlementConf) Validate(m map[string]build.Options) (EntitlementConf, error) {
|
||||||
|
var expected EntitlementConf
|
||||||
|
|
||||||
|
for _, v := range m {
|
||||||
|
if err := c.check(v, &expected); err != nil {
|
||||||
|
return EntitlementConf{}, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return expected, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c EntitlementConf) check(bo build.Options, expected *EntitlementConf) error {
|
||||||
|
for _, e := range bo.Allow {
|
||||||
|
switch e {
|
||||||
|
case entitlements.EntitlementNetworkHost:
|
||||||
|
if !c.NetworkHost {
|
||||||
|
expected.NetworkHost = true
|
||||||
|
}
|
||||||
|
case entitlements.EntitlementSecurityInsecure:
|
||||||
|
if !c.SecurityInsecure {
|
||||||
|
expected.SecurityInsecure = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c EntitlementConf) Prompt(ctx context.Context, out io.Writer) error {
|
||||||
|
var term bool
|
||||||
|
if _, err := console.ConsoleFromFile(os.Stdin); err == nil {
|
||||||
|
term = true
|
||||||
|
}
|
||||||
|
|
||||||
|
var msgs []string
|
||||||
|
var flags []string
|
||||||
|
|
||||||
|
if c.NetworkHost {
|
||||||
|
msgs = append(msgs, " - Running build containers that can access host network")
|
||||||
|
flags = append(flags, "network.host")
|
||||||
|
}
|
||||||
|
if c.SecurityInsecure {
|
||||||
|
msgs = append(msgs, " - Running privileged containers that can make system changes")
|
||||||
|
flags = append(flags, "security.insecure")
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(msgs) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprintf(out, "Your build is requesting privileges for following possibly insecure capabilities:\n\n")
|
||||||
|
for _, m := range msgs {
|
||||||
|
fmt.Fprintf(out, "%s\n", m)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, f := range flags {
|
||||||
|
flags[i] = "--allow=" + f
|
||||||
|
}
|
||||||
|
|
||||||
|
if term {
|
||||||
|
fmt.Fprintf(out, "\nIn order to not see this message in the future pass %q to grant requested privileges.\n", strings.Join(flags, " "))
|
||||||
|
} else {
|
||||||
|
fmt.Fprintf(out, "\nPass %q to grant requested privileges.\n", strings.Join(flags, " "))
|
||||||
|
}
|
||||||
|
|
||||||
|
args := append([]string(nil), os.Args...)
|
||||||
|
if v, ok := os.LookupEnv("DOCKER_CLI_PLUGIN_ORIGINAL_CLI_COMMAND"); ok && v != "" {
|
||||||
|
args[0] = v
|
||||||
|
}
|
||||||
|
idx := slices.Index(args, "bake")
|
||||||
|
|
||||||
|
if idx != -1 {
|
||||||
|
fmt.Fprintf(out, "\nYour full command with requested privileges:\n\n")
|
||||||
|
fmt.Fprintf(out, "%s %s %s\n\n", strings.Join(args[:idx+1], " "), strings.Join(flags, " "), strings.Join(args[idx+1:], " "))
|
||||||
|
}
|
||||||
|
|
||||||
|
if term {
|
||||||
|
fmt.Fprintf(out, "Do you want to grant requested privileges and continue? [y/N] ")
|
||||||
|
reader := bufio.NewReader(os.Stdin)
|
||||||
|
answerCh := make(chan string, 1)
|
||||||
|
go func() {
|
||||||
|
answer, _, _ := reader.ReadLine()
|
||||||
|
answerCh <- string(answer)
|
||||||
|
close(answerCh)
|
||||||
|
}()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
case answer := <-answerCh:
|
||||||
|
if strings.ToLower(string(answer)) == "y" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return errors.Errorf("additional privileges requested")
|
||||||
|
}
|
||||||
101
bake/hcl_test.go
101
bake/hcl_test.go
@@ -273,7 +273,7 @@ func TestHCLMultiFileSharedVariables(t *testing.T) {
|
|||||||
}
|
}
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseFiles([]File{
|
c, _, err := ParseFiles([]File{
|
||||||
{Data: dt, Name: "c1.hcl"},
|
{Data: dt, Name: "c1.hcl"},
|
||||||
{Data: dt2, Name: "c2.hcl"},
|
{Data: dt2, Name: "c2.hcl"},
|
||||||
}, nil)
|
}, nil)
|
||||||
@@ -285,7 +285,7 @@ func TestHCLMultiFileSharedVariables(t *testing.T) {
|
|||||||
|
|
||||||
t.Setenv("FOO", "def")
|
t.Setenv("FOO", "def")
|
||||||
|
|
||||||
c, err = ParseFiles([]File{
|
c, _, err = ParseFiles([]File{
|
||||||
{Data: dt, Name: "c1.hcl"},
|
{Data: dt, Name: "c1.hcl"},
|
||||||
{Data: dt2, Name: "c2.hcl"},
|
{Data: dt2, Name: "c2.hcl"},
|
||||||
}, nil)
|
}, nil)
|
||||||
@@ -322,7 +322,7 @@ func TestHCLVarsWithVars(t *testing.T) {
|
|||||||
}
|
}
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseFiles([]File{
|
c, _, err := ParseFiles([]File{
|
||||||
{Data: dt, Name: "c1.hcl"},
|
{Data: dt, Name: "c1.hcl"},
|
||||||
{Data: dt2, Name: "c2.hcl"},
|
{Data: dt2, Name: "c2.hcl"},
|
||||||
}, nil)
|
}, nil)
|
||||||
@@ -334,7 +334,7 @@ func TestHCLVarsWithVars(t *testing.T) {
|
|||||||
|
|
||||||
t.Setenv("BASE", "new")
|
t.Setenv("BASE", "new")
|
||||||
|
|
||||||
c, err = ParseFiles([]File{
|
c, _, err = ParseFiles([]File{
|
||||||
{Data: dt, Name: "c1.hcl"},
|
{Data: dt, Name: "c1.hcl"},
|
||||||
{Data: dt2, Name: "c2.hcl"},
|
{Data: dt2, Name: "c2.hcl"},
|
||||||
}, nil)
|
}, nil)
|
||||||
@@ -612,7 +612,7 @@ func TestHCLMultiFileAttrs(t *testing.T) {
|
|||||||
FOO="def"
|
FOO="def"
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseFiles([]File{
|
c, _, err := ParseFiles([]File{
|
||||||
{Data: dt, Name: "c1.hcl"},
|
{Data: dt, Name: "c1.hcl"},
|
||||||
{Data: dt2, Name: "c2.hcl"},
|
{Data: dt2, Name: "c2.hcl"},
|
||||||
}, nil)
|
}, nil)
|
||||||
@@ -623,7 +623,7 @@ func TestHCLMultiFileAttrs(t *testing.T) {
|
|||||||
|
|
||||||
t.Setenv("FOO", "ghi")
|
t.Setenv("FOO", "ghi")
|
||||||
|
|
||||||
c, err = ParseFiles([]File{
|
c, _, err = ParseFiles([]File{
|
||||||
{Data: dt, Name: "c1.hcl"},
|
{Data: dt, Name: "c1.hcl"},
|
||||||
{Data: dt2, Name: "c2.hcl"},
|
{Data: dt2, Name: "c2.hcl"},
|
||||||
}, nil)
|
}, nil)
|
||||||
@@ -634,6 +634,29 @@ func TestHCLMultiFileAttrs(t *testing.T) {
|
|||||||
require.Equal(t, ptrstr("pre-ghi"), c.Targets[0].Args["v1"])
|
require.Equal(t, ptrstr("pre-ghi"), c.Targets[0].Args["v1"])
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestHCLMultiFileGlobalAttrs(t *testing.T) {
|
||||||
|
dt := []byte(`
|
||||||
|
FOO = "abc"
|
||||||
|
target "app" {
|
||||||
|
args = {
|
||||||
|
v1 = "pre-${FOO}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
dt2 := []byte(`
|
||||||
|
FOO = "def"
|
||||||
|
`)
|
||||||
|
|
||||||
|
c, _, err := ParseFiles([]File{
|
||||||
|
{Data: dt, Name: "c1.hcl"},
|
||||||
|
{Data: dt2, Name: "c2.hcl"},
|
||||||
|
}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, 1, len(c.Targets))
|
||||||
|
require.Equal(t, c.Targets[0].Name, "app")
|
||||||
|
require.Equal(t, "pre-def", *c.Targets[0].Args["v1"])
|
||||||
|
}
|
||||||
|
|
||||||
func TestHCLDuplicateTarget(t *testing.T) {
|
func TestHCLDuplicateTarget(t *testing.T) {
|
||||||
dt := []byte(`
|
dt := []byte(`
|
||||||
target "app" {
|
target "app" {
|
||||||
@@ -807,7 +830,7 @@ func TestHCLRenameMultiFile(t *testing.T) {
|
|||||||
}
|
}
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseFiles([]File{
|
c, _, err := ParseFiles([]File{
|
||||||
{Data: dt, Name: "c1.hcl"},
|
{Data: dt, Name: "c1.hcl"},
|
||||||
{Data: dt2, Name: "c2.hcl"},
|
{Data: dt2, Name: "c2.hcl"},
|
||||||
{Data: dt3, Name: "c3.hcl"},
|
{Data: dt3, Name: "c3.hcl"},
|
||||||
@@ -1027,7 +1050,7 @@ func TestHCLMatrixArgsOverride(t *testing.T) {
|
|||||||
}
|
}
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseFiles([]File{
|
c, _, err := ParseFiles([]File{
|
||||||
{Data: dt, Name: "docker-bake.hcl"},
|
{Data: dt, Name: "docker-bake.hcl"},
|
||||||
}, map[string]string{"ABC": "11,22,33"})
|
}, map[string]string{"ABC": "11,22,33"})
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -1090,6 +1113,27 @@ func TestHCLMatrixBadTypes(t *testing.T) {
|
|||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestHCLMatrixWithGlobalTarget(t *testing.T) {
|
||||||
|
dt := []byte(`
|
||||||
|
target "x" {
|
||||||
|
tags = ["a", "b"]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "default" {
|
||||||
|
tags = target.x.tags
|
||||||
|
matrix = {
|
||||||
|
dummy = [""]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
c, err := ParseFile(dt, "docker-bake.hcl")
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, 2, len(c.Targets))
|
||||||
|
require.Equal(t, "x", c.Targets[0].Name)
|
||||||
|
require.Equal(t, "default", c.Targets[1].Name)
|
||||||
|
require.Equal(t, []string{"a", "b"}, c.Targets[1].Tags)
|
||||||
|
}
|
||||||
|
|
||||||
func TestJSONAttributes(t *testing.T) {
|
func TestJSONAttributes(t *testing.T) {
|
||||||
dt := []byte(`{"FOO": "abc", "variable": {"BAR": {"default": "def"}}, "target": { "app": { "args": {"v1": "pre-${FOO}-${BAR}"}} } }`)
|
dt := []byte(`{"FOO": "abc", "variable": {"BAR": {"default": "def"}}, "target": { "app": { "args": {"v1": "pre-${FOO}-${BAR}"}} } }`)
|
||||||
|
|
||||||
@@ -1192,7 +1236,7 @@ services:
|
|||||||
v2: "bar"
|
v2: "bar"
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseFiles([]File{
|
c, _, err := ParseFiles([]File{
|
||||||
{Data: dt, Name: "c1.hcl"},
|
{Data: dt, Name: "c1.hcl"},
|
||||||
{Data: dt2, Name: "c2.yml"},
|
{Data: dt2, Name: "c2.yml"},
|
||||||
}, nil)
|
}, nil)
|
||||||
@@ -1214,7 +1258,7 @@ func TestHCLBuiltinVars(t *testing.T) {
|
|||||||
}
|
}
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseFiles([]File{
|
c, _, err := ParseFiles([]File{
|
||||||
{Data: dt, Name: "c1.hcl"},
|
{Data: dt, Name: "c1.hcl"},
|
||||||
}, map[string]string{
|
}, map[string]string{
|
||||||
"BAKE_CMD_CONTEXT": "foo",
|
"BAKE_CMD_CONTEXT": "foo",
|
||||||
@@ -1228,7 +1272,7 @@ func TestHCLBuiltinVars(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestCombineHCLAndJSONTargets(t *testing.T) {
|
func TestCombineHCLAndJSONTargets(t *testing.T) {
|
||||||
c, err := ParseFiles([]File{
|
c, _, err := ParseFiles([]File{
|
||||||
{
|
{
|
||||||
Name: "docker-bake.hcl",
|
Name: "docker-bake.hcl",
|
||||||
Data: []byte(`
|
Data: []byte(`
|
||||||
@@ -1304,7 +1348,7 @@ target "b" {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestCombineHCLAndJSONVars(t *testing.T) {
|
func TestCombineHCLAndJSONVars(t *testing.T) {
|
||||||
c, err := ParseFiles([]File{
|
c, _, err := ParseFiles([]File{
|
||||||
{
|
{
|
||||||
Name: "docker-bake.hcl",
|
Name: "docker-bake.hcl",
|
||||||
Data: []byte(`
|
Data: []byte(`
|
||||||
@@ -1401,6 +1445,39 @@ func TestVarUnsupportedType(t *testing.T) {
|
|||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestHCLIndexOfFunc(t *testing.T) {
|
||||||
|
dt := []byte(`
|
||||||
|
variable "APP_VERSIONS" {
|
||||||
|
default = [
|
||||||
|
"1.42.4",
|
||||||
|
"1.42.3"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
target "default" {
|
||||||
|
args = {
|
||||||
|
APP_VERSION = app_version
|
||||||
|
}
|
||||||
|
matrix = {
|
||||||
|
app_version = APP_VERSIONS
|
||||||
|
}
|
||||||
|
name="app-${replace(app_version, ".", "-")}"
|
||||||
|
tags = [
|
||||||
|
"app:${app_version}",
|
||||||
|
indexof(APP_VERSIONS, app_version) == 0 ? "app:latest" : "",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
|
||||||
|
c, err := ParseFile(dt, "docker-bake.hcl")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 2, len(c.Targets))
|
||||||
|
require.Equal(t, "app-1-42-4", c.Targets[0].Name)
|
||||||
|
require.Equal(t, "app:latest", c.Targets[0].Tags[1])
|
||||||
|
require.Equal(t, "app-1-42-3", c.Targets[1].Name)
|
||||||
|
require.Empty(t, c.Targets[1].Tags[1])
|
||||||
|
}
|
||||||
|
|
||||||
func ptrstr(s interface{}) *string {
|
func ptrstr(s interface{}) *string {
|
||||||
var n *string
|
var n *string
|
||||||
if reflect.ValueOf(s).Kind() == reflect.String {
|
if reflect.ValueOf(s).Kind() == reflect.String {
|
||||||
|
|||||||
@@ -27,7 +27,9 @@ type Opt struct {
|
|||||||
type variable struct {
|
type variable struct {
|
||||||
Name string `json:"-" hcl:"name,label"`
|
Name string `json:"-" hcl:"name,label"`
|
||||||
Default *hcl.Attribute `json:"default,omitempty" hcl:"default,optional"`
|
Default *hcl.Attribute `json:"default,omitempty" hcl:"default,optional"`
|
||||||
|
Description string `json:"description,omitempty" hcl:"description,optional"`
|
||||||
Body hcl.Body `json:"-" hcl:",body"`
|
Body hcl.Body `json:"-" hcl:",body"`
|
||||||
|
Remain hcl.Body `json:"-" hcl:",remain"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type functionDef struct {
|
type functionDef struct {
|
||||||
@@ -73,7 +75,12 @@ type WithGetName interface {
|
|||||||
GetName(ectx *hcl.EvalContext, block *hcl.Block, loadDeps func(hcl.Expression) hcl.Diagnostics) (string, error)
|
GetName(ectx *hcl.EvalContext, block *hcl.Block, loadDeps func(hcl.Expression) hcl.Diagnostics) (string, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
var errUndefined = errors.New("undefined")
|
// errUndefined is returned when a variable or function is not defined.
|
||||||
|
type errUndefined struct{}
|
||||||
|
|
||||||
|
func (errUndefined) Error() string {
|
||||||
|
return "undefined"
|
||||||
|
}
|
||||||
|
|
||||||
func (p *parser) loadDeps(ectx *hcl.EvalContext, exp hcl.Expression, exclude map[string]struct{}, allowMissing bool) hcl.Diagnostics {
|
func (p *parser) loadDeps(ectx *hcl.EvalContext, exp hcl.Expression, exclude map[string]struct{}, allowMissing bool) hcl.Diagnostics {
|
||||||
fns, hcldiags := funcCalls(exp)
|
fns, hcldiags := funcCalls(exp)
|
||||||
@@ -83,7 +90,7 @@ func (p *parser) loadDeps(ectx *hcl.EvalContext, exp hcl.Expression, exclude map
|
|||||||
|
|
||||||
for _, fn := range fns {
|
for _, fn := range fns {
|
||||||
if err := p.resolveFunction(ectx, fn); err != nil {
|
if err := p.resolveFunction(ectx, fn); err != nil {
|
||||||
if allowMissing && errors.Is(err, errUndefined) {
|
if allowMissing && errors.Is(err, errUndefined{}) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
return wrapErrorDiagnostic("Invalid expression", err, exp.Range().Ptr(), exp.Range().Ptr())
|
return wrapErrorDiagnostic("Invalid expression", err, exp.Range().Ptr(), exp.Range().Ptr())
|
||||||
@@ -137,7 +144,7 @@ func (p *parser) loadDeps(ectx *hcl.EvalContext, exp hcl.Expression, exclude map
|
|||||||
}
|
}
|
||||||
for _, block := range blocks {
|
for _, block := range blocks {
|
||||||
if err := p.resolveBlock(block, target); err != nil {
|
if err := p.resolveBlock(block, target); err != nil {
|
||||||
if allowMissing && errors.Is(err, errUndefined) {
|
if allowMissing && errors.Is(err, errUndefined{}) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
return wrapErrorDiagnostic("Invalid expression", err, exp.Range().Ptr(), exp.Range().Ptr())
|
return wrapErrorDiagnostic("Invalid expression", err, exp.Range().Ptr(), exp.Range().Ptr())
|
||||||
@@ -145,7 +152,7 @@ func (p *parser) loadDeps(ectx *hcl.EvalContext, exp hcl.Expression, exclude map
|
|||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
if err := p.resolveValue(ectx, v.RootName()); err != nil {
|
if err := p.resolveValue(ectx, v.RootName()); err != nil {
|
||||||
if allowMissing && errors.Is(err, errUndefined) {
|
if allowMissing && errors.Is(err, errUndefined{}) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
return wrapErrorDiagnostic("Invalid expression", err, exp.Range().Ptr(), exp.Range().Ptr())
|
return wrapErrorDiagnostic("Invalid expression", err, exp.Range().Ptr(), exp.Range().Ptr())
|
||||||
@@ -167,7 +174,7 @@ func (p *parser) resolveFunction(ectx *hcl.EvalContext, name string) error {
|
|||||||
}
|
}
|
||||||
f, ok := p.funcs[name]
|
f, ok := p.funcs[name]
|
||||||
if !ok {
|
if !ok {
|
||||||
return errors.Wrapf(errUndefined, "function %q does not exist", name)
|
return errors.Wrapf(errUndefined{}, "function %q does not exist", name)
|
||||||
}
|
}
|
||||||
if _, ok := p.progressF[key(ectx, name)]; ok {
|
if _, ok := p.progressF[key(ectx, name)]; ok {
|
||||||
return errors.Errorf("function cycle not allowed for %s", name)
|
return errors.Errorf("function cycle not allowed for %s", name)
|
||||||
@@ -257,7 +264,7 @@ func (p *parser) resolveValue(ectx *hcl.EvalContext, name string) (err error) {
|
|||||||
if _, builtin := p.opt.Vars[name]; !ok && !builtin {
|
if _, builtin := p.opt.Vars[name]; !ok && !builtin {
|
||||||
vr, ok := p.vars[name]
|
vr, ok := p.vars[name]
|
||||||
if !ok {
|
if !ok {
|
||||||
return errors.Wrapf(errUndefined, "variable %q does not exist", name)
|
return errors.Wrapf(errUndefined{}, "variable %q does not exist", name)
|
||||||
}
|
}
|
||||||
def = vr.Default
|
def = vr.Default
|
||||||
ectx = p.ectx
|
ectx = p.ectx
|
||||||
@@ -534,7 +541,18 @@ func (p *parser) resolveBlockNames(block *hcl.Block) ([]string, error) {
|
|||||||
return names, nil
|
return names, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func Parse(b hcl.Body, opt Opt, val interface{}) (map[string]map[string][]string, hcl.Diagnostics) {
|
type Variable struct {
|
||||||
|
Name string
|
||||||
|
Description string
|
||||||
|
Value *string
|
||||||
|
}
|
||||||
|
|
||||||
|
type ParseMeta struct {
|
||||||
|
Renamed map[string]map[string][]string
|
||||||
|
AllVariables []*Variable
|
||||||
|
}
|
||||||
|
|
||||||
|
func Parse(b hcl.Body, opt Opt, val interface{}) (*ParseMeta, hcl.Diagnostics) {
|
||||||
reserved := map[string]struct{}{}
|
reserved := map[string]struct{}{}
|
||||||
schema, _ := gohcl.ImpliedBodySchema(val)
|
schema, _ := gohcl.ImpliedBodySchema(val)
|
||||||
|
|
||||||
@@ -613,7 +631,7 @@ func Parse(b hcl.Body, opt Opt, val interface{}) (map[string]map[string][]string
|
|||||||
|
|
||||||
attrs, diags := b.JustAttributes()
|
attrs, diags := b.JustAttributes()
|
||||||
if diags.HasErrors() {
|
if diags.HasErrors() {
|
||||||
if d := removeAttributesDiags(diags, reserved, p.vars); len(d) > 0 {
|
if d := removeAttributesDiags(diags, reserved, p.vars, attrs); len(d) > 0 {
|
||||||
return nil, d
|
return nil, d
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -631,17 +649,19 @@ func Parse(b hcl.Body, opt Opt, val interface{}) (map[string]map[string][]string
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, a := range content.Attributes {
|
for _, a := range content.Attributes {
|
||||||
|
a := a
|
||||||
return nil, hcl.Diagnostics{
|
return nil, hcl.Diagnostics{
|
||||||
&hcl.Diagnostic{
|
&hcl.Diagnostic{
|
||||||
Severity: hcl.DiagError,
|
Severity: hcl.DiagError,
|
||||||
Summary: "Invalid attribute",
|
Summary: "Invalid attribute",
|
||||||
Detail: "global attributes currently not supported",
|
Detail: "global attributes currently not supported",
|
||||||
Subject: &a.Range,
|
Subject: a.Range.Ptr(),
|
||||||
Context: &a.Range,
|
Context: a.Range.Ptr(),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
vars := make([]*Variable, 0, len(p.vars))
|
||||||
for k := range p.vars {
|
for k := range p.vars {
|
||||||
if err := p.resolveValue(p.ectx, k); err != nil {
|
if err := p.resolveValue(p.ectx, k); err != nil {
|
||||||
if diags, ok := err.(hcl.Diagnostics); ok {
|
if diags, ok := err.(hcl.Diagnostics); ok {
|
||||||
@@ -650,6 +670,21 @@ func Parse(b hcl.Body, opt Opt, val interface{}) (map[string]map[string][]string
|
|||||||
r := p.vars[k].Body.MissingItemRange()
|
r := p.vars[k].Body.MissingItemRange()
|
||||||
return nil, wrapErrorDiagnostic("Invalid value", err, &r, &r)
|
return nil, wrapErrorDiagnostic("Invalid value", err, &r, &r)
|
||||||
}
|
}
|
||||||
|
v := &Variable{
|
||||||
|
Name: p.vars[k].Name,
|
||||||
|
Description: p.vars[k].Description,
|
||||||
|
}
|
||||||
|
if vv := p.ectx.Variables[k]; !vv.IsNull() {
|
||||||
|
var s string
|
||||||
|
switch vv.Type() {
|
||||||
|
case cty.String:
|
||||||
|
s = vv.AsString()
|
||||||
|
case cty.Bool:
|
||||||
|
s = strconv.FormatBool(vv.True())
|
||||||
|
}
|
||||||
|
v.Value = &s
|
||||||
|
}
|
||||||
|
vars = append(vars, v)
|
||||||
}
|
}
|
||||||
|
|
||||||
for k := range p.funcs {
|
for k := range p.funcs {
|
||||||
@@ -660,13 +695,14 @@ func Parse(b hcl.Body, opt Opt, val interface{}) (map[string]map[string][]string
|
|||||||
var subject *hcl.Range
|
var subject *hcl.Range
|
||||||
var context *hcl.Range
|
var context *hcl.Range
|
||||||
if p.funcs[k].Params != nil {
|
if p.funcs[k].Params != nil {
|
||||||
subject = &p.funcs[k].Params.Range
|
subject = p.funcs[k].Params.Range.Ptr()
|
||||||
context = subject
|
context = subject
|
||||||
} else {
|
} else {
|
||||||
for _, block := range blocks.Blocks {
|
for _, block := range blocks.Blocks {
|
||||||
|
block := block
|
||||||
if block.Type == "function" && len(block.Labels) == 1 && block.Labels[0] == k {
|
if block.Type == "function" && len(block.Labels) == 1 && block.Labels[0] == k {
|
||||||
subject = &block.LabelRanges[0]
|
subject = block.LabelRanges[0].Ptr()
|
||||||
context = &block.DefRange
|
context = block.DefRange.Ptr()
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -732,6 +768,7 @@ func Parse(b hcl.Body, opt Opt, val interface{}) (map[string]map[string][]string
|
|||||||
|
|
||||||
diags = hcl.Diagnostics{}
|
diags = hcl.Diagnostics{}
|
||||||
for _, b := range content.Blocks {
|
for _, b := range content.Blocks {
|
||||||
|
b := b
|
||||||
v := reflect.ValueOf(val)
|
v := reflect.ValueOf(val)
|
||||||
|
|
||||||
err := p.resolveBlock(b, nil)
|
err := p.resolveBlock(b, nil)
|
||||||
@@ -742,7 +779,7 @@ func Parse(b hcl.Body, opt Opt, val interface{}) (map[string]map[string][]string
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
return nil, wrapErrorDiagnostic("Invalid block", err, &b.LabelRanges[0], &b.DefRange)
|
return nil, wrapErrorDiagnostic("Invalid block", err, b.LabelRanges[0].Ptr(), b.DefRange.Ptr())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -792,7 +829,10 @@ func Parse(b hcl.Body, opt Opt, val interface{}) (map[string]map[string][]string
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return renamed, nil
|
return &ParseMeta{
|
||||||
|
Renamed: renamed,
|
||||||
|
AllVariables: vars,
|
||||||
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// wrapErrorDiagnostic wraps an error into a hcl.Diagnostics object.
|
// wrapErrorDiagnostic wraps an error into a hcl.Diagnostics object.
|
||||||
@@ -854,7 +894,7 @@ func getNameIndex(v reflect.Value) (int, bool) {
|
|||||||
return 0, false
|
return 0, false
|
||||||
}
|
}
|
||||||
|
|
||||||
func removeAttributesDiags(diags hcl.Diagnostics, reserved map[string]struct{}, vars map[string]*variable) hcl.Diagnostics {
|
func removeAttributesDiags(diags hcl.Diagnostics, reserved map[string]struct{}, vars map[string]*variable, attrs hcl.Attributes) hcl.Diagnostics {
|
||||||
var fdiags hcl.Diagnostics
|
var fdiags hcl.Diagnostics
|
||||||
for _, d := range diags {
|
for _, d := range diags {
|
||||||
if fout := func(d *hcl.Diagnostic) bool {
|
if fout := func(d *hcl.Diagnostic) bool {
|
||||||
@@ -876,6 +916,12 @@ func removeAttributesDiags(diags hcl.Diagnostics, reserved map[string]struct{},
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
for a := range attrs {
|
||||||
|
// Do the same for attributes
|
||||||
|
if strings.HasPrefix(d.Detail, fmt.Sprintf(`Argument "%s" was already set at `, a)) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
return false
|
return false
|
||||||
}(d); !fout {
|
}(d); !fout {
|
||||||
fdiags = append(fdiags, d)
|
fdiags = append(fdiags, d)
|
||||||
|
|||||||
228
bake/hclparser/merged.go
Normal file
228
bake/hclparser/merged.go
Normal file
@@ -0,0 +1,228 @@
|
|||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
// Forked from https://github.com/hashicorp/hcl/blob/4679383728fe331fc8a6b46036a27b8f818d9bc0/merged.go
|
||||||
|
|
||||||
|
package hclparser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/hashicorp/hcl/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MergeFiles combines the given files to produce a single body that contains
|
||||||
|
// configuration from all of the given files.
|
||||||
|
//
|
||||||
|
// The ordering of the given files decides the order in which contained
|
||||||
|
// elements will be returned. If any top-level attributes are defined with
|
||||||
|
// the same name across multiple files, a diagnostic will be produced from
|
||||||
|
// the Content and PartialContent methods describing this error in a
|
||||||
|
// user-friendly way.
|
||||||
|
func MergeFiles(files []*hcl.File) hcl.Body {
|
||||||
|
var bodies []hcl.Body
|
||||||
|
for _, file := range files {
|
||||||
|
bodies = append(bodies, file.Body)
|
||||||
|
}
|
||||||
|
return MergeBodies(bodies)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MergeBodies is like MergeFiles except it deals directly with bodies, rather
|
||||||
|
// than with entire files.
|
||||||
|
func MergeBodies(bodies []hcl.Body) hcl.Body {
|
||||||
|
if len(bodies) == 0 {
|
||||||
|
// Swap out for our singleton empty body, to reduce the number of
|
||||||
|
// empty slices we have hanging around.
|
||||||
|
return emptyBody
|
||||||
|
}
|
||||||
|
|
||||||
|
// If any of the given bodies are already merged bodies, we'll unpack
|
||||||
|
// to flatten to a single mergedBodies, since that's conceptually simpler.
|
||||||
|
// This also, as a side-effect, eliminates any empty bodies, since
|
||||||
|
// empties are merged bodies with no inner bodies.
|
||||||
|
var newLen int
|
||||||
|
var flatten bool
|
||||||
|
for _, body := range bodies {
|
||||||
|
if children, merged := body.(mergedBodies); merged {
|
||||||
|
newLen += len(children)
|
||||||
|
flatten = true
|
||||||
|
} else {
|
||||||
|
newLen++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !flatten { // not just newLen == len, because we might have mergedBodies with single bodies inside
|
||||||
|
return mergedBodies(bodies)
|
||||||
|
}
|
||||||
|
|
||||||
|
if newLen == 0 {
|
||||||
|
// Don't allocate a new empty when we already have one
|
||||||
|
return emptyBody
|
||||||
|
}
|
||||||
|
|
||||||
|
n := make([]hcl.Body, 0, newLen)
|
||||||
|
for _, body := range bodies {
|
||||||
|
if children, merged := body.(mergedBodies); merged {
|
||||||
|
n = append(n, children...)
|
||||||
|
} else {
|
||||||
|
n = append(n, body)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return mergedBodies(n)
|
||||||
|
}
|
||||||
|
|
||||||
|
var emptyBody = mergedBodies([]hcl.Body{})
|
||||||
|
|
||||||
|
// EmptyBody returns a body with no content. This body can be used as a
|
||||||
|
// placeholder when a body is required but no body content is available.
|
||||||
|
func EmptyBody() hcl.Body {
|
||||||
|
return emptyBody
|
||||||
|
}
|
||||||
|
|
||||||
|
type mergedBodies []hcl.Body
|
||||||
|
|
||||||
|
// Content returns the content produced by applying the given schema to all
|
||||||
|
// of the merged bodies and merging the result.
|
||||||
|
//
|
||||||
|
// Although required attributes _are_ supported, they should be used sparingly
|
||||||
|
// with merged bodies since in this case there is no contextual information
|
||||||
|
// with which to return good diagnostics. Applications working with merged
|
||||||
|
// bodies may wish to mark all attributes as optional and then check for
|
||||||
|
// required attributes afterwards, to produce better diagnostics.
|
||||||
|
func (mb mergedBodies) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
|
||||||
|
// the returned body will always be empty in this case, because mergedContent
|
||||||
|
// will only ever call Content on the child bodies.
|
||||||
|
content, _, diags := mb.mergedContent(schema, false)
|
||||||
|
return content, diags
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mb mergedBodies) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
|
||||||
|
return mb.mergedContent(schema, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mb mergedBodies) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
|
||||||
|
attrs := make(map[string]*hcl.Attribute)
|
||||||
|
var diags hcl.Diagnostics
|
||||||
|
|
||||||
|
for _, body := range mb {
|
||||||
|
thisAttrs, thisDiags := body.JustAttributes()
|
||||||
|
|
||||||
|
if len(thisDiags) != 0 {
|
||||||
|
diags = append(diags, thisDiags...)
|
||||||
|
}
|
||||||
|
|
||||||
|
for name, attr := range thisAttrs {
|
||||||
|
if existing := attrs[name]; existing != nil {
|
||||||
|
diags = diags.Append(&hcl.Diagnostic{
|
||||||
|
Severity: hcl.DiagError,
|
||||||
|
Summary: "Duplicate argument",
|
||||||
|
Detail: fmt.Sprintf(
|
||||||
|
"Argument %q was already set at %s",
|
||||||
|
name, existing.NameRange.String(),
|
||||||
|
),
|
||||||
|
Subject: thisAttrs[name].NameRange.Ptr(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
attrs[name] = attr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return attrs, diags
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mb mergedBodies) MissingItemRange() hcl.Range {
|
||||||
|
if len(mb) == 0 {
|
||||||
|
// Nothing useful to return here, so we'll return some garbage.
|
||||||
|
return hcl.Range{
|
||||||
|
Filename: "<empty>",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// arbitrarily use the first body's missing item range
|
||||||
|
return mb[0].MissingItemRange()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mb mergedBodies) mergedContent(schema *hcl.BodySchema, partial bool) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
|
||||||
|
// We need to produce a new schema with none of the attributes marked as
|
||||||
|
// required, since _any one_ of our bodies can contribute an attribute value.
|
||||||
|
// We'll separately check that all required attributes are present at
|
||||||
|
// the end.
|
||||||
|
mergedSchema := &hcl.BodySchema{
|
||||||
|
Blocks: schema.Blocks,
|
||||||
|
}
|
||||||
|
for _, attrS := range schema.Attributes {
|
||||||
|
mergedAttrS := attrS
|
||||||
|
mergedAttrS.Required = false
|
||||||
|
mergedSchema.Attributes = append(mergedSchema.Attributes, mergedAttrS)
|
||||||
|
}
|
||||||
|
|
||||||
|
var mergedLeftovers []hcl.Body
|
||||||
|
content := &hcl.BodyContent{
|
||||||
|
Attributes: map[string]*hcl.Attribute{},
|
||||||
|
}
|
||||||
|
|
||||||
|
var diags hcl.Diagnostics
|
||||||
|
for _, body := range mb {
|
||||||
|
var thisContent *hcl.BodyContent
|
||||||
|
var thisLeftovers hcl.Body
|
||||||
|
var thisDiags hcl.Diagnostics
|
||||||
|
|
||||||
|
if partial {
|
||||||
|
thisContent, thisLeftovers, thisDiags = body.PartialContent(mergedSchema)
|
||||||
|
} else {
|
||||||
|
thisContent, thisDiags = body.Content(mergedSchema)
|
||||||
|
}
|
||||||
|
|
||||||
|
if thisLeftovers != nil {
|
||||||
|
mergedLeftovers = append(mergedLeftovers, thisLeftovers)
|
||||||
|
}
|
||||||
|
if len(thisDiags) != 0 {
|
||||||
|
diags = append(diags, thisDiags...)
|
||||||
|
}
|
||||||
|
|
||||||
|
if thisContent.Attributes != nil {
|
||||||
|
for name, attr := range thisContent.Attributes {
|
||||||
|
if existing := content.Attributes[name]; existing != nil {
|
||||||
|
diags = diags.Append(&hcl.Diagnostic{
|
||||||
|
Severity: hcl.DiagError,
|
||||||
|
Summary: "Duplicate argument",
|
||||||
|
Detail: fmt.Sprintf(
|
||||||
|
"Argument %q was already set at %s",
|
||||||
|
name, existing.NameRange.String(),
|
||||||
|
),
|
||||||
|
Subject: thisContent.Attributes[name].NameRange.Ptr(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
content.Attributes[name] = attr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(thisContent.Blocks) != 0 {
|
||||||
|
content.Blocks = append(content.Blocks, thisContent.Blocks...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Finally, we check for required attributes.
|
||||||
|
for _, attrS := range schema.Attributes {
|
||||||
|
if !attrS.Required {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if content.Attributes[attrS.Name] == nil {
|
||||||
|
// We don't have any context here to produce a good diagnostic,
|
||||||
|
// which is why we warn in the Content docstring to minimize the
|
||||||
|
// use of required attributes on merged bodies.
|
||||||
|
diags = diags.Append(&hcl.Diagnostic{
|
||||||
|
Severity: hcl.DiagError,
|
||||||
|
Summary: "Missing required argument",
|
||||||
|
Detail: fmt.Sprintf(
|
||||||
|
"The argument %q is required, but was not set.",
|
||||||
|
attrS.Name,
|
||||||
|
),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
leftoverBody := MergeBodies(mergedLeftovers)
|
||||||
|
return content, leftoverBody, diags
|
||||||
|
}
|
||||||
@@ -1,6 +1,9 @@
|
|||||||
package hclparser
|
package hclparser
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/hashicorp/go-cty-funcs/cidr"
|
"github.com/hashicorp/go-cty-funcs/cidr"
|
||||||
@@ -14,122 +17,246 @@ import (
|
|||||||
"github.com/zclconf/go-cty/cty/function/stdlib"
|
"github.com/zclconf/go-cty/cty/function/stdlib"
|
||||||
)
|
)
|
||||||
|
|
||||||
var stdlibFunctions = map[string]function.Function{
|
type funcDef struct {
|
||||||
"absolute": stdlib.AbsoluteFunc,
|
name string
|
||||||
"add": stdlib.AddFunc,
|
fn function.Function
|
||||||
"and": stdlib.AndFunc,
|
factory func() function.Function
|
||||||
"base64decode": encoding.Base64DecodeFunc,
|
}
|
||||||
"base64encode": encoding.Base64EncodeFunc,
|
|
||||||
"bcrypt": crypto.BcryptFunc,
|
var stdlibFunctions = []funcDef{
|
||||||
"byteslen": stdlib.BytesLenFunc,
|
{name: "absolute", fn: stdlib.AbsoluteFunc},
|
||||||
"bytesslice": stdlib.BytesSliceFunc,
|
{name: "add", fn: stdlib.AddFunc},
|
||||||
"can": tryfunc.CanFunc,
|
{name: "and", fn: stdlib.AndFunc},
|
||||||
"ceil": stdlib.CeilFunc,
|
{name: "base64decode", fn: encoding.Base64DecodeFunc},
|
||||||
"chomp": stdlib.ChompFunc,
|
{name: "base64encode", fn: encoding.Base64EncodeFunc},
|
||||||
"chunklist": stdlib.ChunklistFunc,
|
{name: "basename", factory: basenameFunc},
|
||||||
"cidrhost": cidr.HostFunc,
|
{name: "bcrypt", fn: crypto.BcryptFunc},
|
||||||
"cidrnetmask": cidr.NetmaskFunc,
|
{name: "byteslen", fn: stdlib.BytesLenFunc},
|
||||||
"cidrsubnet": cidr.SubnetFunc,
|
{name: "bytesslice", fn: stdlib.BytesSliceFunc},
|
||||||
"cidrsubnets": cidr.SubnetsFunc,
|
{name: "can", fn: tryfunc.CanFunc},
|
||||||
"coalesce": stdlib.CoalesceFunc,
|
{name: "ceil", fn: stdlib.CeilFunc},
|
||||||
"coalescelist": stdlib.CoalesceListFunc,
|
{name: "chomp", fn: stdlib.ChompFunc},
|
||||||
"compact": stdlib.CompactFunc,
|
{name: "chunklist", fn: stdlib.ChunklistFunc},
|
||||||
"concat": stdlib.ConcatFunc,
|
{name: "cidrhost", fn: cidr.HostFunc},
|
||||||
"contains": stdlib.ContainsFunc,
|
{name: "cidrnetmask", fn: cidr.NetmaskFunc},
|
||||||
"convert": typeexpr.ConvertFunc,
|
{name: "cidrsubnet", fn: cidr.SubnetFunc},
|
||||||
"csvdecode": stdlib.CSVDecodeFunc,
|
{name: "cidrsubnets", fn: cidr.SubnetsFunc},
|
||||||
"distinct": stdlib.DistinctFunc,
|
{name: "coalesce", fn: stdlib.CoalesceFunc},
|
||||||
"divide": stdlib.DivideFunc,
|
{name: "coalescelist", fn: stdlib.CoalesceListFunc},
|
||||||
"element": stdlib.ElementFunc,
|
{name: "compact", fn: stdlib.CompactFunc},
|
||||||
"equal": stdlib.EqualFunc,
|
{name: "concat", fn: stdlib.ConcatFunc},
|
||||||
"flatten": stdlib.FlattenFunc,
|
{name: "contains", fn: stdlib.ContainsFunc},
|
||||||
"floor": stdlib.FloorFunc,
|
{name: "convert", fn: typeexpr.ConvertFunc},
|
||||||
"format": stdlib.FormatFunc,
|
{name: "csvdecode", fn: stdlib.CSVDecodeFunc},
|
||||||
"formatdate": stdlib.FormatDateFunc,
|
{name: "dirname", factory: dirnameFunc},
|
||||||
"formatlist": stdlib.FormatListFunc,
|
{name: "distinct", fn: stdlib.DistinctFunc},
|
||||||
"greaterthan": stdlib.GreaterThanFunc,
|
{name: "divide", fn: stdlib.DivideFunc},
|
||||||
"greaterthanorequalto": stdlib.GreaterThanOrEqualToFunc,
|
{name: "element", fn: stdlib.ElementFunc},
|
||||||
"hasindex": stdlib.HasIndexFunc,
|
{name: "equal", fn: stdlib.EqualFunc},
|
||||||
"indent": stdlib.IndentFunc,
|
{name: "flatten", fn: stdlib.FlattenFunc},
|
||||||
"index": stdlib.IndexFunc,
|
{name: "floor", fn: stdlib.FloorFunc},
|
||||||
"int": stdlib.IntFunc,
|
{name: "format", fn: stdlib.FormatFunc},
|
||||||
"join": stdlib.JoinFunc,
|
{name: "formatdate", fn: stdlib.FormatDateFunc},
|
||||||
"jsondecode": stdlib.JSONDecodeFunc,
|
{name: "formatlist", fn: stdlib.FormatListFunc},
|
||||||
"jsonencode": stdlib.JSONEncodeFunc,
|
{name: "greaterthan", fn: stdlib.GreaterThanFunc},
|
||||||
"keys": stdlib.KeysFunc,
|
{name: "greaterthanorequalto", fn: stdlib.GreaterThanOrEqualToFunc},
|
||||||
"length": stdlib.LengthFunc,
|
{name: "hasindex", fn: stdlib.HasIndexFunc},
|
||||||
"lessthan": stdlib.LessThanFunc,
|
{name: "indent", fn: stdlib.IndentFunc},
|
||||||
"lessthanorequalto": stdlib.LessThanOrEqualToFunc,
|
{name: "index", fn: stdlib.IndexFunc},
|
||||||
"log": stdlib.LogFunc,
|
{name: "indexof", factory: indexOfFunc},
|
||||||
"lookup": stdlib.LookupFunc,
|
{name: "int", fn: stdlib.IntFunc},
|
||||||
"lower": stdlib.LowerFunc,
|
{name: "join", fn: stdlib.JoinFunc},
|
||||||
"max": stdlib.MaxFunc,
|
{name: "jsondecode", fn: stdlib.JSONDecodeFunc},
|
||||||
"md5": crypto.Md5Func,
|
{name: "jsonencode", fn: stdlib.JSONEncodeFunc},
|
||||||
"merge": stdlib.MergeFunc,
|
{name: "keys", fn: stdlib.KeysFunc},
|
||||||
"min": stdlib.MinFunc,
|
{name: "length", fn: stdlib.LengthFunc},
|
||||||
"modulo": stdlib.ModuloFunc,
|
{name: "lessthan", fn: stdlib.LessThanFunc},
|
||||||
"multiply": stdlib.MultiplyFunc,
|
{name: "lessthanorequalto", fn: stdlib.LessThanOrEqualToFunc},
|
||||||
"negate": stdlib.NegateFunc,
|
{name: "log", fn: stdlib.LogFunc},
|
||||||
"not": stdlib.NotFunc,
|
{name: "lookup", fn: stdlib.LookupFunc},
|
||||||
"notequal": stdlib.NotEqualFunc,
|
{name: "lower", fn: stdlib.LowerFunc},
|
||||||
"or": stdlib.OrFunc,
|
{name: "max", fn: stdlib.MaxFunc},
|
||||||
"parseint": stdlib.ParseIntFunc,
|
{name: "md5", fn: crypto.Md5Func},
|
||||||
"pow": stdlib.PowFunc,
|
{name: "merge", fn: stdlib.MergeFunc},
|
||||||
"range": stdlib.RangeFunc,
|
{name: "min", fn: stdlib.MinFunc},
|
||||||
"regex_replace": stdlib.RegexReplaceFunc,
|
{name: "modulo", fn: stdlib.ModuloFunc},
|
||||||
"regex": stdlib.RegexFunc,
|
{name: "multiply", fn: stdlib.MultiplyFunc},
|
||||||
"regexall": stdlib.RegexAllFunc,
|
{name: "negate", fn: stdlib.NegateFunc},
|
||||||
"replace": stdlib.ReplaceFunc,
|
{name: "not", fn: stdlib.NotFunc},
|
||||||
"reverse": stdlib.ReverseFunc,
|
{name: "notequal", fn: stdlib.NotEqualFunc},
|
||||||
"reverselist": stdlib.ReverseListFunc,
|
{name: "or", fn: stdlib.OrFunc},
|
||||||
"rsadecrypt": crypto.RsaDecryptFunc,
|
{name: "parseint", fn: stdlib.ParseIntFunc},
|
||||||
"sethaselement": stdlib.SetHasElementFunc,
|
{name: "pow", fn: stdlib.PowFunc},
|
||||||
"setintersection": stdlib.SetIntersectionFunc,
|
{name: "range", fn: stdlib.RangeFunc},
|
||||||
"setproduct": stdlib.SetProductFunc,
|
{name: "regex_replace", fn: stdlib.RegexReplaceFunc},
|
||||||
"setsubtract": stdlib.SetSubtractFunc,
|
{name: "regex", fn: stdlib.RegexFunc},
|
||||||
"setsymmetricdifference": stdlib.SetSymmetricDifferenceFunc,
|
{name: "regexall", fn: stdlib.RegexAllFunc},
|
||||||
"setunion": stdlib.SetUnionFunc,
|
{name: "replace", fn: stdlib.ReplaceFunc},
|
||||||
"sha1": crypto.Sha1Func,
|
{name: "reverse", fn: stdlib.ReverseFunc},
|
||||||
"sha256": crypto.Sha256Func,
|
{name: "reverselist", fn: stdlib.ReverseListFunc},
|
||||||
"sha512": crypto.Sha512Func,
|
{name: "rsadecrypt", fn: crypto.RsaDecryptFunc},
|
||||||
"signum": stdlib.SignumFunc,
|
{name: "sanitize", factory: sanitizeFunc},
|
||||||
"slice": stdlib.SliceFunc,
|
{name: "sethaselement", fn: stdlib.SetHasElementFunc},
|
||||||
"sort": stdlib.SortFunc,
|
{name: "setintersection", fn: stdlib.SetIntersectionFunc},
|
||||||
"split": stdlib.SplitFunc,
|
{name: "setproduct", fn: stdlib.SetProductFunc},
|
||||||
"strlen": stdlib.StrlenFunc,
|
{name: "setsubtract", fn: stdlib.SetSubtractFunc},
|
||||||
"substr": stdlib.SubstrFunc,
|
{name: "setsymmetricdifference", fn: stdlib.SetSymmetricDifferenceFunc},
|
||||||
"subtract": stdlib.SubtractFunc,
|
{name: "setunion", fn: stdlib.SetUnionFunc},
|
||||||
"timeadd": stdlib.TimeAddFunc,
|
{name: "sha1", fn: crypto.Sha1Func},
|
||||||
"timestamp": timestampFunc,
|
{name: "sha256", fn: crypto.Sha256Func},
|
||||||
"title": stdlib.TitleFunc,
|
{name: "sha512", fn: crypto.Sha512Func},
|
||||||
"trim": stdlib.TrimFunc,
|
{name: "signum", fn: stdlib.SignumFunc},
|
||||||
"trimprefix": stdlib.TrimPrefixFunc,
|
{name: "slice", fn: stdlib.SliceFunc},
|
||||||
"trimspace": stdlib.TrimSpaceFunc,
|
{name: "sort", fn: stdlib.SortFunc},
|
||||||
"trimsuffix": stdlib.TrimSuffixFunc,
|
{name: "split", fn: stdlib.SplitFunc},
|
||||||
"try": tryfunc.TryFunc,
|
{name: "strlen", fn: stdlib.StrlenFunc},
|
||||||
"upper": stdlib.UpperFunc,
|
{name: "substr", fn: stdlib.SubstrFunc},
|
||||||
"urlencode": encoding.URLEncodeFunc,
|
{name: "subtract", fn: stdlib.SubtractFunc},
|
||||||
"uuidv4": uuid.V4Func,
|
{name: "timeadd", fn: stdlib.TimeAddFunc},
|
||||||
"uuidv5": uuid.V5Func,
|
{name: "timestamp", factory: timestampFunc},
|
||||||
"values": stdlib.ValuesFunc,
|
{name: "title", fn: stdlib.TitleFunc},
|
||||||
"zipmap": stdlib.ZipmapFunc,
|
{name: "trim", fn: stdlib.TrimFunc},
|
||||||
|
{name: "trimprefix", fn: stdlib.TrimPrefixFunc},
|
||||||
|
{name: "trimspace", fn: stdlib.TrimSpaceFunc},
|
||||||
|
{name: "trimsuffix", fn: stdlib.TrimSuffixFunc},
|
||||||
|
{name: "try", fn: tryfunc.TryFunc},
|
||||||
|
{name: "upper", fn: stdlib.UpperFunc},
|
||||||
|
{name: "urlencode", fn: encoding.URLEncodeFunc},
|
||||||
|
{name: "uuidv4", fn: uuid.V4Func},
|
||||||
|
{name: "uuidv5", fn: uuid.V5Func},
|
||||||
|
{name: "values", fn: stdlib.ValuesFunc},
|
||||||
|
{name: "zipmap", fn: stdlib.ZipmapFunc},
|
||||||
|
}
|
||||||
|
|
||||||
|
// indexOfFunc constructs a function that finds the element index for a given
|
||||||
|
// value in a list.
|
||||||
|
func indexOfFunc() function.Function {
|
||||||
|
return function.New(&function.Spec{
|
||||||
|
Params: []function.Parameter{
|
||||||
|
{
|
||||||
|
Name: "list",
|
||||||
|
Type: cty.DynamicPseudoType,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "value",
|
||||||
|
Type: cty.DynamicPseudoType,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Type: function.StaticReturnType(cty.Number),
|
||||||
|
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
|
||||||
|
if !(args[0].Type().IsListType() || args[0].Type().IsTupleType()) {
|
||||||
|
return cty.NilVal, errors.New("argument must be a list or tuple")
|
||||||
|
}
|
||||||
|
|
||||||
|
if !args[0].IsKnown() {
|
||||||
|
return cty.UnknownVal(cty.Number), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if args[0].LengthInt() == 0 { // Easy path
|
||||||
|
return cty.NilVal, errors.New("cannot search an empty list")
|
||||||
|
}
|
||||||
|
|
||||||
|
for it := args[0].ElementIterator(); it.Next(); {
|
||||||
|
i, v := it.Element()
|
||||||
|
eq, err := stdlib.Equal(v, args[1])
|
||||||
|
if err != nil {
|
||||||
|
return cty.NilVal, err
|
||||||
|
}
|
||||||
|
if !eq.IsKnown() {
|
||||||
|
return cty.UnknownVal(cty.Number), nil
|
||||||
|
}
|
||||||
|
if eq.True() {
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return cty.NilVal, errors.New("item not found")
|
||||||
|
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// basenameFunc constructs a function that returns the last element of a path.
|
||||||
|
func basenameFunc() function.Function {
|
||||||
|
return function.New(&function.Spec{
|
||||||
|
Params: []function.Parameter{
|
||||||
|
{
|
||||||
|
Name: "path",
|
||||||
|
Type: cty.String,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Type: function.StaticReturnType(cty.String),
|
||||||
|
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
|
||||||
|
in := args[0].AsString()
|
||||||
|
return cty.StringVal(path.Base(in)), nil
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// dirnameFunc constructs a function that returns the directory of a path.
|
||||||
|
func dirnameFunc() function.Function {
|
||||||
|
return function.New(&function.Spec{
|
||||||
|
Params: []function.Parameter{
|
||||||
|
{
|
||||||
|
Name: "path",
|
||||||
|
Type: cty.String,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Type: function.StaticReturnType(cty.String),
|
||||||
|
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
|
||||||
|
in := args[0].AsString()
|
||||||
|
return cty.StringVal(path.Dir(in)), nil
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// sanitizyFunc constructs a function that replaces all non-alphanumeric characters with a underscore,
|
||||||
|
// leaving only characters that are valid for a Bake target name.
|
||||||
|
func sanitizeFunc() function.Function {
|
||||||
|
return function.New(&function.Spec{
|
||||||
|
Params: []function.Parameter{
|
||||||
|
{
|
||||||
|
Name: "name",
|
||||||
|
Type: cty.String,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Type: function.StaticReturnType(cty.String),
|
||||||
|
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
|
||||||
|
in := args[0].AsString()
|
||||||
|
// only [a-zA-Z0-9_-]+ is allowed
|
||||||
|
var b strings.Builder
|
||||||
|
for _, r := range in {
|
||||||
|
if r >= 'a' && r <= 'z' || r >= 'A' && r <= 'Z' || r >= '0' && r <= '9' || r == '_' || r == '-' {
|
||||||
|
b.WriteRune(r)
|
||||||
|
} else {
|
||||||
|
b.WriteRune('_')
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return cty.StringVal(b.String()), nil
|
||||||
|
},
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// timestampFunc constructs a function that returns a string representation of the current date and time.
|
// timestampFunc constructs a function that returns a string representation of the current date and time.
|
||||||
//
|
//
|
||||||
// This function was imported from terraform's datetime utilities.
|
// This function was imported from terraform's datetime utilities.
|
||||||
var timestampFunc = function.New(&function.Spec{
|
func timestampFunc() function.Function {
|
||||||
|
return function.New(&function.Spec{
|
||||||
Params: []function.Parameter{},
|
Params: []function.Parameter{},
|
||||||
Type: function.StaticReturnType(cty.String),
|
Type: function.StaticReturnType(cty.String),
|
||||||
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
|
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
|
||||||
return cty.StringVal(time.Now().UTC().Format(time.RFC3339)), nil
|
return cty.StringVal(time.Now().UTC().Format(time.RFC3339)), nil
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func Stdlib() map[string]function.Function {
|
func Stdlib() map[string]function.Function {
|
||||||
funcs := make(map[string]function.Function, len(stdlibFunctions))
|
funcs := make(map[string]function.Function, len(stdlibFunctions))
|
||||||
for k, v := range stdlibFunctions {
|
for _, v := range stdlibFunctions {
|
||||||
funcs[k] = v
|
if v.factory != nil {
|
||||||
|
funcs[v.name] = v.factory()
|
||||||
|
} else {
|
||||||
|
funcs[v.name] = v.fn
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return funcs
|
return funcs
|
||||||
}
|
}
|
||||||
|
|||||||
199
bake/hclparser/stdlib_test.go
Normal file
199
bake/hclparser/stdlib_test.go
Normal file
@@ -0,0 +1,199 @@
|
|||||||
|
package hclparser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
"github.com/zclconf/go-cty/cty"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestIndexOf(t *testing.T) {
|
||||||
|
type testCase struct {
|
||||||
|
input cty.Value
|
||||||
|
key cty.Value
|
||||||
|
want cty.Value
|
||||||
|
wantErr bool
|
||||||
|
}
|
||||||
|
tests := map[string]testCase{
|
||||||
|
"index 0": {
|
||||||
|
input: cty.TupleVal([]cty.Value{cty.StringVal("one"), cty.NumberIntVal(2.0), cty.NumberIntVal(3), cty.StringVal("four")}),
|
||||||
|
key: cty.StringVal("one"),
|
||||||
|
want: cty.NumberIntVal(0),
|
||||||
|
},
|
||||||
|
"index 3": {
|
||||||
|
input: cty.TupleVal([]cty.Value{cty.StringVal("one"), cty.NumberIntVal(2.0), cty.NumberIntVal(3), cty.StringVal("four")}),
|
||||||
|
key: cty.StringVal("four"),
|
||||||
|
want: cty.NumberIntVal(3),
|
||||||
|
},
|
||||||
|
"index -1": {
|
||||||
|
input: cty.TupleVal([]cty.Value{cty.StringVal("one"), cty.NumberIntVal(2.0), cty.NumberIntVal(3), cty.StringVal("four")}),
|
||||||
|
key: cty.StringVal("3"),
|
||||||
|
wantErr: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for name, test := range tests {
|
||||||
|
name, test := name, test
|
||||||
|
t.Run(name, func(t *testing.T) {
|
||||||
|
got, err := indexOfFunc().Call([]cty.Value{test.input, test.key})
|
||||||
|
if test.wantErr {
|
||||||
|
require.Error(t, err)
|
||||||
|
} else {
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, test.want, got)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBasename(t *testing.T) {
|
||||||
|
type testCase struct {
|
||||||
|
input cty.Value
|
||||||
|
want cty.Value
|
||||||
|
wantErr bool
|
||||||
|
}
|
||||||
|
tests := map[string]testCase{
|
||||||
|
"empty": {
|
||||||
|
input: cty.StringVal(""),
|
||||||
|
want: cty.StringVal("."),
|
||||||
|
},
|
||||||
|
"slash": {
|
||||||
|
input: cty.StringVal("/"),
|
||||||
|
want: cty.StringVal("/"),
|
||||||
|
},
|
||||||
|
"simple": {
|
||||||
|
input: cty.StringVal("/foo/bar"),
|
||||||
|
want: cty.StringVal("bar"),
|
||||||
|
},
|
||||||
|
"simple no slash": {
|
||||||
|
input: cty.StringVal("foo/bar"),
|
||||||
|
want: cty.StringVal("bar"),
|
||||||
|
},
|
||||||
|
"dot": {
|
||||||
|
input: cty.StringVal("/foo/bar."),
|
||||||
|
want: cty.StringVal("bar."),
|
||||||
|
},
|
||||||
|
"dotdot": {
|
||||||
|
input: cty.StringVal("/foo/bar.."),
|
||||||
|
want: cty.StringVal("bar.."),
|
||||||
|
},
|
||||||
|
"dotdotdot": {
|
||||||
|
input: cty.StringVal("/foo/bar..."),
|
||||||
|
want: cty.StringVal("bar..."),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for name, test := range tests {
|
||||||
|
name, test := name, test
|
||||||
|
t.Run(name, func(t *testing.T) {
|
||||||
|
got, err := basenameFunc().Call([]cty.Value{test.input})
|
||||||
|
if test.wantErr {
|
||||||
|
require.Error(t, err)
|
||||||
|
} else {
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, test.want, got)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDirname(t *testing.T) {
|
||||||
|
type testCase struct {
|
||||||
|
input cty.Value
|
||||||
|
want cty.Value
|
||||||
|
wantErr bool
|
||||||
|
}
|
||||||
|
tests := map[string]testCase{
|
||||||
|
"empty": {
|
||||||
|
input: cty.StringVal(""),
|
||||||
|
want: cty.StringVal("."),
|
||||||
|
},
|
||||||
|
"slash": {
|
||||||
|
input: cty.StringVal("/"),
|
||||||
|
want: cty.StringVal("/"),
|
||||||
|
},
|
||||||
|
"simple": {
|
||||||
|
input: cty.StringVal("/foo/bar"),
|
||||||
|
want: cty.StringVal("/foo"),
|
||||||
|
},
|
||||||
|
"simple no slash": {
|
||||||
|
input: cty.StringVal("foo/bar"),
|
||||||
|
want: cty.StringVal("foo"),
|
||||||
|
},
|
||||||
|
"dot": {
|
||||||
|
input: cty.StringVal("/foo/bar."),
|
||||||
|
want: cty.StringVal("/foo"),
|
||||||
|
},
|
||||||
|
"dotdot": {
|
||||||
|
input: cty.StringVal("/foo/bar.."),
|
||||||
|
want: cty.StringVal("/foo"),
|
||||||
|
},
|
||||||
|
"dotdotdot": {
|
||||||
|
input: cty.StringVal("/foo/bar..."),
|
||||||
|
want: cty.StringVal("/foo"),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for name, test := range tests {
|
||||||
|
name, test := name, test
|
||||||
|
t.Run(name, func(t *testing.T) {
|
||||||
|
got, err := dirnameFunc().Call([]cty.Value{test.input})
|
||||||
|
if test.wantErr {
|
||||||
|
require.Error(t, err)
|
||||||
|
} else {
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, test.want, got)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSanitize(t *testing.T) {
|
||||||
|
type testCase struct {
|
||||||
|
input cty.Value
|
||||||
|
want cty.Value
|
||||||
|
}
|
||||||
|
tests := map[string]testCase{
|
||||||
|
"empty": {
|
||||||
|
input: cty.StringVal(""),
|
||||||
|
want: cty.StringVal(""),
|
||||||
|
},
|
||||||
|
"simple": {
|
||||||
|
input: cty.StringVal("foo/bar"),
|
||||||
|
want: cty.StringVal("foo_bar"),
|
||||||
|
},
|
||||||
|
"simple no slash": {
|
||||||
|
input: cty.StringVal("foobar"),
|
||||||
|
want: cty.StringVal("foobar"),
|
||||||
|
},
|
||||||
|
"dot": {
|
||||||
|
input: cty.StringVal("foo/bar."),
|
||||||
|
want: cty.StringVal("foo_bar_"),
|
||||||
|
},
|
||||||
|
"dotdot": {
|
||||||
|
input: cty.StringVal("foo/bar.."),
|
||||||
|
want: cty.StringVal("foo_bar__"),
|
||||||
|
},
|
||||||
|
"dotdotdot": {
|
||||||
|
input: cty.StringVal("foo/bar..."),
|
||||||
|
want: cty.StringVal("foo_bar___"),
|
||||||
|
},
|
||||||
|
"utf8": {
|
||||||
|
input: cty.StringVal("foo/🍕bar"),
|
||||||
|
want: cty.StringVal("foo__bar"),
|
||||||
|
},
|
||||||
|
"symbols": {
|
||||||
|
input: cty.StringVal("foo/bar!@(ba+z)"),
|
||||||
|
want: cty.StringVal("foo_bar___ba_z_"),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for name, test := range tests {
|
||||||
|
name, test := name, test
|
||||||
|
t.Run(name, func(t *testing.T) {
|
||||||
|
got, err := sanitizeFunc().Call([]cty.Value{test.input})
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, test.want, got)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -4,6 +4,8 @@ import (
|
|||||||
"archive/tar"
|
"archive/tar"
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/docker/buildx/builder"
|
"github.com/docker/buildx/builder"
|
||||||
controllerapi "github.com/docker/buildx/controller/pb"
|
controllerapi "github.com/docker/buildx/controller/pb"
|
||||||
@@ -23,13 +25,34 @@ type Input struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func ReadRemoteFiles(ctx context.Context, nodes []builder.Node, url string, names []string, pw progress.Writer) ([]File, *Input, error) {
|
func ReadRemoteFiles(ctx context.Context, nodes []builder.Node, url string, names []string, pw progress.Writer) ([]File, *Input, error) {
|
||||||
var session []session.Attachable
|
var sessions []session.Attachable
|
||||||
var filename string
|
var filename string
|
||||||
|
|
||||||
st, ok := dockerui.DetectGitContext(url, false)
|
st, ok := dockerui.DetectGitContext(url, false)
|
||||||
if ok {
|
if ok {
|
||||||
ssh, err := controllerapi.CreateSSH([]*controllerapi.SSH{{ID: "default"}})
|
if ssh, err := controllerapi.CreateSSH([]*controllerapi.SSH{{
|
||||||
if err == nil {
|
ID: "default",
|
||||||
session = append(session, ssh)
|
Paths: strings.Split(os.Getenv("BUILDX_BAKE_GIT_SSH"), ","),
|
||||||
|
}}); err == nil {
|
||||||
|
sessions = append(sessions, ssh)
|
||||||
|
}
|
||||||
|
var gitAuthSecrets []*controllerapi.Secret
|
||||||
|
if _, ok := os.LookupEnv("BUILDX_BAKE_GIT_AUTH_TOKEN"); ok {
|
||||||
|
gitAuthSecrets = append(gitAuthSecrets, &controllerapi.Secret{
|
||||||
|
ID: llb.GitAuthTokenKey,
|
||||||
|
Env: "BUILDX_BAKE_GIT_AUTH_TOKEN",
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if _, ok := os.LookupEnv("BUILDX_BAKE_GIT_AUTH_HEADER"); ok {
|
||||||
|
gitAuthSecrets = append(gitAuthSecrets, &controllerapi.Secret{
|
||||||
|
ID: llb.GitAuthHeaderKey,
|
||||||
|
Env: "BUILDX_BAKE_GIT_AUTH_HEADER",
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if len(gitAuthSecrets) > 0 {
|
||||||
|
if secrets, err := controllerapi.CreateSecrets(gitAuthSecrets); err == nil {
|
||||||
|
sessions = append(sessions, secrets)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
st, filename, ok = dockerui.DetectHTTPContext(url)
|
st, filename, ok = dockerui.DetectHTTPContext(url)
|
||||||
@@ -59,7 +82,7 @@ func ReadRemoteFiles(ctx context.Context, nodes []builder.Node, url string, name
|
|||||||
|
|
||||||
ch, done := progress.NewChannel(pw)
|
ch, done := progress.NewChannel(pw)
|
||||||
defer func() { <-done }()
|
defer func() { <-done }()
|
||||||
_, err = c.Build(ctx, client.SolveOpt{Session: session}, "buildx", func(ctx context.Context, c gwclient.Client) (*gwclient.Result, error) {
|
_, err = c.Build(ctx, client.SolveOpt{Session: sessions, Internal: true}, "buildx", func(ctx context.Context, c gwclient.Client) (*gwclient.Result, error) {
|
||||||
def, err := st.Marshal(ctx)
|
def, err := st.Marshal(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
|||||||
1336
build/build.go
1336
build/build.go
File diff suppressed because it is too large
Load Diff
62
build/dial.go
Normal file
62
build/dial.go
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
package build
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
stderrors "errors"
|
||||||
|
"net"
|
||||||
|
|
||||||
|
"github.com/containerd/platforms"
|
||||||
|
"github.com/docker/buildx/builder"
|
||||||
|
"github.com/docker/buildx/util/progress"
|
||||||
|
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Dial(ctx context.Context, nodes []builder.Node, pw progress.Writer, platform *v1.Platform) (net.Conn, error) {
|
||||||
|
nodes, err := filterAvailableNodes(nodes)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nil, errors.New("no nodes available")
|
||||||
|
}
|
||||||
|
|
||||||
|
var pls []v1.Platform
|
||||||
|
if platform != nil {
|
||||||
|
pls = []v1.Platform{*platform}
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := map[string]Options{"default": {Platforms: pls}}
|
||||||
|
resolved, err := resolveDrivers(ctx, nodes, opts, pw)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var dialError error
|
||||||
|
for _, ls := range resolved {
|
||||||
|
for _, rn := range ls {
|
||||||
|
if platform != nil {
|
||||||
|
p := *platform
|
||||||
|
var found bool
|
||||||
|
for _, pp := range rn.platforms {
|
||||||
|
if platforms.Only(p).Match(pp) {
|
||||||
|
found = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !found {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
conn, err := nodes[rn.driverIndex].Driver.Dial(ctx)
|
||||||
|
if err == nil {
|
||||||
|
return conn, nil
|
||||||
|
}
|
||||||
|
dialError = stderrors.Join(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, errors.Wrap(dialError, "no nodes available")
|
||||||
|
}
|
||||||
352
build/driver.go
Normal file
352
build/driver.go
Normal file
@@ -0,0 +1,352 @@
|
|||||||
|
package build
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/containerd/platforms"
|
||||||
|
"github.com/docker/buildx/builder"
|
||||||
|
"github.com/docker/buildx/driver"
|
||||||
|
"github.com/docker/buildx/util/progress"
|
||||||
|
"github.com/moby/buildkit/client"
|
||||||
|
gateway "github.com/moby/buildkit/frontend/gateway/client"
|
||||||
|
"github.com/moby/buildkit/util/flightcontrol"
|
||||||
|
"github.com/moby/buildkit/util/tracing"
|
||||||
|
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"go.opentelemetry.io/otel/trace"
|
||||||
|
"golang.org/x/sync/errgroup"
|
||||||
|
)
|
||||||
|
|
||||||
|
type resolvedNode struct {
|
||||||
|
resolver *nodeResolver
|
||||||
|
driverIndex int
|
||||||
|
platforms []specs.Platform
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dp resolvedNode) Node() builder.Node {
|
||||||
|
return dp.resolver.nodes[dp.driverIndex]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dp resolvedNode) Client(ctx context.Context) (*client.Client, error) {
|
||||||
|
clients, err := dp.resolver.boot(ctx, []int{dp.driverIndex}, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return clients[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dp resolvedNode) BuildOpts(ctx context.Context) (gateway.BuildOpts, error) {
|
||||||
|
opts, err := dp.resolver.opts(ctx, []int{dp.driverIndex}, nil)
|
||||||
|
if err != nil {
|
||||||
|
return gateway.BuildOpts{}, err
|
||||||
|
}
|
||||||
|
return opts[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type matchMaker func(specs.Platform) platforms.MatchComparer
|
||||||
|
|
||||||
|
type cachedGroup[T any] struct {
|
||||||
|
g flightcontrol.Group[T]
|
||||||
|
cache map[int]T
|
||||||
|
cacheMu sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
func newCachedGroup[T any]() cachedGroup[T] {
|
||||||
|
return cachedGroup[T]{
|
||||||
|
cache: map[int]T{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type nodeResolver struct {
|
||||||
|
nodes []builder.Node
|
||||||
|
clients cachedGroup[*client.Client]
|
||||||
|
buildOpts cachedGroup[gateway.BuildOpts]
|
||||||
|
}
|
||||||
|
|
||||||
|
func resolveDrivers(ctx context.Context, nodes []builder.Node, opt map[string]Options, pw progress.Writer) (map[string][]*resolvedNode, error) {
|
||||||
|
driverRes := newDriverResolver(nodes)
|
||||||
|
drivers, err := driverRes.Resolve(ctx, opt, pw)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return drivers, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func newDriverResolver(nodes []builder.Node) *nodeResolver {
|
||||||
|
r := &nodeResolver{
|
||||||
|
nodes: nodes,
|
||||||
|
clients: newCachedGroup[*client.Client](),
|
||||||
|
buildOpts: newCachedGroup[gateway.BuildOpts](),
|
||||||
|
}
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *nodeResolver) Resolve(ctx context.Context, opt map[string]Options, pw progress.Writer) (map[string][]*resolvedNode, error) {
|
||||||
|
if len(r.nodes) == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
nodes := map[string][]*resolvedNode{}
|
||||||
|
for k, opt := range opt {
|
||||||
|
node, perfect, err := r.resolve(ctx, opt.Platforms, pw, platforms.OnlyStrict, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if !perfect {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
nodes[k] = node
|
||||||
|
}
|
||||||
|
if len(nodes) != len(opt) {
|
||||||
|
// if we didn't get a perfect match, we need to boot all drivers
|
||||||
|
allIndexes := make([]int, len(r.nodes))
|
||||||
|
for i := range allIndexes {
|
||||||
|
allIndexes[i] = i
|
||||||
|
}
|
||||||
|
|
||||||
|
clients, err := r.boot(ctx, allIndexes, pw)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
eg, egCtx := errgroup.WithContext(ctx)
|
||||||
|
workers := make([][]specs.Platform, len(clients))
|
||||||
|
for i, c := range clients {
|
||||||
|
i, c := i, c
|
||||||
|
if c == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
eg.Go(func() error {
|
||||||
|
ww, err := c.ListWorkers(egCtx)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "listing workers")
|
||||||
|
}
|
||||||
|
|
||||||
|
ps := make(map[string]specs.Platform, len(ww))
|
||||||
|
for _, w := range ww {
|
||||||
|
for _, p := range w.Platforms {
|
||||||
|
pk := platforms.Format(platforms.Normalize(p))
|
||||||
|
ps[pk] = p
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, p := range ps {
|
||||||
|
workers[i] = append(workers[i], p)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if err := eg.Wait(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// then we can attempt to match against all the available platforms
|
||||||
|
// (this time we don't care about imperfect matches)
|
||||||
|
nodes = map[string][]*resolvedNode{}
|
||||||
|
for k, opt := range opt {
|
||||||
|
node, _, err := r.resolve(ctx, opt.Platforms, pw, platforms.Only, func(idx int, n builder.Node) []specs.Platform {
|
||||||
|
return workers[idx]
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
nodes[k] = node
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
idxs := make([]int, 0, len(r.nodes))
|
||||||
|
for _, nodes := range nodes {
|
||||||
|
for _, node := range nodes {
|
||||||
|
idxs = append(idxs, node.driverIndex)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// preload capabilities
|
||||||
|
span, ctx := tracing.StartSpan(ctx, "load buildkit capabilities", trace.WithSpanKind(trace.SpanKindInternal))
|
||||||
|
_, err := r.opts(ctx, idxs, pw)
|
||||||
|
tracing.FinishWithError(span, err)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *nodeResolver) resolve(ctx context.Context, ps []specs.Platform, pw progress.Writer, matcher matchMaker, additional func(idx int, n builder.Node) []specs.Platform) ([]*resolvedNode, bool, error) {
|
||||||
|
if len(r.nodes) == 0 {
|
||||||
|
return nil, true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
perfect := true
|
||||||
|
nodeIdxs := make([]int, 0)
|
||||||
|
for _, p := range ps {
|
||||||
|
idx := r.get(p, matcher, additional)
|
||||||
|
if idx == -1 {
|
||||||
|
idx = 0
|
||||||
|
perfect = false
|
||||||
|
}
|
||||||
|
nodeIdxs = append(nodeIdxs, idx)
|
||||||
|
}
|
||||||
|
|
||||||
|
var nodes []*resolvedNode
|
||||||
|
if len(nodeIdxs) == 0 {
|
||||||
|
nodes = append(nodes, &resolvedNode{
|
||||||
|
resolver: r,
|
||||||
|
driverIndex: 0,
|
||||||
|
})
|
||||||
|
nodeIdxs = append(nodeIdxs, 0)
|
||||||
|
} else {
|
||||||
|
for i, idx := range nodeIdxs {
|
||||||
|
node := &resolvedNode{
|
||||||
|
resolver: r,
|
||||||
|
driverIndex: idx,
|
||||||
|
}
|
||||||
|
if len(ps) > 0 {
|
||||||
|
node.platforms = []specs.Platform{ps[i]}
|
||||||
|
}
|
||||||
|
nodes = append(nodes, node)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
nodes = recombineNodes(nodes)
|
||||||
|
if _, err := r.boot(ctx, nodeIdxs, pw); err != nil {
|
||||||
|
return nil, false, err
|
||||||
|
}
|
||||||
|
return nodes, perfect, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *nodeResolver) get(p specs.Platform, matcher matchMaker, additionalPlatforms func(int, builder.Node) []specs.Platform) int {
|
||||||
|
best := -1
|
||||||
|
bestPlatform := specs.Platform{}
|
||||||
|
for i, node := range r.nodes {
|
||||||
|
platforms := node.Platforms
|
||||||
|
if additionalPlatforms != nil {
|
||||||
|
platforms = append([]specs.Platform{}, platforms...)
|
||||||
|
platforms = append(platforms, additionalPlatforms(i, node)...)
|
||||||
|
}
|
||||||
|
for _, p2 := range platforms {
|
||||||
|
m := matcher(p2)
|
||||||
|
if !m.Match(p) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if best == -1 {
|
||||||
|
best = i
|
||||||
|
bestPlatform = p2
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if matcher(p2).Less(p, bestPlatform) {
|
||||||
|
best = i
|
||||||
|
bestPlatform = p2
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return best
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *nodeResolver) boot(ctx context.Context, idxs []int, pw progress.Writer) ([]*client.Client, error) {
|
||||||
|
clients := make([]*client.Client, len(idxs))
|
||||||
|
|
||||||
|
baseCtx := ctx
|
||||||
|
eg, ctx := errgroup.WithContext(ctx)
|
||||||
|
|
||||||
|
for i, idx := range idxs {
|
||||||
|
i, idx := i, idx
|
||||||
|
eg.Go(func() error {
|
||||||
|
c, err := r.clients.g.Do(ctx, fmt.Sprint(idx), func(ctx context.Context) (*client.Client, error) {
|
||||||
|
if r.nodes[idx].Driver == nil {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
r.clients.cacheMu.Lock()
|
||||||
|
c, ok := r.clients.cache[idx]
|
||||||
|
r.clients.cacheMu.Unlock()
|
||||||
|
if ok {
|
||||||
|
return c, nil
|
||||||
|
}
|
||||||
|
c, err := driver.Boot(ctx, baseCtx, r.nodes[idx].Driver, pw)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
r.clients.cacheMu.Lock()
|
||||||
|
r.clients.cache[idx] = c
|
||||||
|
r.clients.cacheMu.Unlock()
|
||||||
|
return c, nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
clients[i] = c
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if err := eg.Wait(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return clients, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *nodeResolver) opts(ctx context.Context, idxs []int, pw progress.Writer) ([]gateway.BuildOpts, error) {
|
||||||
|
clients, err := r.boot(ctx, idxs, pw)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
bopts := make([]gateway.BuildOpts, len(clients))
|
||||||
|
eg, ctx := errgroup.WithContext(ctx)
|
||||||
|
for i, idxs := range idxs {
|
||||||
|
i, idx := i, idxs
|
||||||
|
c := clients[i]
|
||||||
|
if c == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
eg.Go(func() error {
|
||||||
|
opt, err := r.buildOpts.g.Do(ctx, fmt.Sprint(idx), func(ctx context.Context) (gateway.BuildOpts, error) {
|
||||||
|
r.buildOpts.cacheMu.Lock()
|
||||||
|
opt, ok := r.buildOpts.cache[idx]
|
||||||
|
r.buildOpts.cacheMu.Unlock()
|
||||||
|
if ok {
|
||||||
|
return opt, nil
|
||||||
|
}
|
||||||
|
_, err := c.Build(ctx, client.SolveOpt{
|
||||||
|
Internal: true,
|
||||||
|
}, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
|
||||||
|
opt = c.BuildOpts()
|
||||||
|
return nil, nil
|
||||||
|
}, nil)
|
||||||
|
if err != nil {
|
||||||
|
return gateway.BuildOpts{}, err
|
||||||
|
}
|
||||||
|
r.buildOpts.cacheMu.Lock()
|
||||||
|
r.buildOpts.cache[idx] = opt
|
||||||
|
r.buildOpts.cacheMu.Unlock()
|
||||||
|
return opt, err
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
bopts[i] = opt
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if err := eg.Wait(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return bopts, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// recombineDriverPairs recombines resolved nodes that are on the same driver
|
||||||
|
// back together into a single node.
|
||||||
|
func recombineNodes(nodes []*resolvedNode) []*resolvedNode {
|
||||||
|
result := make([]*resolvedNode, 0, len(nodes))
|
||||||
|
lookup := map[int]int{}
|
||||||
|
for _, node := range nodes {
|
||||||
|
if idx, ok := lookup[node.driverIndex]; ok {
|
||||||
|
result[idx].platforms = append(result[idx].platforms, node.platforms...)
|
||||||
|
} else {
|
||||||
|
lookup[node.driverIndex] = len(result)
|
||||||
|
result = append(result, node)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
315
build/driver_test.go
Normal file
315
build/driver_test.go
Normal file
@@ -0,0 +1,315 @@
|
|||||||
|
package build
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"sort"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/containerd/platforms"
|
||||||
|
"github.com/docker/buildx/builder"
|
||||||
|
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestFindDriverSanity(t *testing.T) {
|
||||||
|
r := makeTestResolver(map[string][]specs.Platform{
|
||||||
|
"aaa": {platforms.DefaultSpec()},
|
||||||
|
})
|
||||||
|
|
||||||
|
res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.DefaultSpec()}, nil, platforms.OnlyStrict, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.True(t, perfect)
|
||||||
|
require.Len(t, res, 1)
|
||||||
|
require.Equal(t, 0, res[0].driverIndex)
|
||||||
|
require.Equal(t, "aaa", res[0].Node().Builder)
|
||||||
|
require.Equal(t, []specs.Platform{platforms.DefaultSpec()}, res[0].platforms)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFindDriverEmpty(t *testing.T) {
|
||||||
|
r := makeTestResolver(nil)
|
||||||
|
|
||||||
|
res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.DefaultSpec()}, nil, platforms.Only, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.True(t, perfect)
|
||||||
|
require.Nil(t, res)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFindDriverWeirdName(t *testing.T) {
|
||||||
|
r := makeTestResolver(map[string][]specs.Platform{
|
||||||
|
"aaa": {platforms.MustParse("linux/amd64")},
|
||||||
|
"bbb": {platforms.MustParse("linux/foobar")},
|
||||||
|
})
|
||||||
|
|
||||||
|
// find first platform
|
||||||
|
res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/foobar")}, nil, platforms.Only, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.True(t, perfect)
|
||||||
|
require.Len(t, res, 1)
|
||||||
|
require.Equal(t, 1, res[0].driverIndex)
|
||||||
|
require.Equal(t, "bbb", res[0].Node().Builder)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFindDriverUnknown(t *testing.T) {
|
||||||
|
r := makeTestResolver(map[string][]specs.Platform{
|
||||||
|
"aaa": {platforms.MustParse("linux/amd64")},
|
||||||
|
})
|
||||||
|
|
||||||
|
res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/riscv64")}, nil, platforms.Only, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.False(t, perfect)
|
||||||
|
require.Len(t, res, 1)
|
||||||
|
require.Equal(t, 0, res[0].driverIndex)
|
||||||
|
require.Equal(t, "aaa", res[0].Node().Builder)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSelectNodeSinglePlatform(t *testing.T) {
|
||||||
|
r := makeTestResolver(map[string][]specs.Platform{
|
||||||
|
"aaa": {platforms.MustParse("linux/amd64")},
|
||||||
|
"bbb": {platforms.MustParse("linux/riscv64")},
|
||||||
|
})
|
||||||
|
|
||||||
|
// find first platform
|
||||||
|
res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/amd64")}, nil, platforms.Only, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.True(t, perfect)
|
||||||
|
require.Len(t, res, 1)
|
||||||
|
require.Equal(t, 0, res[0].driverIndex)
|
||||||
|
require.Equal(t, "aaa", res[0].Node().Builder)
|
||||||
|
|
||||||
|
// find second platform
|
||||||
|
res, perfect, err = r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/riscv64")}, nil, platforms.Only, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.True(t, perfect)
|
||||||
|
require.Len(t, res, 1)
|
||||||
|
require.Equal(t, 1, res[0].driverIndex)
|
||||||
|
require.Equal(t, "bbb", res[0].Node().Builder)
|
||||||
|
|
||||||
|
// find an unknown platform, should match the first driver
|
||||||
|
res, perfect, err = r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/s390x")}, nil, platforms.Only, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.False(t, perfect)
|
||||||
|
require.Len(t, res, 1)
|
||||||
|
require.Equal(t, 0, res[0].driverIndex)
|
||||||
|
require.Equal(t, "aaa", res[0].Node().Builder)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSelectNodeMultiPlatform(t *testing.T) {
|
||||||
|
r := makeTestResolver(map[string][]specs.Platform{
|
||||||
|
"aaa": {platforms.MustParse("linux/amd64"), platforms.MustParse("linux/arm64")},
|
||||||
|
"bbb": {platforms.MustParse("linux/riscv64")},
|
||||||
|
})
|
||||||
|
|
||||||
|
res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/amd64")}, nil, platforms.Only, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.True(t, perfect)
|
||||||
|
require.Len(t, res, 1)
|
||||||
|
require.Equal(t, 0, res[0].driverIndex)
|
||||||
|
require.Equal(t, "aaa", res[0].Node().Builder)
|
||||||
|
|
||||||
|
res, perfect, err = r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm64")}, nil, platforms.Only, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.True(t, perfect)
|
||||||
|
require.Len(t, res, 1)
|
||||||
|
require.Equal(t, 0, res[0].driverIndex)
|
||||||
|
require.Equal(t, "aaa", res[0].Node().Builder)
|
||||||
|
|
||||||
|
res, perfect, err = r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/riscv64")}, nil, platforms.Only, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.True(t, perfect)
|
||||||
|
require.Len(t, res, 1)
|
||||||
|
require.Equal(t, 1, res[0].driverIndex)
|
||||||
|
require.Equal(t, "bbb", res[0].Node().Builder)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSelectNodeNonStrict(t *testing.T) {
|
||||||
|
r := makeTestResolver(map[string][]specs.Platform{
|
||||||
|
"aaa": {platforms.MustParse("linux/amd64")},
|
||||||
|
"bbb": {platforms.MustParse("linux/arm64")},
|
||||||
|
})
|
||||||
|
|
||||||
|
// arm64 should match itself
|
||||||
|
res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm64")}, nil, platforms.Only, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.True(t, perfect)
|
||||||
|
require.Len(t, res, 1)
|
||||||
|
require.Equal(t, "bbb", res[0].Node().Builder)
|
||||||
|
|
||||||
|
// arm64 may support arm/v8
|
||||||
|
res, perfect, err = r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm/v8")}, nil, platforms.Only, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.True(t, perfect)
|
||||||
|
require.Len(t, res, 1)
|
||||||
|
require.Equal(t, "bbb", res[0].Node().Builder)
|
||||||
|
|
||||||
|
// arm64 may support arm/v7
|
||||||
|
res, perfect, err = r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm/v7")}, nil, platforms.Only, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.True(t, perfect)
|
||||||
|
require.Len(t, res, 1)
|
||||||
|
require.Equal(t, "bbb", res[0].Node().Builder)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSelectNodeNonStrictARM(t *testing.T) {
|
||||||
|
r := makeTestResolver(map[string][]specs.Platform{
|
||||||
|
"aaa": {platforms.MustParse("linux/amd64")},
|
||||||
|
"bbb": {platforms.MustParse("linux/arm64")},
|
||||||
|
"ccc": {platforms.MustParse("linux/arm/v8")},
|
||||||
|
})
|
||||||
|
|
||||||
|
res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm/v8")}, nil, platforms.Only, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.True(t, perfect)
|
||||||
|
require.Len(t, res, 1)
|
||||||
|
require.Equal(t, "ccc", res[0].Node().Builder)
|
||||||
|
|
||||||
|
res, perfect, err = r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm/v7")}, nil, platforms.Only, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.True(t, perfect)
|
||||||
|
require.Len(t, res, 1)
|
||||||
|
require.Equal(t, "ccc", res[0].Node().Builder)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSelectNodeNonStrictLower(t *testing.T) {
|
||||||
|
r := makeTestResolver(map[string][]specs.Platform{
|
||||||
|
"aaa": {platforms.MustParse("linux/amd64")},
|
||||||
|
"bbb": {platforms.MustParse("linux/arm/v7")},
|
||||||
|
})
|
||||||
|
|
||||||
|
// v8 can't be built on v7 (so we should select the default)...
|
||||||
|
res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm/v8")}, nil, platforms.Only, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.False(t, perfect)
|
||||||
|
require.Len(t, res, 1)
|
||||||
|
require.Equal(t, "aaa", res[0].Node().Builder)
|
||||||
|
|
||||||
|
// ...but v6 can be built on v8
|
||||||
|
res, perfect, err = r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm/v6")}, nil, platforms.Only, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.True(t, perfect)
|
||||||
|
require.Len(t, res, 1)
|
||||||
|
require.Equal(t, "bbb", res[0].Node().Builder)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSelectNodePreferStart(t *testing.T) {
|
||||||
|
r := makeTestResolver(map[string][]specs.Platform{
|
||||||
|
"aaa": {platforms.MustParse("linux/amd64")},
|
||||||
|
"bbb": {platforms.MustParse("linux/riscv64")},
|
||||||
|
"ccc": {platforms.MustParse("linux/riscv64")},
|
||||||
|
})
|
||||||
|
|
||||||
|
res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/riscv64")}, nil, platforms.Only, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.True(t, perfect)
|
||||||
|
require.Len(t, res, 1)
|
||||||
|
require.Equal(t, "bbb", res[0].Node().Builder)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSelectNodePreferExact(t *testing.T) {
|
||||||
|
r := makeTestResolver(map[string][]specs.Platform{
|
||||||
|
"aaa": {platforms.MustParse("linux/arm/v8")},
|
||||||
|
"bbb": {platforms.MustParse("linux/arm/v7")},
|
||||||
|
})
|
||||||
|
|
||||||
|
res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm/v7")}, nil, platforms.Only, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.True(t, perfect)
|
||||||
|
require.Len(t, res, 1)
|
||||||
|
require.Equal(t, "bbb", res[0].Node().Builder)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSelectNodeNoPlatform(t *testing.T) {
|
||||||
|
r := makeTestResolver(map[string][]specs.Platform{
|
||||||
|
"aaa": {platforms.MustParse("linux/foobar")},
|
||||||
|
"bbb": {platforms.DefaultSpec()},
|
||||||
|
})
|
||||||
|
|
||||||
|
res, perfect, err := r.resolve(context.TODO(), []specs.Platform{}, nil, platforms.Only, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.True(t, perfect)
|
||||||
|
require.Len(t, res, 1)
|
||||||
|
require.Equal(t, "aaa", res[0].Node().Builder)
|
||||||
|
require.Empty(t, res[0].platforms)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSelectNodeAdditionalPlatforms(t *testing.T) {
|
||||||
|
r := makeTestResolver(map[string][]specs.Platform{
|
||||||
|
"aaa": {platforms.MustParse("linux/amd64")},
|
||||||
|
"bbb": {platforms.MustParse("linux/arm/v8")},
|
||||||
|
})
|
||||||
|
|
||||||
|
res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm/v7")}, nil, platforms.Only, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.True(t, perfect)
|
||||||
|
require.Len(t, res, 1)
|
||||||
|
require.Equal(t, "bbb", res[0].Node().Builder)
|
||||||
|
|
||||||
|
res, perfect, err = r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm/v7")}, nil, platforms.Only, func(idx int, n builder.Node) []specs.Platform {
|
||||||
|
if n.Builder == "aaa" {
|
||||||
|
return []specs.Platform{platforms.MustParse("linux/arm/v7")}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.True(t, perfect)
|
||||||
|
require.Len(t, res, 1)
|
||||||
|
require.Equal(t, "aaa", res[0].Node().Builder)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSplitNodeMultiPlatform(t *testing.T) {
|
||||||
|
r := makeTestResolver(map[string][]specs.Platform{
|
||||||
|
"aaa": {platforms.MustParse("linux/amd64"), platforms.MustParse("linux/arm64")},
|
||||||
|
"bbb": {platforms.MustParse("linux/riscv64")},
|
||||||
|
})
|
||||||
|
|
||||||
|
res, perfect, err := r.resolve(context.TODO(), []specs.Platform{
|
||||||
|
platforms.MustParse("linux/amd64"),
|
||||||
|
platforms.MustParse("linux/arm64"),
|
||||||
|
}, nil, platforms.Only, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.True(t, perfect)
|
||||||
|
require.Len(t, res, 1)
|
||||||
|
require.Equal(t, "aaa", res[0].Node().Builder)
|
||||||
|
|
||||||
|
res, perfect, err = r.resolve(context.TODO(), []specs.Platform{
|
||||||
|
platforms.MustParse("linux/amd64"),
|
||||||
|
platforms.MustParse("linux/riscv64"),
|
||||||
|
}, nil, platforms.Only, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.True(t, perfect)
|
||||||
|
require.Len(t, res, 2)
|
||||||
|
require.Equal(t, "aaa", res[0].Node().Builder)
|
||||||
|
require.Equal(t, "bbb", res[1].Node().Builder)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSplitNodeMultiPlatformNoUnify(t *testing.T) {
|
||||||
|
r := makeTestResolver(map[string][]specs.Platform{
|
||||||
|
"aaa": {platforms.MustParse("linux/amd64")},
|
||||||
|
"bbb": {platforms.MustParse("linux/amd64"), platforms.MustParse("linux/riscv64")},
|
||||||
|
})
|
||||||
|
|
||||||
|
// the "best" choice would be the node with both platforms, but we're using
|
||||||
|
// a naive algorithm that doesn't try to unify the platforms
|
||||||
|
res, perfect, err := r.resolve(context.TODO(), []specs.Platform{
|
||||||
|
platforms.MustParse("linux/amd64"),
|
||||||
|
platforms.MustParse("linux/riscv64"),
|
||||||
|
}, nil, platforms.Only, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.True(t, perfect)
|
||||||
|
require.Len(t, res, 2)
|
||||||
|
require.Equal(t, "aaa", res[0].Node().Builder)
|
||||||
|
require.Equal(t, "bbb", res[1].Node().Builder)
|
||||||
|
}
|
||||||
|
|
||||||
|
func makeTestResolver(nodes map[string][]specs.Platform) *nodeResolver {
|
||||||
|
var ns []builder.Node
|
||||||
|
for name, platforms := range nodes {
|
||||||
|
ns = append(ns, builder.Node{
|
||||||
|
Builder: name,
|
||||||
|
Platforms: platforms,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
sort.Slice(ns, func(i, j int) bool {
|
||||||
|
return ns[i].Builder < ns[j].Builder
|
||||||
|
})
|
||||||
|
return newDriverResolver(ns)
|
||||||
|
}
|
||||||
89
build/git.go
89
build/git.go
@@ -9,16 +9,27 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/docker/buildx/util/gitutil"
|
"github.com/docker/buildx/util/gitutil"
|
||||||
|
"github.com/docker/buildx/util/osutil"
|
||||||
|
"github.com/moby/buildkit/client"
|
||||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
const DockerfileLabel = "com.docker.image.source.entrypoint"
|
const DockerfileLabel = "com.docker.image.source.entrypoint"
|
||||||
|
|
||||||
func getGitAttributes(ctx context.Context, contextPath string, dockerfilePath string) (res map[string]string, _ error) {
|
type gitAttrsAppendFunc func(so *client.SolveOpt)
|
||||||
res = make(map[string]string)
|
|
||||||
|
func gitAppendNoneFunc(_ *client.SolveOpt) {}
|
||||||
|
|
||||||
|
func getGitAttributes(ctx context.Context, contextPath, dockerfilePath string) (f gitAttrsAppendFunc, err error) {
|
||||||
|
defer func() {
|
||||||
|
if f == nil {
|
||||||
|
f = gitAppendNoneFunc
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
if contextPath == "" {
|
if contextPath == "" {
|
||||||
return
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
setGitLabels := false
|
setGitLabels := false
|
||||||
@@ -37,7 +48,7 @@ func getGitAttributes(ctx context.Context, contextPath string, dockerfilePath st
|
|||||||
}
|
}
|
||||||
|
|
||||||
if !setGitLabels && !setGitInfo {
|
if !setGitLabels && !setGitInfo {
|
||||||
return
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// figure out in which directory the git command needs to run in
|
// figure out in which directory the git command needs to run in
|
||||||
@@ -45,27 +56,34 @@ func getGitAttributes(ctx context.Context, contextPath string, dockerfilePath st
|
|||||||
if filepath.IsAbs(contextPath) {
|
if filepath.IsAbs(contextPath) {
|
||||||
wd = contextPath
|
wd = contextPath
|
||||||
} else {
|
} else {
|
||||||
cwd, _ := os.Getwd()
|
wd, _ = filepath.Abs(filepath.Join(osutil.GetWd(), contextPath))
|
||||||
wd, _ = filepath.Abs(filepath.Join(cwd, contextPath))
|
|
||||||
}
|
}
|
||||||
|
wd = osutil.SanitizePath(wd)
|
||||||
|
|
||||||
gitc, err := gitutil.New(gitutil.WithContext(ctx), gitutil.WithWorkingDir(wd))
|
gitc, err := gitutil.New(gitutil.WithContext(ctx), gitutil.WithWorkingDir(wd))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if st, err := os.Stat(path.Join(wd, ".git")); err == nil && st.IsDir() {
|
if st, err1 := os.Stat(path.Join(wd, ".git")); err1 == nil && st.IsDir() {
|
||||||
return res, errors.New("buildx: git was not found in the system. Current commit information was not captured by the build")
|
return nil, errors.Wrap(err, "git was not found in the system")
|
||||||
}
|
}
|
||||||
return
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if !gitc.IsInsideWorkTree() {
|
if !gitc.IsInsideWorkTree() {
|
||||||
if st, err := os.Stat(path.Join(wd, ".git")); err == nil && st.IsDir() {
|
if st, err := os.Stat(path.Join(wd, ".git")); err == nil && st.IsDir() {
|
||||||
return res, errors.New("buildx: failed to read current commit information with git rev-parse --is-inside-work-tree")
|
return nil, errors.New("failed to read current commit information with git rev-parse --is-inside-work-tree")
|
||||||
}
|
}
|
||||||
return res, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
root, err := gitc.RootDir()
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "failed to get git root dir")
|
||||||
|
}
|
||||||
|
|
||||||
|
res := make(map[string]string)
|
||||||
|
|
||||||
if sha, err := gitc.FullCommit(); err != nil && !gitutil.IsUnknownRevision(err) {
|
if sha, err := gitc.FullCommit(); err != nil && !gitutil.IsUnknownRevision(err) {
|
||||||
return res, errors.Wrapf(err, "buildx: failed to get git commit")
|
return nil, errors.Wrap(err, "failed to get git commit")
|
||||||
} else if sha != "" {
|
} else if sha != "" {
|
||||||
checkDirty := false
|
checkDirty := false
|
||||||
if v, ok := os.LookupEnv("BUILDX_GIT_CHECK_DIRTY"); ok {
|
if v, ok := os.LookupEnv("BUILDX_GIT_CHECK_DIRTY"); ok {
|
||||||
@@ -93,23 +111,50 @@ func getGitAttributes(ctx context.Context, contextPath string, dockerfilePath st
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if setGitLabels {
|
if setGitLabels && root != "" {
|
||||||
if root, err := gitc.RootDir(); err != nil {
|
|
||||||
return res, errors.Wrapf(err, "buildx: failed to get git root dir")
|
|
||||||
} else if root != "" {
|
|
||||||
if dockerfilePath == "" {
|
if dockerfilePath == "" {
|
||||||
dockerfilePath = filepath.Join(wd, "Dockerfile")
|
dockerfilePath = filepath.Join(wd, "Dockerfile")
|
||||||
}
|
}
|
||||||
if !filepath.IsAbs(dockerfilePath) {
|
if !filepath.IsAbs(dockerfilePath) {
|
||||||
cwd, _ := os.Getwd()
|
dockerfilePath = filepath.Join(osutil.GetWd(), dockerfilePath)
|
||||||
dockerfilePath = filepath.Join(cwd, dockerfilePath)
|
|
||||||
}
|
|
||||||
dockerfilePath, _ = filepath.Rel(root, dockerfilePath)
|
|
||||||
if !strings.HasPrefix(dockerfilePath, "..") {
|
|
||||||
res["label:"+DockerfileLabel] = dockerfilePath
|
|
||||||
}
|
}
|
||||||
|
if r, err := filepath.Rel(root, dockerfilePath); err == nil && !strings.HasPrefix(r, "..") {
|
||||||
|
res["label:"+DockerfileLabel] = r
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return func(so *client.SolveOpt) {
|
||||||
|
if so.FrontendAttrs == nil {
|
||||||
|
so.FrontendAttrs = make(map[string]string)
|
||||||
|
}
|
||||||
|
for k, v := range res {
|
||||||
|
so.FrontendAttrs[k] = v
|
||||||
|
}
|
||||||
|
|
||||||
|
if !setGitInfo || root == "" {
|
||||||
return
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for key, mount := range so.LocalMounts {
|
||||||
|
fs, ok := mount.(*fs)
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
dir, err := filepath.EvalSymlinks(fs.dir) // keep same behavior as fsutil.NewFS
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
dir, err = filepath.Abs(dir)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if lp, err := osutil.GetLongPathName(dir); err == nil {
|
||||||
|
dir = lp
|
||||||
|
}
|
||||||
|
dir = osutil.SanitizePath(dir)
|
||||||
|
if r, err := filepath.Rel(root, dir); err == nil && !strings.HasPrefix(r, "..") {
|
||||||
|
so.FrontendAttrs["vcs:localdir:"+key] = r
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}, nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -9,6 +9,7 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/docker/buildx/util/gitutil"
|
"github.com/docker/buildx/util/gitutil"
|
||||||
|
"github.com/moby/buildkit/client"
|
||||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
@@ -45,9 +46,11 @@ func TestGetGitAttributesBadGitRepo(t *testing.T) {
|
|||||||
func TestGetGitAttributesNoContext(t *testing.T) {
|
func TestGetGitAttributesNoContext(t *testing.T) {
|
||||||
setupTest(t)
|
setupTest(t)
|
||||||
|
|
||||||
gitattrs, err := getGitAttributes(context.Background(), "", "Dockerfile")
|
addGitAttrs, err := getGitAttributes(context.Background(), "", "Dockerfile")
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Empty(t, gitattrs)
|
var so client.SolveOpt
|
||||||
|
addGitAttrs(&so)
|
||||||
|
assert.Empty(t, so.FrontendAttrs)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGetGitAttributes(t *testing.T) {
|
func TestGetGitAttributes(t *testing.T) {
|
||||||
@@ -114,15 +117,17 @@ func TestGetGitAttributes(t *testing.T) {
|
|||||||
if tt.envGitInfo != "" {
|
if tt.envGitInfo != "" {
|
||||||
t.Setenv("BUILDX_GIT_INFO", tt.envGitInfo)
|
t.Setenv("BUILDX_GIT_INFO", tt.envGitInfo)
|
||||||
}
|
}
|
||||||
gitattrs, err := getGitAttributes(context.Background(), ".", "Dockerfile")
|
addGitAttrs, err := getGitAttributes(context.Background(), ".", "Dockerfile")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
var so client.SolveOpt
|
||||||
|
addGitAttrs(&so)
|
||||||
for _, e := range tt.expected {
|
for _, e := range tt.expected {
|
||||||
assert.Contains(t, gitattrs, e)
|
assert.Contains(t, so.FrontendAttrs, e)
|
||||||
assert.NotEmpty(t, gitattrs[e])
|
assert.NotEmpty(t, so.FrontendAttrs[e])
|
||||||
if e == "label:"+DockerfileLabel {
|
if e == "label:"+DockerfileLabel {
|
||||||
assert.Equal(t, "Dockerfile", gitattrs[e])
|
assert.Equal(t, "Dockerfile", so.FrontendAttrs[e])
|
||||||
} else if e == "label:"+specs.AnnotationSource || e == "vcs:source" {
|
} else if e == "label:"+specs.AnnotationSource || e == "vcs:source" {
|
||||||
assert.Equal(t, "git@github.com:docker/buildx.git", gitattrs[e])
|
assert.Equal(t, "git@github.com:docker/buildx.git", so.FrontendAttrs[e])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
@@ -139,18 +144,78 @@ func TestGetGitAttributesDirty(t *testing.T) {
|
|||||||
require.NoError(t, os.WriteFile(filepath.Join("dir", "Dockerfile"), df, 0644))
|
require.NoError(t, os.WriteFile(filepath.Join("dir", "Dockerfile"), df, 0644))
|
||||||
|
|
||||||
t.Setenv("BUILDX_GIT_LABELS", "true")
|
t.Setenv("BUILDX_GIT_LABELS", "true")
|
||||||
gitattrs, _ := getGitAttributes(context.Background(), ".", "Dockerfile")
|
addGitAttrs, err := getGitAttributes(context.Background(), ".", "Dockerfile")
|
||||||
assert.Equal(t, 5, len(gitattrs))
|
require.NoError(t, err)
|
||||||
|
|
||||||
assert.Contains(t, gitattrs, "label:"+DockerfileLabel)
|
var so client.SolveOpt
|
||||||
assert.Equal(t, "Dockerfile", gitattrs["label:"+DockerfileLabel])
|
addGitAttrs(&so)
|
||||||
assert.Contains(t, gitattrs, "label:"+specs.AnnotationSource)
|
|
||||||
assert.Equal(t, "git@github.com:docker/buildx.git", gitattrs["label:"+specs.AnnotationSource])
|
|
||||||
assert.Contains(t, gitattrs, "label:"+specs.AnnotationRevision)
|
|
||||||
assert.True(t, strings.HasSuffix(gitattrs["label:"+specs.AnnotationRevision], "-dirty"))
|
|
||||||
|
|
||||||
assert.Contains(t, gitattrs, "vcs:source")
|
assert.Equal(t, 5, len(so.FrontendAttrs))
|
||||||
assert.Equal(t, "git@github.com:docker/buildx.git", gitattrs["vcs:source"])
|
|
||||||
assert.Contains(t, gitattrs, "vcs:revision")
|
assert.Contains(t, so.FrontendAttrs, "label:"+DockerfileLabel)
|
||||||
assert.True(t, strings.HasSuffix(gitattrs["vcs:revision"], "-dirty"))
|
assert.Equal(t, "Dockerfile", so.FrontendAttrs["label:"+DockerfileLabel])
|
||||||
|
assert.Contains(t, so.FrontendAttrs, "label:"+specs.AnnotationSource)
|
||||||
|
assert.Equal(t, "git@github.com:docker/buildx.git", so.FrontendAttrs["label:"+specs.AnnotationSource])
|
||||||
|
assert.Contains(t, so.FrontendAttrs, "label:"+specs.AnnotationRevision)
|
||||||
|
assert.True(t, strings.HasSuffix(so.FrontendAttrs["label:"+specs.AnnotationRevision], "-dirty"))
|
||||||
|
|
||||||
|
assert.Contains(t, so.FrontendAttrs, "vcs:source")
|
||||||
|
assert.Equal(t, "git@github.com:docker/buildx.git", so.FrontendAttrs["vcs:source"])
|
||||||
|
assert.Contains(t, so.FrontendAttrs, "vcs:revision")
|
||||||
|
assert.True(t, strings.HasSuffix(so.FrontendAttrs["vcs:revision"], "-dirty"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLocalDirs(t *testing.T) {
|
||||||
|
setupTest(t)
|
||||||
|
|
||||||
|
so := &client.SolveOpt{
|
||||||
|
FrontendAttrs: map[string]string{},
|
||||||
|
}
|
||||||
|
|
||||||
|
addGitAttrs, err := getGitAttributes(context.Background(), ".", "Dockerfile")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.NoError(t, setLocalMount("context", ".", so))
|
||||||
|
require.NoError(t, setLocalMount("dockerfile", ".", so))
|
||||||
|
|
||||||
|
addGitAttrs(so)
|
||||||
|
|
||||||
|
require.Contains(t, so.FrontendAttrs, "vcs:localdir:context")
|
||||||
|
assert.Equal(t, ".", so.FrontendAttrs["vcs:localdir:context"])
|
||||||
|
|
||||||
|
require.Contains(t, so.FrontendAttrs, "vcs:localdir:dockerfile")
|
||||||
|
assert.Equal(t, ".", so.FrontendAttrs["vcs:localdir:dockerfile"])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLocalDirsSub(t *testing.T) {
|
||||||
|
gitutil.Mktmp(t)
|
||||||
|
|
||||||
|
c, err := gitutil.New()
|
||||||
|
require.NoError(t, err)
|
||||||
|
gitutil.GitInit(c, t)
|
||||||
|
|
||||||
|
df := []byte("FROM alpine:latest\n")
|
||||||
|
assert.NoError(t, os.MkdirAll("app", 0755))
|
||||||
|
assert.NoError(t, os.WriteFile("app/Dockerfile", df, 0644))
|
||||||
|
|
||||||
|
gitutil.GitAdd(c, t, "app/Dockerfile")
|
||||||
|
gitutil.GitCommit(c, t, "initial commit")
|
||||||
|
gitutil.GitSetRemote(c, t, "origin", "git@github.com:docker/buildx.git")
|
||||||
|
|
||||||
|
so := &client.SolveOpt{
|
||||||
|
FrontendAttrs: map[string]string{},
|
||||||
|
}
|
||||||
|
require.NoError(t, setLocalMount("context", ".", so))
|
||||||
|
require.NoError(t, setLocalMount("dockerfile", "app", so))
|
||||||
|
|
||||||
|
addGitAttrs, err := getGitAttributes(context.Background(), ".", "app/Dockerfile")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
addGitAttrs(so)
|
||||||
|
|
||||||
|
require.Contains(t, so.FrontendAttrs, "vcs:localdir:context")
|
||||||
|
assert.Equal(t, ".", so.FrontendAttrs["vcs:localdir:context"])
|
||||||
|
|
||||||
|
require.Contains(t, so.FrontendAttrs, "vcs:localdir:dockerfile")
|
||||||
|
assert.Equal(t, "app", so.FrontendAttrs["vcs:localdir:dockerfile"])
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -37,7 +37,7 @@ func NewContainer(ctx context.Context, resultCtx *ResultHandle, cfg *controllera
|
|||||||
cancel()
|
cancel()
|
||||||
}()
|
}()
|
||||||
|
|
||||||
containerCfg, err := resultCtx.getContainerConfig(ctx, c, cfg)
|
containerCfg, err := resultCtx.getContainerConfig(cfg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|||||||
43
build/localstate.go
Normal file
43
build/localstate.go
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
package build
|
||||||
|
|
||||||
|
import (
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
|
"github.com/docker/buildx/builder"
|
||||||
|
"github.com/docker/buildx/localstate"
|
||||||
|
"github.com/moby/buildkit/client"
|
||||||
|
)
|
||||||
|
|
||||||
|
func saveLocalState(so *client.SolveOpt, target string, opts Options, node builder.Node, configDir string) error {
|
||||||
|
var err error
|
||||||
|
if so.Ref == "" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
lp := opts.Inputs.ContextPath
|
||||||
|
dp := opts.Inputs.DockerfilePath
|
||||||
|
if dp != "" && !IsRemoteURL(lp) && lp != "-" && dp != "-" {
|
||||||
|
dp, err = filepath.Abs(dp)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if lp != "" && !IsRemoteURL(lp) && lp != "-" {
|
||||||
|
lp, err = filepath.Abs(lp)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if lp == "" && dp == "" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
l, err := localstate.New(configDir)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return l.SaveRef(node.Builder, node.Name, so.Ref, localstate.State{
|
||||||
|
Target: target,
|
||||||
|
LocalPath: lp,
|
||||||
|
DockerfilePath: dp,
|
||||||
|
GroupRef: opts.GroupRef,
|
||||||
|
})
|
||||||
|
}
|
||||||
648
build/opt.go
Normal file
648
build/opt.go
Normal file
@@ -0,0 +1,648 @@
|
|||||||
|
package build
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"slices"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"github.com/containerd/containerd/content"
|
||||||
|
"github.com/containerd/containerd/content/local"
|
||||||
|
"github.com/containerd/platforms"
|
||||||
|
"github.com/distribution/reference"
|
||||||
|
"github.com/docker/buildx/builder"
|
||||||
|
"github.com/docker/buildx/driver"
|
||||||
|
"github.com/docker/buildx/util/confutil"
|
||||||
|
"github.com/docker/buildx/util/dockerutil"
|
||||||
|
"github.com/docker/buildx/util/osutil"
|
||||||
|
"github.com/docker/buildx/util/progress"
|
||||||
|
"github.com/moby/buildkit/client"
|
||||||
|
"github.com/moby/buildkit/client/llb"
|
||||||
|
"github.com/moby/buildkit/client/ociindex"
|
||||||
|
gateway "github.com/moby/buildkit/frontend/gateway/client"
|
||||||
|
"github.com/moby/buildkit/identity"
|
||||||
|
"github.com/moby/buildkit/session/upload/uploadprovider"
|
||||||
|
"github.com/moby/buildkit/solver/pb"
|
||||||
|
"github.com/moby/buildkit/util/apicaps"
|
||||||
|
"github.com/moby/buildkit/util/entitlements"
|
||||||
|
"github.com/opencontainers/go-digest"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/tonistiigi/fsutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Options, bopts gateway.BuildOpts, configDir string, pw progress.Writer, docker *dockerutil.Client) (_ *client.SolveOpt, release func(), err error) {
|
||||||
|
nodeDriver := node.Driver
|
||||||
|
defers := make([]func(), 0, 2)
|
||||||
|
releaseF := func() {
|
||||||
|
for _, f := range defers {
|
||||||
|
f()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if err != nil {
|
||||||
|
releaseF()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// inline cache from build arg
|
||||||
|
if v, ok := opt.BuildArgs["BUILDKIT_INLINE_CACHE"]; ok {
|
||||||
|
if v, _ := strconv.ParseBool(v); v {
|
||||||
|
opt.CacheTo = append(opt.CacheTo, client.CacheOptionsEntry{
|
||||||
|
Type: "inline",
|
||||||
|
Attrs: map[string]string{},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, e := range opt.CacheTo {
|
||||||
|
if e.Type != "inline" && !nodeDriver.Features(ctx)[driver.CacheExport] {
|
||||||
|
return nil, nil, notSupported(driver.CacheExport, nodeDriver, "https://docs.docker.com/go/build-cache-backends/")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
cacheTo := make([]client.CacheOptionsEntry, 0, len(opt.CacheTo))
|
||||||
|
for _, e := range opt.CacheTo {
|
||||||
|
if e.Type == "gha" {
|
||||||
|
if !bopts.LLBCaps.Contains(apicaps.CapID("cache.gha")) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
} else if e.Type == "s3" {
|
||||||
|
if !bopts.LLBCaps.Contains(apicaps.CapID("cache.s3")) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cacheTo = append(cacheTo, e)
|
||||||
|
}
|
||||||
|
|
||||||
|
cacheFrom := make([]client.CacheOptionsEntry, 0, len(opt.CacheFrom))
|
||||||
|
for _, e := range opt.CacheFrom {
|
||||||
|
if e.Type == "gha" {
|
||||||
|
if !bopts.LLBCaps.Contains(apicaps.CapID("cache.gha")) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
} else if e.Type == "s3" {
|
||||||
|
if !bopts.LLBCaps.Contains(apicaps.CapID("cache.s3")) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cacheFrom = append(cacheFrom, e)
|
||||||
|
}
|
||||||
|
|
||||||
|
so := client.SolveOpt{
|
||||||
|
Ref: opt.Ref,
|
||||||
|
Frontend: "dockerfile.v0",
|
||||||
|
FrontendAttrs: map[string]string{},
|
||||||
|
LocalMounts: map[string]fsutil.FS{},
|
||||||
|
CacheExports: cacheTo,
|
||||||
|
CacheImports: cacheFrom,
|
||||||
|
AllowedEntitlements: opt.Allow,
|
||||||
|
SourcePolicy: opt.SourcePolicy,
|
||||||
|
}
|
||||||
|
|
||||||
|
if opt.CgroupParent != "" {
|
||||||
|
so.FrontendAttrs["cgroup-parent"] = opt.CgroupParent
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := opt.BuildArgs["BUILDKIT_MULTI_PLATFORM"]; ok {
|
||||||
|
if v, _ := strconv.ParseBool(v); v {
|
||||||
|
so.FrontendAttrs["multi-platform"] = "true"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if multiDriver {
|
||||||
|
// force creation of manifest list
|
||||||
|
so.FrontendAttrs["multi-platform"] = "true"
|
||||||
|
}
|
||||||
|
|
||||||
|
attests := make(map[string]string)
|
||||||
|
for k, v := range opt.Attests {
|
||||||
|
if v != nil {
|
||||||
|
attests[k] = *v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
supportAttestations := bopts.LLBCaps.Contains(apicaps.CapID("exporter.image.attestations")) && nodeDriver.Features(ctx)[driver.MultiPlatform]
|
||||||
|
if len(attests) > 0 {
|
||||||
|
if !supportAttestations {
|
||||||
|
if !nodeDriver.Features(ctx)[driver.MultiPlatform] {
|
||||||
|
return nil, nil, notSupported("Attestation", nodeDriver, "https://docs.docker.com/go/attestations/")
|
||||||
|
}
|
||||||
|
return nil, nil, errors.Errorf("Attestations are not supported by the current BuildKit daemon")
|
||||||
|
}
|
||||||
|
for k, v := range attests {
|
||||||
|
so.FrontendAttrs["attest:"+k] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := opt.Attests["provenance"]; !ok && supportAttestations {
|
||||||
|
const noAttestEnv = "BUILDX_NO_DEFAULT_ATTESTATIONS"
|
||||||
|
var noProv bool
|
||||||
|
if v, ok := os.LookupEnv(noAttestEnv); ok {
|
||||||
|
noProv, err = strconv.ParseBool(v)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, errors.Wrap(err, "invalid "+noAttestEnv)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !noProv {
|
||||||
|
so.FrontendAttrs["attest:provenance"] = "mode=min,inline-only=true"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
switch len(opt.Exports) {
|
||||||
|
case 1:
|
||||||
|
// valid
|
||||||
|
case 0:
|
||||||
|
if !noDefaultLoad() && opt.CallFunc == nil {
|
||||||
|
if nodeDriver.IsMobyDriver() {
|
||||||
|
// backwards compat for docker driver only:
|
||||||
|
// this ensures the build results in a docker image.
|
||||||
|
opt.Exports = []client.ExportEntry{{Type: "image", Attrs: map[string]string{}}}
|
||||||
|
} else if nodeDriver.Features(ctx)[driver.DefaultLoad] {
|
||||||
|
opt.Exports = []client.ExportEntry{{Type: "docker", Attrs: map[string]string{}}}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
if err := bopts.LLBCaps.Supports(pb.CapMultipleExporters); err != nil {
|
||||||
|
return nil, nil, errors.Errorf("multiple outputs currently unsupported by the current BuildKit daemon, please upgrade to version v0.13+ or use a single output")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// fill in image exporter names from tags
|
||||||
|
if len(opt.Tags) > 0 {
|
||||||
|
tags := make([]string, len(opt.Tags))
|
||||||
|
for i, tag := range opt.Tags {
|
||||||
|
ref, err := reference.Parse(tag)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, errors.Wrapf(err, "invalid tag %q", tag)
|
||||||
|
}
|
||||||
|
tags[i] = ref.String()
|
||||||
|
}
|
||||||
|
for i, e := range opt.Exports {
|
||||||
|
switch e.Type {
|
||||||
|
case "image", "oci", "docker":
|
||||||
|
opt.Exports[i].Attrs["name"] = strings.Join(tags, ",")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for _, e := range opt.Exports {
|
||||||
|
if e.Type == "image" && e.Attrs["name"] == "" && e.Attrs["push"] != "" {
|
||||||
|
if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok {
|
||||||
|
return nil, nil, errors.Errorf("tag is needed when pushing to registry")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// cacheonly is a fake exporter to opt out of default behaviors
|
||||||
|
exports := make([]client.ExportEntry, 0, len(opt.Exports))
|
||||||
|
for _, e := range opt.Exports {
|
||||||
|
if e.Type != "cacheonly" {
|
||||||
|
exports = append(exports, e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
opt.Exports = exports
|
||||||
|
|
||||||
|
// set up exporters
|
||||||
|
for i, e := range opt.Exports {
|
||||||
|
if e.Type == "oci" && !nodeDriver.Features(ctx)[driver.OCIExporter] {
|
||||||
|
return nil, nil, notSupported(driver.OCIExporter, nodeDriver, "https://docs.docker.com/go/build-exporters/")
|
||||||
|
}
|
||||||
|
if e.Type == "docker" {
|
||||||
|
features := docker.Features(ctx, e.Attrs["context"])
|
||||||
|
if features[dockerutil.OCIImporter] && e.Output == nil {
|
||||||
|
// rely on oci importer if available (which supports
|
||||||
|
// multi-platform images), otherwise fall back to docker
|
||||||
|
opt.Exports[i].Type = "oci"
|
||||||
|
} else if len(opt.Platforms) > 1 || len(attests) > 0 {
|
||||||
|
if e.Output != nil {
|
||||||
|
return nil, nil, errors.Errorf("docker exporter does not support exporting manifest lists, use the oci exporter instead")
|
||||||
|
}
|
||||||
|
return nil, nil, errors.Errorf("docker exporter does not currently support exporting manifest lists")
|
||||||
|
}
|
||||||
|
if e.Output == nil {
|
||||||
|
if nodeDriver.IsMobyDriver() {
|
||||||
|
e.Type = "image"
|
||||||
|
} else {
|
||||||
|
w, cancel, err := docker.LoadImage(ctx, e.Attrs["context"], pw)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
defers = append(defers, cancel)
|
||||||
|
opt.Exports[i].Output = func(_ map[string]string) (io.WriteCloser, error) {
|
||||||
|
return w, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if !nodeDriver.Features(ctx)[driver.DockerExporter] {
|
||||||
|
return nil, nil, notSupported(driver.DockerExporter, nodeDriver, "https://docs.docker.com/go/build-exporters/")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if e.Type == "image" && nodeDriver.IsMobyDriver() {
|
||||||
|
opt.Exports[i].Type = "moby"
|
||||||
|
if e.Attrs["push"] != "" {
|
||||||
|
if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok {
|
||||||
|
if ok, _ := strconv.ParseBool(e.Attrs["push-by-digest"]); ok {
|
||||||
|
return nil, nil, errors.Errorf("push-by-digest is currently not implemented for docker driver, please create a new builder instance")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if e.Type == "docker" || e.Type == "image" || e.Type == "oci" {
|
||||||
|
// inline buildinfo attrs from build arg
|
||||||
|
if v, ok := opt.BuildArgs["BUILDKIT_INLINE_BUILDINFO_ATTRS"]; ok {
|
||||||
|
opt.Exports[i].Attrs["buildinfo-attrs"] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
so.Exports = opt.Exports
|
||||||
|
so.Session = slices.Clone(opt.Session)
|
||||||
|
|
||||||
|
releaseLoad, err := loadInputs(ctx, nodeDriver, opt.Inputs, pw, &so)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
defers = append(defers, releaseLoad)
|
||||||
|
|
||||||
|
// add node identifier to shared key if one was specified
|
||||||
|
if so.SharedKey != "" {
|
||||||
|
so.SharedKey += ":" + confutil.TryNodeIdentifier(configDir)
|
||||||
|
}
|
||||||
|
|
||||||
|
if opt.Pull {
|
||||||
|
so.FrontendAttrs["image-resolve-mode"] = pb.AttrImageResolveModeForcePull
|
||||||
|
} else if nodeDriver.IsMobyDriver() {
|
||||||
|
// moby driver always resolves local images by default
|
||||||
|
so.FrontendAttrs["image-resolve-mode"] = pb.AttrImageResolveModePreferLocal
|
||||||
|
}
|
||||||
|
if opt.Target != "" {
|
||||||
|
so.FrontendAttrs["target"] = opt.Target
|
||||||
|
}
|
||||||
|
if len(opt.NoCacheFilter) > 0 {
|
||||||
|
so.FrontendAttrs["no-cache"] = strings.Join(opt.NoCacheFilter, ",")
|
||||||
|
}
|
||||||
|
if opt.NoCache {
|
||||||
|
so.FrontendAttrs["no-cache"] = ""
|
||||||
|
}
|
||||||
|
for k, v := range opt.BuildArgs {
|
||||||
|
so.FrontendAttrs["build-arg:"+k] = v
|
||||||
|
}
|
||||||
|
for k, v := range opt.Labels {
|
||||||
|
so.FrontendAttrs["label:"+k] = v
|
||||||
|
}
|
||||||
|
|
||||||
|
for k, v := range node.ProxyConfig {
|
||||||
|
if _, ok := opt.BuildArgs[k]; !ok {
|
||||||
|
so.FrontendAttrs["build-arg:"+k] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// set platforms
|
||||||
|
if len(opt.Platforms) != 0 {
|
||||||
|
pp := make([]string, len(opt.Platforms))
|
||||||
|
for i, p := range opt.Platforms {
|
||||||
|
pp[i] = platforms.Format(p)
|
||||||
|
}
|
||||||
|
if len(pp) > 1 && !nodeDriver.Features(ctx)[driver.MultiPlatform] {
|
||||||
|
return nil, nil, notSupported(driver.MultiPlatform, nodeDriver, "https://docs.docker.com/go/build-multi-platform/")
|
||||||
|
}
|
||||||
|
so.FrontendAttrs["platform"] = strings.Join(pp, ",")
|
||||||
|
}
|
||||||
|
|
||||||
|
// setup networkmode
|
||||||
|
switch opt.NetworkMode {
|
||||||
|
case "host":
|
||||||
|
so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
|
||||||
|
so.AllowedEntitlements = append(so.AllowedEntitlements, entitlements.EntitlementNetworkHost)
|
||||||
|
case "none":
|
||||||
|
so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
|
||||||
|
case "", "default":
|
||||||
|
default:
|
||||||
|
return nil, nil, errors.Errorf("network mode %q not supported by buildkit - you can define a custom network for your builder using the network driver-opt in buildx create", opt.NetworkMode)
|
||||||
|
}
|
||||||
|
|
||||||
|
// setup extrahosts
|
||||||
|
extraHosts, err := toBuildkitExtraHosts(ctx, opt.ExtraHosts, nodeDriver)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if len(extraHosts) > 0 {
|
||||||
|
so.FrontendAttrs["add-hosts"] = extraHosts
|
||||||
|
}
|
||||||
|
|
||||||
|
// setup shm size
|
||||||
|
if opt.ShmSize.Value() > 0 {
|
||||||
|
so.FrontendAttrs["shm-size"] = strconv.FormatInt(opt.ShmSize.Value(), 10)
|
||||||
|
}
|
||||||
|
|
||||||
|
// setup ulimits
|
||||||
|
ulimits, err := toBuildkitUlimits(opt.Ulimits)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
} else if len(ulimits) > 0 {
|
||||||
|
so.FrontendAttrs["ulimit"] = ulimits
|
||||||
|
}
|
||||||
|
|
||||||
|
// mark call request as internal
|
||||||
|
if opt.CallFunc != nil {
|
||||||
|
so.Internal = true
|
||||||
|
}
|
||||||
|
|
||||||
|
return &so, releaseF, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// loadInputs resolves the build context and Dockerfile described by inp and
// wires them into target (frontend attributes, frontend inputs, local mounts,
// session attachables). It returns a release function that removes any
// temporary directories created along the way; the caller must invoke it once
// the build has finished.
func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw progress.Writer, target *client.SolveOpt) (func(), error) {
	if inp.ContextPath == "" {
		return nil, errors.New("please specify build context (e.g. \".\" for the current directory)")
	}

	// TODO: handle stdin, symlinks, remote contexts, check files exist

	var (
		err              error
		dockerfileReader io.ReadCloser
		dockerfileDir    string
		dockerfileName   = inp.DockerfilePath
		toRemove         []string // temp dirs deleted by the returned release func
	)

	// Decide where the build context comes from: an in-memory LLB state, stdin,
	// a local directory, or a remote URL.
	switch {
	case inp.ContextState != nil:
		// Context is provided as a pre-built LLB state; use it for both the
		// context and the dockerfile frontend inputs.
		if target.FrontendInputs == nil {
			target.FrontendInputs = make(map[string]llb.State)
		}
		target.FrontendInputs["context"] = *inp.ContextState
		target.FrontendInputs["dockerfile"] = *inp.ContextState
	case inp.ContextPath == "-":
		// Context comes from stdin; stdin cannot also carry the Dockerfile.
		if inp.DockerfilePath == "-" {
			return nil, errors.Errorf("invalid argument: can't use stdin for both build context and dockerfile")
		}

		rc := inp.InStream.NewReadCloser()
		// Peek at the stream header to tell a context archive apart from a
		// plain Dockerfile.
		magic, err := inp.InStream.Peek(archiveHeaderSize * 2)
		if err != nil && err != io.EOF {
			return nil, errors.Wrap(err, "failed to peek context header from STDIN")
		}
		// An immediate EOF with no data means empty stdin; nothing to wire up.
		if !(err == io.EOF && len(magic) == 0) {
			if isArchive(magic) {
				// stdin is context
				up := uploadprovider.New()
				target.FrontendAttrs["context"] = up.Add(rc)
				target.Session = append(target.Session, up)
			} else {
				if inp.DockerfilePath != "" {
					return nil, errors.Errorf("ambiguous Dockerfile source: both stdin and flag correspond to Dockerfiles")
				}
				// stdin is dockerfile; mount an empty temp dir as the context.
				dockerfileReader = rc
				inp.ContextPath, _ = os.MkdirTemp("", "empty-dir")
				toRemove = append(toRemove, inp.ContextPath)
				if err := setLocalMount("context", inp.ContextPath, target); err != nil {
					return nil, err
				}
			}
		}
	case osutil.IsLocalDir(inp.ContextPath):
		// Context is a local directory.
		if err := setLocalMount("context", inp.ContextPath, target); err != nil {
			return nil, err
		}
		// SharedKey groups cache for builds of the same directory; use the
		// base name of the absolute path when it can be resolved.
		sharedKey := inp.ContextPath
		if p, err := filepath.Abs(sharedKey); err == nil {
			sharedKey = filepath.Base(p)
		}
		target.SharedKey = sharedKey
		switch inp.DockerfilePath {
		case "-":
			// Dockerfile from stdin.
			dockerfileReader = inp.InStream.NewReadCloser()
		case "":
			// Default: Dockerfile lives in the context directory.
			dockerfileDir = inp.ContextPath
		default:
			dockerfileDir = filepath.Dir(inp.DockerfilePath)
			dockerfileName = filepath.Base(inp.DockerfilePath)
		}
	case IsRemoteURL(inp.ContextPath):
		// Context is a remote URL (git ref or HTTP); the Dockerfile may still
		// be supplied locally via stdin or an absolute path.
		if inp.DockerfilePath == "-" {
			dockerfileReader = inp.InStream.NewReadCloser()
		} else if filepath.IsAbs(inp.DockerfilePath) {
			dockerfileDir = filepath.Dir(inp.DockerfilePath)
			dockerfileName = filepath.Base(inp.DockerfilePath)
			// Tell the frontend to read the Dockerfile from the local
			// "dockerfile" mount instead of the remote context.
			target.FrontendAttrs["dockerfilekey"] = "dockerfile"
		}
		target.FrontendAttrs["context"] = inp.ContextPath
	default:
		return nil, errors.Errorf("unable to prepare context: path %q not found", inp.ContextPath)
	}

	// An inline Dockerfile overrides any Dockerfile source chosen above.
	if inp.DockerfileInline != "" {
		dockerfileReader = io.NopCloser(strings.NewReader(inp.DockerfileInline))
	}

	// Materialize a streamed Dockerfile into a temp dir so it can be mounted.
	if dockerfileReader != nil {
		dockerfileDir, err = createTempDockerfile(dockerfileReader, inp.InStream)
		if err != nil {
			return nil, err
		}
		toRemove = append(toRemove, dockerfileDir)
		dockerfileName = "Dockerfile"
		target.FrontendAttrs["dockerfilekey"] = "dockerfile"
	}
	// A Dockerfile referenced by HTTP(S) URL is fetched into a temp dir.
	if isHTTPURL(inp.DockerfilePath) {
		dockerfileDir, err = createTempDockerfileFromURL(ctx, d, inp.DockerfilePath, pw)
		if err != nil {
			return nil, err
		}
		toRemove = append(toRemove, dockerfileDir)
		dockerfileName = "Dockerfile"
		target.FrontendAttrs["dockerfilekey"] = "dockerfile"
		delete(target.FrontendInputs, "dockerfile")
	}

	if dockerfileName == "" {
		dockerfileName = "Dockerfile"
	}

	if dockerfileDir != "" {
		if err := setLocalMount("dockerfile", dockerfileDir, target); err != nil {
			return nil, err
		}
		// See handleLowercaseDockerfile: fall back to "dockerfile" when only
		// the lowercase variant exists on disk.
		dockerfileName = handleLowercaseDockerfile(dockerfileDir, dockerfileName)
	}

	target.FrontendAttrs["filename"] = dockerfileName

	// Wire up additional named contexts (--build-context).
	for k, v := range inp.NamedContexts {
		target.FrontendAttrs["frontend.caps"] = "moby.buildkit.frontend.contexts+forward"
		if v.State != nil {
			// Named context backed by an LLB state.
			target.FrontendAttrs["context:"+k] = "input:" + k
			if target.FrontendInputs == nil {
				target.FrontendInputs = make(map[string]llb.State)
			}
			target.FrontendInputs[k] = *v.State
			continue
		}

		// Remote, image, or target references are passed through verbatim.
		if IsRemoteURL(v.Path) || strings.HasPrefix(v.Path, "docker-image://") || strings.HasPrefix(v.Path, "target:") {
			target.FrontendAttrs["context:"+k] = v.Path
			continue
		}

		// handle OCI layout
		if strings.HasPrefix(v.Path, "oci-layout://") {
			localPath := strings.TrimPrefix(v.Path, "oci-layout://")
			// Reference format: path[:tag][@digest]
			localPath, dig, hasDigest := strings.Cut(localPath, "@")
			localPath, tag, hasTag := strings.Cut(localPath, ":")
			if !hasTag {
				tag = "latest"
			}
			if !hasDigest {
				// No digest given; resolve it from the layout's index.
				dig, err = resolveDigest(localPath, tag)
				if err != nil {
					return nil, errors.Wrapf(err, "oci-layout reference %q could not be resolved", v.Path)
				}
			}
			store, err := local.NewStore(localPath)
			if err != nil {
				return nil, errors.Wrapf(err, "invalid store at %s", localPath)
			}
			// Register the store under a fresh ID and point the context at it.
			storeName := identity.NewID()
			if target.OCIStores == nil {
				target.OCIStores = map[string]content.Store{}
			}
			target.OCIStores[storeName] = store

			target.FrontendAttrs["context:"+k] = "oci-layout://" + storeName + ":" + tag + "@" + dig
			continue
		}
		// Otherwise the named context must be a local directory.
		st, err := os.Stat(v.Path)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to get build context %v", k)
		}
		if !st.IsDir() {
			return nil, errors.Wrapf(syscall.ENOTDIR, "failed to get build context path %v", v)
		}
		localName := k
		if k == "context" || k == "dockerfile" {
			localName = "_" + k // underscore to avoid collisions
		}
		if err := setLocalMount(localName, v.Path, target); err != nil {
			return nil, err
		}
		target.FrontendAttrs["context:"+k] = "local:" + localName
	}

	// release removes every temp dir created above; safe to call once.
	release := func() {
		for _, dir := range toRemove {
			_ = os.RemoveAll(dir)
		}
	}
	return release, nil
}
|
||||||
|
|
||||||
|
func resolveDigest(localPath, tag string) (dig string, _ error) {
|
||||||
|
idx := ociindex.NewStoreIndex(localPath)
|
||||||
|
|
||||||
|
// lookup by name
|
||||||
|
desc, err := idx.Get(tag)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if desc == nil {
|
||||||
|
// lookup single
|
||||||
|
desc, err = idx.GetSingle()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if desc == nil {
|
||||||
|
return "", errors.New("failed to resolve digest")
|
||||||
|
}
|
||||||
|
|
||||||
|
dig = string(desc.Digest)
|
||||||
|
_, err = digest.Parse(dig)
|
||||||
|
if err != nil {
|
||||||
|
return "", errors.Wrapf(err, "invalid digest %s", dig)
|
||||||
|
}
|
||||||
|
|
||||||
|
return dig, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func setLocalMount(name, dir string, so *client.SolveOpt) error {
|
||||||
|
lm, err := fsutil.NewFS(dir)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if so.LocalMounts == nil {
|
||||||
|
so.LocalMounts = map[string]fsutil.FS{}
|
||||||
|
}
|
||||||
|
so.LocalMounts[name] = &fs{FS: lm, dir: dir}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func createTempDockerfile(r io.Reader, multiReader *SyncMultiReader) (string, error) {
|
||||||
|
dir, err := os.MkdirTemp("", "dockerfile")
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
f, err := os.Create(filepath.Join(dir, "Dockerfile"))
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
if multiReader != nil {
|
||||||
|
dt, err := io.ReadAll(r)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
multiReader.Reset(dt)
|
||||||
|
r = bytes.NewReader(dt)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := io.Copy(f, r); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return dir, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// handle https://github.com/moby/moby/pull/10858
|
||||||
|
func handleLowercaseDockerfile(dir, p string) string {
|
||||||
|
if filepath.Base(p) != "Dockerfile" {
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err := os.Open(filepath.Dir(filepath.Join(dir, p)))
|
||||||
|
if err != nil {
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
names, err := f.Readdirnames(-1)
|
||||||
|
if err != nil {
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
foundLowerCase := false
|
||||||
|
for _, n := range names {
|
||||||
|
if n == "Dockerfile" {
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
if n == "dockerfile" {
|
||||||
|
foundLowerCase = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if foundLowerCase {
|
||||||
|
return filepath.Join(filepath.Dir(p), "dockerfile")
|
||||||
|
}
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
// fs wraps an fsutil.FS together with the local directory it was created
// from; setLocalMount stores these in SolveOpt.LocalMounts.
type fs struct {
	fsutil.FS
	dir string // originating local directory passed to setLocalMount
}

// compile-time check that *fs satisfies fsutil.FS
var _ fsutil.FS = &fs{}
|
||||||
156
build/provenance.go
Normal file
156
build/provenance.go
Normal file
@@ -0,0 +1,156 @@
|
|||||||
|
package build
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/containerd/containerd/content"
|
||||||
|
"github.com/containerd/containerd/content/proxy"
|
||||||
|
"github.com/docker/buildx/util/confutil"
|
||||||
|
"github.com/docker/buildx/util/progress"
|
||||||
|
controlapi "github.com/moby/buildkit/api/services/control"
|
||||||
|
"github.com/moby/buildkit/client"
|
||||||
|
provenancetypes "github.com/moby/buildkit/solver/llbsolver/provenance/types"
|
||||||
|
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"golang.org/x/sync/errgroup"
|
||||||
|
)
|
||||||
|
|
||||||
|
// provenancePredicate extends the BuildKit SLSA provenance predicate with a
// builder section whose ID may be stripped when empty (see encodeProvenance).
type provenancePredicate struct {
	Builder *provenanceBuilder `json:"builder,omitempty"`
	provenancetypes.ProvenancePredicate
}

// provenanceBuilder carries only the builder ID from the provenance document.
type provenanceBuilder struct {
	ID string `json:"id,omitempty"`
}
|
||||||
|
|
||||||
|
func setRecordProvenance(ctx context.Context, c *client.Client, sr *client.SolveResponse, ref string, mode confutil.MetadataProvenanceMode, pw progress.Writer) error {
|
||||||
|
if mode == confutil.MetadataProvenanceModeDisabled {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
pw = progress.ResetTime(pw)
|
||||||
|
return progress.Wrap("resolving provenance for metadata file", pw.Write, func(l progress.SubLogger) error {
|
||||||
|
res, err := fetchProvenance(ctx, c, ref, mode)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for k, v := range res {
|
||||||
|
sr.ExporterResponse[k] = v
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func fetchProvenance(ctx context.Context, c *client.Client, ref string, mode confutil.MetadataProvenanceMode) (out map[string]string, err error) {
|
||||||
|
cl, err := c.ControlClient().ListenBuildHistory(ctx, &controlapi.BuildHistoryRequest{
|
||||||
|
Ref: ref,
|
||||||
|
EarlyExit: true,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var mu sync.Mutex
|
||||||
|
eg, ctx := errgroup.WithContext(ctx)
|
||||||
|
store := proxy.NewContentStore(c.ContentClient())
|
||||||
|
for {
|
||||||
|
ev, err := cl.Recv()
|
||||||
|
if errors.Is(err, io.EOF) {
|
||||||
|
break
|
||||||
|
} else if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if ev.Record == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if ev.Record.Result != nil {
|
||||||
|
desc := lookupProvenance(ev.Record.Result)
|
||||||
|
if desc == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
eg.Go(func() error {
|
||||||
|
dt, err := content.ReadBlob(ctx, store, *desc)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to load provenance blob from build record")
|
||||||
|
}
|
||||||
|
prv, err := encodeProvenance(dt, mode)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
mu.Lock()
|
||||||
|
if out == nil {
|
||||||
|
out = make(map[string]string)
|
||||||
|
}
|
||||||
|
out["buildx.build.provenance"] = prv
|
||||||
|
mu.Unlock()
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
} else if ev.Record.Results != nil {
|
||||||
|
for platform, res := range ev.Record.Results {
|
||||||
|
platform := platform
|
||||||
|
desc := lookupProvenance(res)
|
||||||
|
if desc == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
eg.Go(func() error {
|
||||||
|
dt, err := content.ReadBlob(ctx, store, *desc)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to load provenance blob from build record")
|
||||||
|
}
|
||||||
|
prv, err := encodeProvenance(dt, mode)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
mu.Lock()
|
||||||
|
if out == nil {
|
||||||
|
out = make(map[string]string)
|
||||||
|
}
|
||||||
|
out["buildx.build.provenance/"+platform] = prv
|
||||||
|
mu.Unlock()
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out, eg.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
func lookupProvenance(res *controlapi.BuildResultInfo) *ocispecs.Descriptor {
|
||||||
|
for _, a := range res.Attestations {
|
||||||
|
if a.MediaType == "application/vnd.in-toto+json" && strings.HasPrefix(a.Annotations["in-toto.io/predicate-type"], "https://slsa.dev/provenance/") {
|
||||||
|
return &ocispecs.Descriptor{
|
||||||
|
Digest: a.Digest,
|
||||||
|
Size: a.Size_,
|
||||||
|
MediaType: a.MediaType,
|
||||||
|
Annotations: a.Annotations,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeProvenance(dt []byte, mode confutil.MetadataProvenanceMode) (string, error) {
|
||||||
|
var prv provenancePredicate
|
||||||
|
if err := json.Unmarshal(dt, &prv); err != nil {
|
||||||
|
return "", errors.Wrapf(err, "failed to unmarshal provenance")
|
||||||
|
}
|
||||||
|
if prv.Builder != nil && prv.Builder.ID == "" {
|
||||||
|
// reset builder if id is empty
|
||||||
|
prv.Builder = nil
|
||||||
|
}
|
||||||
|
if mode == confutil.MetadataProvenanceModeMin {
|
||||||
|
// reset fields for minimal provenance
|
||||||
|
prv.BuildConfig = nil
|
||||||
|
prv.Metadata = nil
|
||||||
|
}
|
||||||
|
dtprv, err := json.Marshal(prv)
|
||||||
|
if err != nil {
|
||||||
|
return "", errors.Wrapf(err, "failed to marshal provenance")
|
||||||
|
}
|
||||||
|
return base64.StdEncoding.EncodeToString(dtprv), nil
|
||||||
|
}
|
||||||
164
build/replicatedstream.go
Normal file
164
build/replicatedstream.go
Normal file
@@ -0,0 +1,164 @@
|
|||||||
|
package build
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"io"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SyncMultiReader replicates a single io.Reader to multiple consumers. All
// readers observe the same byte stream; the slowest open reader paces the
// group (see syncReader.read), coordinated via cond under mu. Reset switches
// the reader into static mode, after which NewReadCloser serves the fixed
// byte slice instead of the live source.
type SyncMultiReader struct {
	source  *bufio.Reader // underlying stream, wrapped for Peek support
	buffer  []byte        // most recent chunk read from source
	static  []byte        // when non-nil, serve this fixed content instead of source
	mu      sync.Mutex
	cond    *sync.Cond // signalled whenever buffer/offset/reader state changes
	readers []*syncReader
	err     error // terminal error from source, replayed to readers at end of data
	offset  int   // absolute stream offset of buffer[0]
}

// syncReader is one consumer of a SyncMultiReader; all state is guarded by
// mr.mu.
type syncReader struct {
	mr     *SyncMultiReader
	offset int  // absolute stream offset this reader has consumed up to
	closed bool // closed readers no longer hold back the group
}
|
||||||
|
|
||||||
|
func NewSyncMultiReader(source io.Reader) *SyncMultiReader {
|
||||||
|
mr := &SyncMultiReader{
|
||||||
|
source: bufio.NewReader(source),
|
||||||
|
buffer: make([]byte, 0, 32*1024),
|
||||||
|
}
|
||||||
|
mr.cond = sync.NewCond(&mr.mu)
|
||||||
|
return mr
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mr *SyncMultiReader) Peek(n int) ([]byte, error) {
|
||||||
|
mr.mu.Lock()
|
||||||
|
defer mr.mu.Unlock()
|
||||||
|
|
||||||
|
if mr.static != nil {
|
||||||
|
return mr.static[min(n, len(mr.static)):], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return mr.source.Peek(n)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mr *SyncMultiReader) Reset(dt []byte) {
|
||||||
|
mr.mu.Lock()
|
||||||
|
defer mr.mu.Unlock()
|
||||||
|
|
||||||
|
mr.static = dt
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mr *SyncMultiReader) NewReadCloser() io.ReadCloser {
|
||||||
|
mr.mu.Lock()
|
||||||
|
defer mr.mu.Unlock()
|
||||||
|
|
||||||
|
if mr.static != nil {
|
||||||
|
return io.NopCloser(bytes.NewReader(mr.static))
|
||||||
|
}
|
||||||
|
|
||||||
|
reader := &syncReader{
|
||||||
|
mr: mr,
|
||||||
|
}
|
||||||
|
mr.readers = append(mr.readers, reader)
|
||||||
|
return reader
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read serializes all group readers under the shared lock and delegates to
// the lock-held implementation in read.
func (sr *syncReader) Read(p []byte) (int, error) {
	sr.mr.mu.Lock()
	defer sr.mr.mu.Unlock()

	return sr.read(p)
}
|
||||||
|
|
||||||
|
// read implements the synchronized read protocol; mr.mu must be held.
//
// Each reader first drains what remains of the shared buffer at its own
// offset. Only when every open reader has fully consumed the current buffer
// does one reader refill it from the source; readers that arrive early block
// on the condition variable until the laggards catch up or close.
func (sr *syncReader) read(p []byte) (int, error) {
	// Absolute end of the shared buffer at entry; used below to detect
	// whether another reader refilled the buffer while we were waiting.
	end := sr.mr.offset + len(sr.mr.buffer)

loop0:
	for {
		if sr.closed {
			return 0, io.EOF
		}

		// Recompute per-iteration: the buffer may have advanced while this
		// reader was blocked in cond.Wait. (Intentionally shadows the outer
		// end, which must keep its entry-time value.)
		end := sr.mr.offset + len(sr.mr.buffer)

		// Source already failed (including io.EOF) and we have consumed
		// everything buffered: replay the terminal error.
		if sr.mr.err != nil && sr.offset == end {
			return 0, sr.mr.err
		}

		// This reader's position within the shared buffer.
		start := sr.offset - sr.mr.offset

		dt := sr.mr.buffer[start:]

		if len(dt) > 0 {
			// Serve buffered data and wake peers that may be waiting for us
			// to catch up.
			n := copy(p, dt)
			sr.offset += n
			sr.mr.cond.Broadcast()
			return n, nil
		}

		// check for readers that have not caught up
		hasOpen := false
		for _, r := range sr.mr.readers {
			if !r.closed {
				hasOpen = true
			} else {
				continue
			}
			// An open peer still has unread buffered data; wait for it
			// before the buffer can be refilled.
			if r.offset < end {
				sr.mr.cond.Wait()
				continue loop0
			}
		}

		// Every reader is closed: nothing will consume further data.
		if !hasOpen {
			return 0, io.EOF
		}
		// All open readers have drained the buffer; fall through to refill.
		break
	}

	last := sr.mr.offset + len(sr.mr.buffer)
	// another reader has already updated the buffer
	if last > end || sr.mr.err != nil {
		return sr.read(p)
	}

	// Advance the shared offset past the fully-consumed buffer and refill it
	// from the source.
	sr.mr.offset += len(sr.mr.buffer)

	sr.mr.buffer = sr.mr.buffer[:cap(sr.mr.buffer)]
	n, err := sr.mr.source.Read(sr.mr.buffer)
	if n >= 0 {
		sr.mr.buffer = sr.mr.buffer[:n]
	} else {
		sr.mr.buffer = sr.mr.buffer[:0]
	}

	// Wake peers blocked waiting for fresh data.
	sr.mr.cond.Broadcast()

	if err != nil {
		// Record the terminal error so every reader sees it after draining.
		sr.mr.err = err
		return 0, err
	}

	nn := copy(p, sr.mr.buffer)
	sr.offset += nn

	return nn, nil
}
|
||||||
|
|
||||||
|
func (sr *syncReader) Close() error {
|
||||||
|
sr.mr.mu.Lock()
|
||||||
|
defer sr.mr.mu.Unlock()
|
||||||
|
|
||||||
|
if sr.closed {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
sr.closed = true
|
||||||
|
|
||||||
|
sr.mr.cond.Broadcast()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
77
build/replicatedstream_test.go
Normal file
77
build/replicatedstream_test.go
Normal file
@@ -0,0 +1,77 @@
|
|||||||
|
package build
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto/rand"
|
||||||
|
"io"
|
||||||
|
mathrand "math/rand"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// generateRandomData returns size bytes of cryptographically random data.
// A crypto/rand failure is unrecoverable for the tests using this helper, so
// it panics instead of silently ignoring the error as before.
func generateRandomData(size int) []byte {
	data := make([]byte, size)
	if _, err := rand.Read(data); err != nil {
		panic(err)
	}
	return data
}
|
||||||
|
// TestSyncMultiReaderParallel exercises SyncMultiReader with many concurrent
// readers using randomized read sizes, random early closes, and random
// pauses, asserting each reader observes exactly the source byte stream.
func TestSyncMultiReaderParallel(t *testing.T) {
	data := generateRandomData(1024 * 1024)
	source := bytes.NewReader(data)
	mr := NewSyncMultiReader(source)

	var wg sync.WaitGroup
	numReaders := 10
	bufferSize := 4096 * 4

	readers := make([]io.ReadCloser, numReaders)

	// Register all readers up front so none joins mid-stream.
	for i := 0; i < numReaders; i++ {
		readers[i] = mr.NewReadCloser()
	}

	for i := 0; i < numReaders; i++ {
		wg.Add(1)
		go func(readerId int) {
			defer wg.Done()
			reader := readers[readerId]
			defer reader.Close()

			totalRead := 0
			buf := make([]byte, bufferSize)
			for totalRead < len(data) {
				// Simulate random read sizes
				readSize := mathrand.Intn(bufferSize) //nolint:gosec
				n, err := reader.Read(buf[:readSize])

				// Every chunk must match the source at this reader's offset.
				if n > 0 {
					assert.Equal(t, data[totalRead:totalRead+n], buf[:n], "Reader %d mismatch", readerId)
					totalRead += n
				}

				// EOF is only valid once the full stream was consumed.
				if err == io.EOF {
					assert.Equal(t, len(data), totalRead, "Reader %d EOF mismatch", readerId)
					return
				}

				require.NoError(t, err, "Reader %d error", readerId)

				if mathrand.Intn(1000) == 0 { //nolint:gosec
					t.Logf("Reader %d closing", readerId)
					// Simulate random close
					return
				}

				// Simulate random timing between reads
				time.Sleep(time.Millisecond * time.Duration(mathrand.Intn(5))) //nolint:gosec
			}

			assert.Equal(t, len(data), totalRead, "Reader %d total read mismatch", readerId)
		}(i)
	}

	wg.Wait()
}
|
||||||
@@ -117,7 +117,7 @@ func NewResultHandle(ctx context.Context, cc *client.Client, opt client.SolveOpt
|
|||||||
gwClient: c,
|
gwClient: c,
|
||||||
gwCtx: ctx,
|
gwCtx: ctx,
|
||||||
}
|
}
|
||||||
respErr = se
|
respErr = err // return original error to preserve stacktrace
|
||||||
close(done)
|
close(done)
|
||||||
|
|
||||||
// Block until the caller closes the ResultHandle.
|
// Block until the caller closes the ResultHandle.
|
||||||
@@ -160,6 +160,7 @@ func NewResultHandle(ctx context.Context, cc *client.Client, opt client.SolveOpt
|
|||||||
opt.Ref = ""
|
opt.Ref = ""
|
||||||
opt.Exports = nil
|
opt.Exports = nil
|
||||||
opt.CacheExports = nil
|
opt.CacheExports = nil
|
||||||
|
opt.Internal = true
|
||||||
_, respErr = cc.Build(ctx, opt, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
|
_, respErr = cc.Build(ctx, opt, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
|
||||||
res, err := evalDefinition(ctx, c, def)
|
res, err := evalDefinition(ctx, c, def)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -291,10 +292,10 @@ func (r *ResultHandle) build(buildFunc gateway.BuildFunc) (err error) {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *ResultHandle) getContainerConfig(ctx context.Context, c gateway.Client, cfg *controllerapi.InvokeConfig) (containerCfg gateway.NewContainerRequest, _ error) {
|
func (r *ResultHandle) getContainerConfig(cfg *controllerapi.InvokeConfig) (containerCfg gateway.NewContainerRequest, _ error) {
|
||||||
if r.res != nil && r.solveErr == nil {
|
if r.res != nil && r.solveErr == nil {
|
||||||
logrus.Debugf("creating container from successful build")
|
logrus.Debugf("creating container from successful build")
|
||||||
ccfg, err := containerConfigFromResult(ctx, r.res, c, *cfg)
|
ccfg, err := containerConfigFromResult(r.res, *cfg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return containerCfg, err
|
return containerCfg, err
|
||||||
}
|
}
|
||||||
@@ -326,7 +327,7 @@ func (r *ResultHandle) getProcessConfig(cfg *controllerapi.InvokeConfig, stdin i
|
|||||||
return processCfg, nil
|
return processCfg, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func containerConfigFromResult(ctx context.Context, res *gateway.Result, c gateway.Client, cfg controllerapi.InvokeConfig) (*gateway.NewContainerRequest, error) {
|
func containerConfigFromResult(res *gateway.Result, cfg controllerapi.InvokeConfig) (*gateway.NewContainerRequest, error) {
|
||||||
if cfg.Initial {
|
if cfg.Initial {
|
||||||
return nil, errors.Errorf("starting from the container from the initial state of the step is supported only on the failed steps")
|
return nil, errors.Errorf("starting from the container from the initial state of the step is supported only on the failed steps")
|
||||||
}
|
}
|
||||||
@@ -387,7 +388,7 @@ func populateProcessConfigFromResult(req *gateway.StartRequest, res *gateway.Res
|
|||||||
} else if img != nil {
|
} else if img != nil {
|
||||||
args = append(args, img.Config.Entrypoint...)
|
args = append(args, img.Config.Entrypoint...)
|
||||||
}
|
}
|
||||||
if cfg.Cmd != nil {
|
if !cfg.NoCmd {
|
||||||
args = append(args, cfg.Cmd...)
|
args = append(args, cfg.Cmd...)
|
||||||
} else if img != nil {
|
} else if img != nil {
|
||||||
args = append(args, img.Config.Cmd...)
|
args = append(args, img.Config.Cmd...)
|
||||||
|
|||||||
@@ -21,7 +21,7 @@ func createTempDockerfileFromURL(ctx context.Context, d *driver.DriverHandle, ur
|
|||||||
var out string
|
var out string
|
||||||
ch, done := progress.NewChannel(pw)
|
ch, done := progress.NewChannel(pw)
|
||||||
defer func() { <-done }()
|
defer func() { <-done }()
|
||||||
_, err = c.Build(ctx, client.SolveOpt{}, "buildx", func(ctx context.Context, c gwclient.Client) (*gwclient.Result, error) {
|
_, err = c.Build(ctx, client.SolveOpt{Internal: true}, "buildx", func(ctx context.Context, c gwclient.Client) (*gwclient.Result, error) {
|
||||||
def, err := llb.HTTP(url, llb.Filename("Dockerfile"), llb.WithCustomNamef("[internal] load %s", url)).Marshal(ctx)
|
def, err := llb.HTTP(url, llb.Filename("Dockerfile"), llb.WithCustomNamef("[internal] load %s", url)).Marshal(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
|||||||
@@ -3,14 +3,17 @@ package build
|
|||||||
import (
|
import (
|
||||||
"archive/tar"
|
"archive/tar"
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"context"
|
||||||
"net"
|
"net"
|
||||||
"os"
|
"os"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/docker/buildx/driver"
|
||||||
"github.com/docker/cli/opts"
|
"github.com/docker/cli/opts"
|
||||||
"github.com/docker/docker/builder/remotecontext/urlutil"
|
|
||||||
"github.com/moby/buildkit/util/gitutil"
|
"github.com/moby/buildkit/util/gitutil"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -22,8 +25,15 @@ const (
|
|||||||
mobyHostGatewayName = "host-gateway"
|
mobyHostGatewayName = "host-gateway"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// isHTTPURL returns true if the provided str is an HTTP(S) URL by checking if it
|
||||||
|
// has a http:// or https:// scheme. No validation is performed to verify if the
|
||||||
|
// URL is well-formed.
|
||||||
|
func isHTTPURL(str string) bool {
|
||||||
|
return strings.HasPrefix(str, "https://") || strings.HasPrefix(str, "http://")
|
||||||
|
}
|
||||||
|
|
||||||
func IsRemoteURL(c string) bool {
|
func IsRemoteURL(c string) bool {
|
||||||
if urlutil.IsURL(c) {
|
if isHTTPURL(c) {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
if _, err := gitutil.ParseGitRef(c); err == nil {
|
if _, err := gitutil.ParseGitRef(c); err == nil {
|
||||||
@@ -32,11 +42,6 @@ func IsRemoteURL(c string) bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
func isLocalDir(c string) bool {
|
|
||||||
st, err := os.Stat(c)
|
|
||||||
return err == nil && st.IsDir()
|
|
||||||
}
|
|
||||||
|
|
||||||
func isArchive(header []byte) bool {
|
func isArchive(header []byte) bool {
|
||||||
for _, m := range [][]byte{
|
for _, m := range [][]byte{
|
||||||
{0x42, 0x5A, 0x68}, // bzip2
|
{0x42, 0x5A, 0x68}, // bzip2
|
||||||
@@ -57,18 +62,34 @@ func isArchive(header []byte) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// toBuildkitExtraHosts converts hosts from docker key:value format to buildkit's csv format
|
// toBuildkitExtraHosts converts hosts from docker key:value format to buildkit's csv format
|
||||||
func toBuildkitExtraHosts(inp []string, mobyDriver bool) (string, error) {
|
func toBuildkitExtraHosts(ctx context.Context, inp []string, nodeDriver *driver.DriverHandle) (string, error) {
|
||||||
if len(inp) == 0 {
|
if len(inp) == 0 {
|
||||||
return "", nil
|
return "", nil
|
||||||
}
|
}
|
||||||
hosts := make([]string, 0, len(inp))
|
hosts := make([]string, 0, len(inp))
|
||||||
for _, h := range inp {
|
for _, h := range inp {
|
||||||
host, ip, ok := strings.Cut(h, ":")
|
host, ip, ok := strings.Cut(h, "=")
|
||||||
|
if !ok {
|
||||||
|
host, ip, ok = strings.Cut(h, ":")
|
||||||
|
}
|
||||||
if !ok || host == "" || ip == "" {
|
if !ok || host == "" || ip == "" {
|
||||||
return "", errors.Errorf("invalid host %s", h)
|
return "", errors.Errorf("invalid host %s", h)
|
||||||
}
|
}
|
||||||
// Skip IP address validation for "host-gateway" string with moby driver
|
// If the IP Address is a "host-gateway", replace this value with the
|
||||||
if !mobyDriver || ip != mobyHostGatewayName {
|
// IP address provided by the worker's label.
|
||||||
|
if ip == mobyHostGatewayName {
|
||||||
|
hgip, err := nodeDriver.HostGatewayIP(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return "", errors.Wrap(err, "unable to derive the IP value for host-gateway")
|
||||||
|
}
|
||||||
|
ip = hgip.String()
|
||||||
|
} else {
|
||||||
|
// If the address is enclosed in square brackets, extract it (for IPv6, but
|
||||||
|
// permit it for IPv4 as well; we don't know the address family here, but it's
|
||||||
|
// unambiguous).
|
||||||
|
if len(ip) > 2 && ip[0] == '[' && ip[len(ip)-1] == ']' {
|
||||||
|
ip = ip[1 : len(ip)-1]
|
||||||
|
}
|
||||||
if net.ParseIP(ip) == nil {
|
if net.ParseIP(ip) == nil {
|
||||||
return "", errors.Errorf("invalid host %s", h)
|
return "", errors.Errorf("invalid host %s", h)
|
||||||
}
|
}
|
||||||
@@ -89,3 +110,21 @@ func toBuildkitUlimits(inp *opts.UlimitOpt) (string, error) {
|
|||||||
}
|
}
|
||||||
return strings.Join(ulimits, ","), nil
|
return strings.Join(ulimits, ","), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func notSupported(f driver.Feature, d *driver.DriverHandle, docs string) error {
|
||||||
|
return errors.Errorf(`%s is not supported for the %s driver.
|
||||||
|
Switch to a different driver, or turn on the containerd image store, and try again.
|
||||||
|
Learn more at %s`, f, d.Factory().Name(), docs)
|
||||||
|
}
|
||||||
|
|
||||||
|
func noDefaultLoad() bool {
|
||||||
|
v, ok := os.LookupEnv("BUILDX_NO_DEFAULT_LOAD")
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
b, err := strconv.ParseBool(v)
|
||||||
|
if err != nil {
|
||||||
|
logrus.Warnf("invalid non-bool value for BUILDX_NO_DEFAULT_LOAD: %s", v)
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|||||||
148
build/utils_test.go
Normal file
148
build/utils_test.go
Normal file
@@ -0,0 +1,148 @@
|
|||||||
|
package build
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestToBuildkitExtraHosts(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
doc string
|
||||||
|
input []string
|
||||||
|
expectedOut string // Expect output==input if not set.
|
||||||
|
expectedErr string // Expect success if not set.
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
doc: "IPv4, colon sep",
|
||||||
|
input: []string{`myhost:192.168.0.1`},
|
||||||
|
expectedOut: `myhost=192.168.0.1`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "IPv4, eq sep",
|
||||||
|
input: []string{`myhost=192.168.0.1`},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "Weird but permitted, IPv4 with brackets",
|
||||||
|
input: []string{`myhost=[192.168.0.1]`},
|
||||||
|
expectedOut: `myhost=192.168.0.1`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "Host and domain",
|
||||||
|
input: []string{`host.and.domain.invalid:10.0.2.1`},
|
||||||
|
expectedOut: `host.and.domain.invalid=10.0.2.1`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "IPv6, colon sep",
|
||||||
|
input: []string{`anipv6host:2003:ab34:e::1`},
|
||||||
|
expectedOut: `anipv6host=2003:ab34:e::1`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "IPv6, colon sep, brackets",
|
||||||
|
input: []string{`anipv6host:[2003:ab34:e::1]`},
|
||||||
|
expectedOut: `anipv6host=2003:ab34:e::1`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "IPv6, eq sep, brackets",
|
||||||
|
input: []string{`anipv6host=[2003:ab34:e::1]`},
|
||||||
|
expectedOut: `anipv6host=2003:ab34:e::1`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "IPv6 localhost, colon sep",
|
||||||
|
input: []string{`ipv6local:::1`},
|
||||||
|
expectedOut: `ipv6local=::1`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "IPv6 localhost, eq sep",
|
||||||
|
input: []string{`ipv6local=::1`},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "IPv6 localhost, eq sep, brackets",
|
||||||
|
input: []string{`ipv6local=[::1]`},
|
||||||
|
expectedOut: `ipv6local=::1`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "IPv6 localhost, non-canonical, colon sep",
|
||||||
|
input: []string{`ipv6local:0:0:0:0:0:0:0:1`},
|
||||||
|
expectedOut: `ipv6local=0:0:0:0:0:0:0:1`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "IPv6 localhost, non-canonical, eq sep",
|
||||||
|
input: []string{`ipv6local=0:0:0:0:0:0:0:1`},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "IPv6 localhost, non-canonical, eq sep, brackets",
|
||||||
|
input: []string{`ipv6local=[0:0:0:0:0:0:0:1]`},
|
||||||
|
expectedOut: `ipv6local=0:0:0:0:0:0:0:1`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "Bad address, colon sep",
|
||||||
|
input: []string{`myhost:192.notanipaddress.1`},
|
||||||
|
expectedErr: `invalid IP address in add-host: "192.notanipaddress.1"`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "Bad address, eq sep",
|
||||||
|
input: []string{`myhost=192.notanipaddress.1`},
|
||||||
|
expectedErr: `invalid IP address in add-host: "192.notanipaddress.1"`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "No sep",
|
||||||
|
input: []string{`thathost-nosemicolon10.0.0.1`},
|
||||||
|
expectedErr: `bad format for add-host: "thathost-nosemicolon10.0.0.1"`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "Bad IPv6",
|
||||||
|
input: []string{`anipv6host:::::1`},
|
||||||
|
expectedErr: `invalid IP address in add-host: "::::1"`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "Bad IPv6, trailing colons",
|
||||||
|
input: []string{`ipv6local:::0::`},
|
||||||
|
expectedErr: `invalid IP address in add-host: "::0::"`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "Bad IPv6, missing close bracket",
|
||||||
|
input: []string{`ipv6addr=[::1`},
|
||||||
|
expectedErr: `invalid IP address in add-host: "[::1"`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "Bad IPv6, missing open bracket",
|
||||||
|
input: []string{`ipv6addr=::1]`},
|
||||||
|
expectedErr: `invalid IP address in add-host: "::1]"`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "Missing address, colon sep",
|
||||||
|
input: []string{`myhost.invalid:`},
|
||||||
|
expectedErr: `invalid IP address in add-host: ""`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "Missing address, eq sep",
|
||||||
|
input: []string{`myhost.invalid=`},
|
||||||
|
expectedErr: `invalid IP address in add-host: ""`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
doc: "No input",
|
||||||
|
input: []string{``},
|
||||||
|
expectedErr: `bad format for add-host: ""`,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
tc := tc
|
||||||
|
if tc.expectedOut == "" {
|
||||||
|
tc.expectedOut = strings.Join(tc.input, ",")
|
||||||
|
}
|
||||||
|
t.Run(tc.doc, func(t *testing.T) {
|
||||||
|
actualOut, actualErr := toBuildkitExtraHosts(context.TODO(), tc.input, nil)
|
||||||
|
if tc.expectedErr == "" {
|
||||||
|
require.Equal(t, tc.expectedOut, actualOut)
|
||||||
|
require.Nil(t, actualErr)
|
||||||
|
} else {
|
||||||
|
require.Zero(t, actualOut)
|
||||||
|
require.Error(t, actualErr, tc.expectedErr)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -2,18 +2,31 @@ package builder
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"sort"
|
"sort"
|
||||||
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/docker/buildx/driver"
|
"github.com/docker/buildx/driver"
|
||||||
|
k8sutil "github.com/docker/buildx/driver/kubernetes/util"
|
||||||
|
remoteutil "github.com/docker/buildx/driver/remote/util"
|
||||||
|
"github.com/docker/buildx/localstate"
|
||||||
"github.com/docker/buildx/store"
|
"github.com/docker/buildx/store"
|
||||||
"github.com/docker/buildx/store/storeutil"
|
"github.com/docker/buildx/store/storeutil"
|
||||||
|
"github.com/docker/buildx/util/confutil"
|
||||||
"github.com/docker/buildx/util/dockerutil"
|
"github.com/docker/buildx/util/dockerutil"
|
||||||
"github.com/docker/buildx/util/imagetools"
|
"github.com/docker/buildx/util/imagetools"
|
||||||
"github.com/docker/buildx/util/progress"
|
"github.com/docker/buildx/util/progress"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
|
dopts "github.com/docker/cli/opts"
|
||||||
|
"github.com/google/shlex"
|
||||||
|
"github.com/moby/buildkit/util/progress/progressui"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/spf13/pflag"
|
||||||
|
"github.com/tonistiigi/go-csvvalue"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -157,13 +170,14 @@ func (b *Builder) Boot(ctx context.Context) (bool, error) {
|
|||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
printer, err := progress.NewPrinter(context.TODO(), os.Stderr, os.Stderr, progress.PrinterModeAuto)
|
printer, err := progress.NewPrinter(context.TODO(), os.Stderr, progressui.AutoMode)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
baseCtx := ctx
|
baseCtx := ctx
|
||||||
eg, _ := errgroup.WithContext(ctx)
|
eg, _ := errgroup.WithContext(ctx)
|
||||||
|
errCh := make(chan error, len(toBoot))
|
||||||
for _, idx := range toBoot {
|
for _, idx := range toBoot {
|
||||||
func(idx int) {
|
func(idx int) {
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
@@ -171,6 +185,7 @@ func (b *Builder) Boot(ctx context.Context) (bool, error) {
|
|||||||
_, err := driver.Boot(ctx, baseCtx, b.nodes[idx].Driver, pw)
|
_, err := driver.Boot(ctx, baseCtx, b.nodes[idx].Driver, pw)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
b.nodes[idx].Err = err
|
b.nodes[idx].Err = err
|
||||||
|
errCh <- err
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
@@ -178,11 +193,15 @@ func (b *Builder) Boot(ctx context.Context) (bool, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
err = eg.Wait()
|
err = eg.Wait()
|
||||||
|
close(errCh)
|
||||||
err1 := printer.Wait()
|
err1 := printer.Wait()
|
||||||
if err == nil {
|
if err == nil {
|
||||||
err = err1
|
err = err1
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if err == nil && len(errCh) == len(toBoot) {
|
||||||
|
return false, <-errCh
|
||||||
|
}
|
||||||
return true, err
|
return true, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -207,7 +226,7 @@ type driverFactory struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Factory returns the driver factory.
|
// Factory returns the driver factory.
|
||||||
func (b *Builder) Factory(ctx context.Context) (_ driver.Factory, err error) {
|
func (b *Builder) Factory(ctx context.Context, dialMeta map[string][]string) (_ driver.Factory, err error) {
|
||||||
b.driverFactory.once.Do(func() {
|
b.driverFactory.once.Do(func() {
|
||||||
if b.Driver != "" {
|
if b.Driver != "" {
|
||||||
b.driverFactory.Factory, err = driver.GetFactory(b.Driver, true)
|
b.driverFactory.Factory, err = driver.GetFactory(b.Driver, true)
|
||||||
@@ -230,7 +249,7 @@ func (b *Builder) Factory(ctx context.Context) (_ driver.Factory, err error) {
|
|||||||
if _, err = dockerapi.Ping(ctx); err != nil {
|
if _, err = dockerapi.Ping(ctx); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
b.driverFactory.Factory, err = driver.GetDefaultFactory(ctx, ep, dockerapi, false)
|
b.driverFactory.Factory, err = driver.GetDefaultFactory(ctx, ep, dockerapi, false, dialMeta)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -240,6 +259,28 @@ func (b *Builder) Factory(ctx context.Context) (_ driver.Factory, err error) {
|
|||||||
return b.driverFactory.Factory, err
|
return b.driverFactory.Factory, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (b *Builder) MarshalJSON() ([]byte, error) {
|
||||||
|
var berr string
|
||||||
|
if b.err != nil {
|
||||||
|
berr = strings.TrimSpace(b.err.Error())
|
||||||
|
}
|
||||||
|
return json.Marshal(struct {
|
||||||
|
Name string
|
||||||
|
Driver string
|
||||||
|
LastActivity time.Time `json:",omitempty"`
|
||||||
|
Dynamic bool
|
||||||
|
Nodes []Node
|
||||||
|
Err string `json:",omitempty"`
|
||||||
|
}{
|
||||||
|
Name: b.Name,
|
||||||
|
Driver: b.Driver,
|
||||||
|
LastActivity: b.LastActivity,
|
||||||
|
Dynamic: b.Dynamic,
|
||||||
|
Nodes: b.nodes,
|
||||||
|
Err: berr,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// GetBuilders returns all builders
|
// GetBuilders returns all builders
|
||||||
func GetBuilders(dockerCli command.Cli, txn *store.Txn) ([]*Builder, error) {
|
func GetBuilders(dockerCli command.Cli, txn *store.Txn) ([]*Builder, error) {
|
||||||
storeng, err := txn.List()
|
storeng, err := txn.List()
|
||||||
@@ -290,3 +331,363 @@ func GetBuilders(dockerCli command.Cli, txn *store.Txn) ([]*Builder, error) {
|
|||||||
|
|
||||||
return builders, nil
|
return builders, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type CreateOpts struct {
|
||||||
|
Name string
|
||||||
|
Driver string
|
||||||
|
NodeName string
|
||||||
|
Platforms []string
|
||||||
|
BuildkitdFlags string
|
||||||
|
BuildkitdConfigFile string
|
||||||
|
DriverOpts []string
|
||||||
|
Use bool
|
||||||
|
Endpoint string
|
||||||
|
Append bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func Create(ctx context.Context, txn *store.Txn, dockerCli command.Cli, opts CreateOpts) (*Builder, error) {
|
||||||
|
var err error
|
||||||
|
|
||||||
|
if opts.Name == "default" {
|
||||||
|
return nil, errors.Errorf("default is a reserved name and cannot be used to identify builder instance")
|
||||||
|
} else if opts.Append && opts.Name == "" {
|
||||||
|
return nil, errors.Errorf("append requires a builder name")
|
||||||
|
}
|
||||||
|
|
||||||
|
name := opts.Name
|
||||||
|
if name == "" {
|
||||||
|
name, err = store.GenerateName(txn)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !opts.Append {
|
||||||
|
contexts, err := dockerCli.ContextStore().List()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
for _, c := range contexts {
|
||||||
|
if c.Name == name {
|
||||||
|
return nil, errors.Errorf("instance name %q already exists as context builder", name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ng, err := txn.NodeGroupByName(name)
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(errors.Cause(err)) {
|
||||||
|
if opts.Append && opts.Name != "" {
|
||||||
|
return nil, errors.Errorf("failed to find instance %q for append", opts.Name)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
buildkitHost := os.Getenv("BUILDKIT_HOST")
|
||||||
|
|
||||||
|
driverName := opts.Driver
|
||||||
|
if driverName == "" {
|
||||||
|
if ng != nil {
|
||||||
|
driverName = ng.Driver
|
||||||
|
} else if opts.Endpoint == "" && buildkitHost != "" {
|
||||||
|
driverName = "remote"
|
||||||
|
} else {
|
||||||
|
f, err := driver.GetDefaultFactory(ctx, opts.Endpoint, dockerCli.Client(), true, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if f == nil {
|
||||||
|
return nil, errors.Errorf("no valid drivers found")
|
||||||
|
}
|
||||||
|
driverName = f.Name()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if ng != nil {
|
||||||
|
if opts.NodeName == "" && !opts.Append {
|
||||||
|
return nil, errors.Errorf("existing instance for %q but no append mode, specify the node name to make changes for existing instances", name)
|
||||||
|
}
|
||||||
|
if driverName != ng.Driver {
|
||||||
|
return nil, errors.Errorf("existing instance for %q but has mismatched driver %q", name, ng.Driver)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := driver.GetFactory(driverName, true); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ngOriginal := ng
|
||||||
|
if ngOriginal != nil {
|
||||||
|
ngOriginal = ngOriginal.Copy()
|
||||||
|
}
|
||||||
|
|
||||||
|
if ng == nil {
|
||||||
|
ng = &store.NodeGroup{
|
||||||
|
Name: name,
|
||||||
|
Driver: driverName,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
driverOpts, err := csvToMap(opts.DriverOpts)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
buildkitdConfigFile := opts.BuildkitdConfigFile
|
||||||
|
if buildkitdConfigFile == "" {
|
||||||
|
// if buildkit daemon config is not provided, check if the default one
|
||||||
|
// is available and use it
|
||||||
|
if f, ok := confutil.DefaultConfigFile(dockerCli); ok {
|
||||||
|
buildkitdConfigFile = f
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
buildkitdFlags, err := parseBuildkitdFlags(opts.BuildkitdFlags, driverName, driverOpts, buildkitdConfigFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var ep string
|
||||||
|
var setEp bool
|
||||||
|
switch {
|
||||||
|
case driverName == "kubernetes":
|
||||||
|
if opts.Endpoint != "" {
|
||||||
|
return nil, errors.Errorf("kubernetes driver does not support endpoint args %q", opts.Endpoint)
|
||||||
|
}
|
||||||
|
// generate node name if not provided to avoid duplicated endpoint
|
||||||
|
// error: https://github.com/docker/setup-buildx-action/issues/215
|
||||||
|
nodeName := opts.NodeName
|
||||||
|
if nodeName == "" {
|
||||||
|
nodeName, err = k8sutil.GenerateNodeName(name, txn)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// naming endpoint to make append works
|
||||||
|
ep = (&url.URL{
|
||||||
|
Scheme: driverName,
|
||||||
|
Path: "/" + name,
|
||||||
|
RawQuery: (&url.Values{
|
||||||
|
"deployment": {nodeName},
|
||||||
|
"kubeconfig": {os.Getenv("KUBECONFIG")},
|
||||||
|
}).Encode(),
|
||||||
|
}).String()
|
||||||
|
setEp = false
|
||||||
|
case driverName == "remote":
|
||||||
|
if opts.Endpoint != "" {
|
||||||
|
ep = opts.Endpoint
|
||||||
|
} else if buildkitHost != "" {
|
||||||
|
ep = buildkitHost
|
||||||
|
} else {
|
||||||
|
return nil, errors.Errorf("no remote endpoint provided")
|
||||||
|
}
|
||||||
|
ep, err = validateBuildkitEndpoint(ep)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
setEp = true
|
||||||
|
case opts.Endpoint != "":
|
||||||
|
ep, err = validateEndpoint(dockerCli, opts.Endpoint)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
setEp = true
|
||||||
|
default:
|
||||||
|
if dockerCli.CurrentContext() == "default" && dockerCli.DockerEndpoint().TLSData != nil {
|
||||||
|
return nil, errors.Errorf("could not create a builder instance with TLS data loaded from environment. Please use `docker context create <context-name>` to create a context for current environment and then create a builder instance with context set to <context-name>")
|
||||||
|
}
|
||||||
|
ep, err = dockerutil.GetCurrentEndpoint(dockerCli)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
setEp = false
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := ng.Update(opts.NodeName, ep, opts.Platforms, setEp, opts.Append, buildkitdFlags, buildkitdConfigFile, driverOpts); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := txn.Save(ng); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
b, err := New(dockerCli,
|
||||||
|
WithName(ng.Name),
|
||||||
|
WithStore(txn),
|
||||||
|
WithSkippedValidation(),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
nodes, err := b.LoadNodes(timeoutCtx, WithData())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, node := range nodes {
|
||||||
|
if err := node.Err; err != nil {
|
||||||
|
err := errors.Errorf("failed to initialize builder %s (%s): %s", ng.Name, node.Name, err)
|
||||||
|
var err2 error
|
||||||
|
if ngOriginal == nil {
|
||||||
|
err2 = txn.Remove(ng.Name)
|
||||||
|
} else {
|
||||||
|
err2 = txn.Save(ngOriginal)
|
||||||
|
}
|
||||||
|
if err2 != nil {
|
||||||
|
return nil, errors.Errorf("could not rollback to previous state: %s", err2)
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.Use && ep != "" {
|
||||||
|
current, err := dockerutil.GetCurrentEndpoint(dockerCli)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := txn.SetCurrent(current, ng.Name, false, false); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return b, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type LeaveOpts struct {
|
||||||
|
Name string
|
||||||
|
NodeName string
|
||||||
|
}
|
||||||
|
|
||||||
|
func Leave(ctx context.Context, txn *store.Txn, dockerCli command.Cli, opts LeaveOpts) error {
|
||||||
|
if opts.Name == "" {
|
||||||
|
return errors.Errorf("leave requires instance name")
|
||||||
|
}
|
||||||
|
if opts.NodeName == "" {
|
||||||
|
return errors.Errorf("leave requires node name")
|
||||||
|
}
|
||||||
|
|
||||||
|
ng, err := txn.NodeGroupByName(opts.Name)
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(errors.Cause(err)) {
|
||||||
|
return errors.Errorf("failed to find instance %q for leave", opts.Name)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := ng.Leave(opts.NodeName); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
ls, err := localstate.New(confutil.ConfigDir(dockerCli))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := ls.RemoveBuilderNode(ng.Name, opts.NodeName); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return txn.Save(ng)
|
||||||
|
}
|
||||||
|
|
||||||
|
func csvToMap(in []string) (map[string]string, error) {
|
||||||
|
if len(in) == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
m := make(map[string]string, len(in))
|
||||||
|
for _, s := range in {
|
||||||
|
fields, err := csvvalue.Fields(s, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
for _, v := range fields {
|
||||||
|
p := strings.SplitN(v, "=", 2)
|
||||||
|
if len(p) != 2 {
|
||||||
|
return nil, errors.Errorf("invalid value %q, expecting k=v", v)
|
||||||
|
}
|
||||||
|
m[p[0]] = p[1]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return m, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// validateEndpoint validates that endpoint is either a context or a docker host
|
||||||
|
func validateEndpoint(dockerCli command.Cli, ep string) (string, error) {
|
||||||
|
dem, err := dockerutil.GetDockerEndpoint(dockerCli, ep)
|
||||||
|
if err == nil && dem != nil {
|
||||||
|
if ep == "default" {
|
||||||
|
return dem.Host, nil
|
||||||
|
}
|
||||||
|
return ep, nil
|
||||||
|
}
|
||||||
|
h, err := dopts.ParseHost(true, ep)
|
||||||
|
if err != nil {
|
||||||
|
return "", errors.Wrapf(err, "failed to parse endpoint %s", ep)
|
||||||
|
}
|
||||||
|
return h, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// validateBuildkitEndpoint validates that endpoint is a valid buildkit host
|
||||||
|
func validateBuildkitEndpoint(ep string) (string, error) {
|
||||||
|
if err := remoteutil.IsValidEndpoint(ep); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return ep, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseBuildkitdFlags parses buildkit flags
|
||||||
|
func parseBuildkitdFlags(inp string, driver string, driverOpts map[string]string, buildkitdConfigFile string) (res []string, err error) {
|
||||||
|
if inp != "" {
|
||||||
|
res, err = shlex.Split(inp)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "failed to parse buildkit flags")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var allowInsecureEntitlements []string
|
||||||
|
flags := pflag.NewFlagSet("buildkitd", pflag.ContinueOnError)
|
||||||
|
flags.Usage = func() {}
|
||||||
|
flags.StringArrayVar(&allowInsecureEntitlements, "allow-insecure-entitlement", nil, "")
|
||||||
|
_ = flags.Parse(res)
|
||||||
|
|
||||||
|
var hasNetworkHostEntitlement bool
|
||||||
|
for _, e := range allowInsecureEntitlements {
|
||||||
|
if e == "network.host" {
|
||||||
|
hasNetworkHostEntitlement = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var hasNetworkHostEntitlementInConf bool
|
||||||
|
if buildkitdConfigFile != "" {
|
||||||
|
btoml, err := confutil.LoadConfigTree(buildkitdConfigFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
} else if btoml != nil {
|
||||||
|
if ies := btoml.GetArray("insecure-entitlements"); ies != nil {
|
||||||
|
for _, e := range ies.([]string) {
|
||||||
|
if e == "network.host" {
|
||||||
|
hasNetworkHostEntitlementInConf = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := driverOpts["network"]; ok && v == "host" && !hasNetworkHostEntitlement && driver == "docker-container" {
|
||||||
|
// always set network.host entitlement if user has set network=host
|
||||||
|
res = append(res, "--allow-insecure-entitlement=network.host")
|
||||||
|
} else if len(allowInsecureEntitlements) == 0 && !hasNetworkHostEntitlementInConf && (driver == "kubernetes" || driver == "docker-container") {
|
||||||
|
// set network.host entitlement if user does not provide any as
|
||||||
|
// network is isolated for container drivers.
|
||||||
|
res = append(res, "--allow-insecure-entitlement=network.host")
|
||||||
|
}
|
||||||
|
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|||||||
173
builder/builder_test.go
Normal file
173
builder/builder_test.go
Normal file
@@ -0,0 +1,173 @@
|
|||||||
|
package builder
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestCsvToMap(t *testing.T) {
|
||||||
|
d := []string{
|
||||||
|
"\"tolerations=key=foo,value=bar;key=foo2,value=bar2\",replicas=1",
|
||||||
|
"namespace=default",
|
||||||
|
}
|
||||||
|
r, err := csvToMap(d)
|
||||||
|
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Contains(t, r, "tolerations")
|
||||||
|
require.Equal(t, r["tolerations"], "key=foo,value=bar;key=foo2,value=bar2")
|
||||||
|
|
||||||
|
require.Contains(t, r, "replicas")
|
||||||
|
require.Equal(t, r["replicas"], "1")
|
||||||
|
|
||||||
|
require.Contains(t, r, "namespace")
|
||||||
|
require.Equal(t, r["namespace"], "default")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestParseBuildkitdFlags(t *testing.T) {
|
||||||
|
buildkitdConf := `
|
||||||
|
# debug enables additional debug logging
|
||||||
|
debug = true
|
||||||
|
# insecure-entitlements allows insecure entitlements, disabled by default.
|
||||||
|
insecure-entitlements = [ "network.host", "security.insecure" ]
|
||||||
|
[log]
|
||||||
|
# log formatter: json or text
|
||||||
|
format = "text"
|
||||||
|
`
|
||||||
|
dirConf := t.TempDir()
|
||||||
|
buildkitdConfPath := path.Join(dirConf, "buildkitd-conf.toml")
|
||||||
|
require.NoError(t, os.WriteFile(buildkitdConfPath, []byte(buildkitdConf), 0644))
|
||||||
|
|
||||||
|
testCases := []struct {
|
||||||
|
name string
|
||||||
|
flags string
|
||||||
|
driver string
|
||||||
|
driverOpts map[string]string
|
||||||
|
buildkitdConfigFile string
|
||||||
|
expected []string
|
||||||
|
wantErr bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
"docker-container no flags",
|
||||||
|
"",
|
||||||
|
"docker-container",
|
||||||
|
nil,
|
||||||
|
"",
|
||||||
|
[]string{
|
||||||
|
"--allow-insecure-entitlement=network.host",
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"kubernetes no flags",
|
||||||
|
"",
|
||||||
|
"kubernetes",
|
||||||
|
nil,
|
||||||
|
"",
|
||||||
|
[]string{
|
||||||
|
"--allow-insecure-entitlement=network.host",
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"remote no flags",
|
||||||
|
"",
|
||||||
|
"remote",
|
||||||
|
nil,
|
||||||
|
"",
|
||||||
|
nil,
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"docker-container with insecure flag",
|
||||||
|
"--allow-insecure-entitlement=security.insecure",
|
||||||
|
"docker-container",
|
||||||
|
nil,
|
||||||
|
"",
|
||||||
|
[]string{
|
||||||
|
"--allow-insecure-entitlement=security.insecure",
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"docker-container with insecure and host flag",
|
||||||
|
"--allow-insecure-entitlement=network.host --allow-insecure-entitlement=security.insecure",
|
||||||
|
"docker-container",
|
||||||
|
nil,
|
||||||
|
"",
|
||||||
|
[]string{
|
||||||
|
"--allow-insecure-entitlement=network.host",
|
||||||
|
"--allow-insecure-entitlement=security.insecure",
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"docker-container with network host opt",
|
||||||
|
"",
|
||||||
|
"docker-container",
|
||||||
|
map[string]string{"network": "host"},
|
||||||
|
"",
|
||||||
|
[]string{
|
||||||
|
"--allow-insecure-entitlement=network.host",
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"docker-container with host flag and network host opt",
|
||||||
|
"--allow-insecure-entitlement=network.host",
|
||||||
|
"docker-container",
|
||||||
|
map[string]string{"network": "host"},
|
||||||
|
"",
|
||||||
|
[]string{
|
||||||
|
"--allow-insecure-entitlement=network.host",
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"docker-container with insecure, host flag and network host opt",
|
||||||
|
"--allow-insecure-entitlement=network.host --allow-insecure-entitlement=security.insecure",
|
||||||
|
"docker-container",
|
||||||
|
map[string]string{"network": "host"},
|
||||||
|
"",
|
||||||
|
[]string{
|
||||||
|
"--allow-insecure-entitlement=network.host",
|
||||||
|
"--allow-insecure-entitlement=security.insecure",
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"docker-container with buildkitd conf setting network.host entitlement",
|
||||||
|
"",
|
||||||
|
"docker-container",
|
||||||
|
nil,
|
||||||
|
buildkitdConfPath,
|
||||||
|
nil,
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"error parsing flags",
|
||||||
|
"foo'",
|
||||||
|
"docker-container",
|
||||||
|
nil,
|
||||||
|
"",
|
||||||
|
nil,
|
||||||
|
true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range testCases {
|
||||||
|
tt := tt
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
flags, err := parseBuildkitdFlags(tt.flags, tt.driver, tt.driverOpts, tt.buildkitdConfigFile)
|
||||||
|
if tt.wantErr {
|
||||||
|
require.Error(t, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, tt.expected, flags)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
155
builder/node.go
155
builder/node.go
@@ -2,9 +2,12 @@ package builder
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/containerd/platforms"
|
||||||
"github.com/docker/buildx/driver"
|
"github.com/docker/buildx/driver"
|
||||||
ctxkube "github.com/docker/buildx/driver/kubernetes/context"
|
|
||||||
"github.com/docker/buildx/store"
|
"github.com/docker/buildx/store"
|
||||||
"github.com/docker/buildx/store/storeutil"
|
"github.com/docker/buildx/store/storeutil"
|
||||||
"github.com/docker/buildx/util/dockerutil"
|
"github.com/docker/buildx/util/dockerutil"
|
||||||
@@ -14,7 +17,6 @@ import (
|
|||||||
"github.com/moby/buildkit/util/grpcerrors"
|
"github.com/moby/buildkit/util/grpcerrors"
|
||||||
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
|
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
"google.golang.org/grpc/codes"
|
"google.golang.org/grpc/codes"
|
||||||
)
|
)
|
||||||
@@ -24,13 +26,16 @@ type Node struct {
|
|||||||
Builder string
|
Builder string
|
||||||
Driver *driver.DriverHandle
|
Driver *driver.DriverHandle
|
||||||
DriverInfo *driver.Info
|
DriverInfo *driver.Info
|
||||||
Platforms []ocispecs.Platform
|
|
||||||
GCPolicy []client.PruneInfo
|
|
||||||
Labels map[string]string
|
|
||||||
ImageOpt imagetools.Opt
|
ImageOpt imagetools.Opt
|
||||||
ProxyConfig map[string]string
|
ProxyConfig map[string]string
|
||||||
Version string
|
Version string
|
||||||
Err error
|
Err error
|
||||||
|
|
||||||
|
// worker settings
|
||||||
|
IDs []string
|
||||||
|
Platforms []ocispecs.Platform
|
||||||
|
GCPolicy []client.PruneInfo
|
||||||
|
Labels map[string]string
|
||||||
}
|
}
|
||||||
|
|
||||||
// Nodes returns nodes for this builder.
|
// Nodes returns nodes for this builder.
|
||||||
@@ -38,9 +43,42 @@ func (b *Builder) Nodes() []Node {
|
|||||||
return b.nodes
|
return b.nodes
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type LoadNodesOption func(*loadNodesOptions)
|
||||||
|
|
||||||
|
type loadNodesOptions struct {
|
||||||
|
data bool
|
||||||
|
dialMeta map[string][]string
|
||||||
|
clientOpt []client.ClientOpt
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithData() LoadNodesOption {
|
||||||
|
return func(o *loadNodesOptions) {
|
||||||
|
o.data = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithDialMeta(dialMeta map[string][]string) LoadNodesOption {
|
||||||
|
return func(o *loadNodesOptions) {
|
||||||
|
o.dialMeta = dialMeta
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithClientOpt(clientOpt ...client.ClientOpt) LoadNodesOption {
|
||||||
|
return func(o *loadNodesOptions) {
|
||||||
|
o.clientOpt = clientOpt
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// LoadNodes loads and returns nodes for this builder.
|
// LoadNodes loads and returns nodes for this builder.
|
||||||
// TODO: this should be a method on a Node object and lazy load data for each driver.
|
// TODO: this should be a method on a Node object and lazy load data for each driver.
|
||||||
func (b *Builder) LoadNodes(ctx context.Context, withData bool) (_ []Node, err error) {
|
func (b *Builder) LoadNodes(ctx context.Context, opts ...LoadNodesOption) (_ []Node, err error) {
|
||||||
|
lno := loadNodesOptions{
|
||||||
|
data: false,
|
||||||
|
}
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(&lno)
|
||||||
|
}
|
||||||
|
|
||||||
eg, _ := errgroup.WithContext(ctx)
|
eg, _ := errgroup.WithContext(ctx)
|
||||||
b.nodes = make([]Node, len(b.NodeGroup.Nodes))
|
b.nodes = make([]Node, len(b.NodeGroup.Nodes))
|
||||||
|
|
||||||
@@ -50,7 +88,7 @@ func (b *Builder) LoadNodes(ctx context.Context, withData bool) (_ []Node, err e
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
factory, err := b.Factory(ctx)
|
factory, err := b.Factory(ctx, lno.dialMeta)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -79,37 +117,19 @@ func (b *Builder) LoadNodes(ctx context.Context, withData bool) (_ []Node, err e
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
contextStore := b.opts.dockerCli.ContextStore()
|
d, err := driver.GetDriver(ctx, factory, driver.InitConfig{
|
||||||
|
Name: driver.BuilderName(n.Name),
|
||||||
var kcc driver.KubeClientConfig
|
EndpointAddr: n.Endpoint,
|
||||||
kcc, err = ctxkube.ConfigFromContext(n.Endpoint, contextStore)
|
DockerAPI: dockerapi,
|
||||||
if err != nil {
|
ContextStore: b.opts.dockerCli.ContextStore(),
|
||||||
// err is returned if n.Endpoint is non-context name like "unix:///var/run/docker.sock".
|
BuildkitdFlags: n.BuildkitdFlags,
|
||||||
// try again with name="default".
|
Files: n.Files,
|
||||||
// FIXME(@AkihiroSuda): n should retain real context name.
|
DriverOpts: n.DriverOpts,
|
||||||
kcc, err = ctxkube.ConfigFromContext("default", contextStore)
|
Auth: imageopt.Auth,
|
||||||
if err != nil {
|
Platforms: n.Platforms,
|
||||||
logrus.Error(err)
|
ContextPathHash: b.opts.contextPathHash,
|
||||||
}
|
DialMeta: lno.dialMeta,
|
||||||
}
|
})
|
||||||
|
|
||||||
tryToUseKubeConfigInCluster := false
|
|
||||||
if kcc == nil {
|
|
||||||
tryToUseKubeConfigInCluster = true
|
|
||||||
} else {
|
|
||||||
if _, err := kcc.ClientConfig(); err != nil {
|
|
||||||
tryToUseKubeConfigInCluster = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if tryToUseKubeConfigInCluster {
|
|
||||||
kccInCluster := driver.KubeClientConfigInCluster{}
|
|
||||||
if _, err := kccInCluster.ClientConfig(); err == nil {
|
|
||||||
logrus.Debug("using kube config in cluster")
|
|
||||||
kcc = kccInCluster
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
d, err := driver.GetDriver(ctx, "buildx_buildkit_"+n.Name, factory, n.Endpoint, dockerapi, imageopt.Auth, kcc, n.Flags, n.Files, n.DriverOpts, n.Platforms, b.opts.contextPathHash)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
node.Err = err
|
node.Err = err
|
||||||
return nil
|
return nil
|
||||||
@@ -117,8 +137,8 @@ func (b *Builder) LoadNodes(ctx context.Context, withData bool) (_ []Node, err e
|
|||||||
node.Driver = d
|
node.Driver = d
|
||||||
node.ImageOpt = imageopt
|
node.ImageOpt = imageopt
|
||||||
|
|
||||||
if withData {
|
if lno.data {
|
||||||
if err := node.loadData(ctx); err != nil {
|
if err := node.loadData(ctx, lno.clientOpt...); err != nil {
|
||||||
node.Err = err
|
node.Err = err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -132,7 +152,7 @@ func (b *Builder) LoadNodes(ctx context.Context, withData bool) (_ []Node, err e
|
|||||||
}
|
}
|
||||||
|
|
||||||
// TODO: This should be done in the routine loading driver data
|
// TODO: This should be done in the routine loading driver data
|
||||||
if withData {
|
if lno.data {
|
||||||
kubernetesDriverCount := 0
|
kubernetesDriverCount := 0
|
||||||
for _, d := range b.nodes {
|
for _, d := range b.nodes {
|
||||||
if d.DriverInfo != nil && len(d.DriverInfo.DynamicNodes) > 0 {
|
if d.DriverInfo != nil && len(d.DriverInfo.DynamicNodes) > 0 {
|
||||||
@@ -153,7 +173,7 @@ func (b *Builder) LoadNodes(ctx context.Context, withData bool) (_ []Node, err e
|
|||||||
if pl := di.DriverInfo.DynamicNodes[i].Platforms; len(pl) > 0 {
|
if pl := di.DriverInfo.DynamicNodes[i].Platforms; len(pl) > 0 {
|
||||||
diClone.Platforms = pl
|
diClone.Platforms = pl
|
||||||
}
|
}
|
||||||
nodes = append(nodes, di)
|
nodes = append(nodes, diClone)
|
||||||
}
|
}
|
||||||
dynamicNodes = append(dynamicNodes, di.DriverInfo.DynamicNodes...)
|
dynamicNodes = append(dynamicNodes, di.DriverInfo.DynamicNodes...)
|
||||||
}
|
}
|
||||||
@@ -169,7 +189,52 @@ func (b *Builder) LoadNodes(ctx context.Context, withData bool) (_ []Node, err e
|
|||||||
return b.nodes, nil
|
return b.nodes, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *Node) loadData(ctx context.Context) error {
|
func (n *Node) MarshalJSON() ([]byte, error) {
|
||||||
|
var status string
|
||||||
|
if n.DriverInfo != nil {
|
||||||
|
status = n.DriverInfo.Status.String()
|
||||||
|
}
|
||||||
|
var nerr string
|
||||||
|
if n.Err != nil {
|
||||||
|
status = "error"
|
||||||
|
nerr = strings.TrimSpace(n.Err.Error())
|
||||||
|
}
|
||||||
|
var pp []string
|
||||||
|
for _, p := range n.Platforms {
|
||||||
|
pp = append(pp, platforms.Format(p))
|
||||||
|
}
|
||||||
|
return json.Marshal(struct {
|
||||||
|
Name string
|
||||||
|
Endpoint string
|
||||||
|
BuildkitdFlags []string `json:"Flags,omitempty"`
|
||||||
|
DriverOpts map[string]string `json:",omitempty"`
|
||||||
|
Files map[string][]byte `json:",omitempty"`
|
||||||
|
Status string `json:",omitempty"`
|
||||||
|
ProxyConfig map[string]string `json:",omitempty"`
|
||||||
|
Version string `json:",omitempty"`
|
||||||
|
Err string `json:",omitempty"`
|
||||||
|
IDs []string `json:",omitempty"`
|
||||||
|
Platforms []string `json:",omitempty"`
|
||||||
|
GCPolicy []client.PruneInfo `json:",omitempty"`
|
||||||
|
Labels map[string]string `json:",omitempty"`
|
||||||
|
}{
|
||||||
|
Name: n.Name,
|
||||||
|
Endpoint: n.Endpoint,
|
||||||
|
BuildkitdFlags: n.BuildkitdFlags,
|
||||||
|
DriverOpts: n.DriverOpts,
|
||||||
|
Files: n.Files,
|
||||||
|
Status: status,
|
||||||
|
ProxyConfig: n.ProxyConfig,
|
||||||
|
Version: n.Version,
|
||||||
|
Err: nerr,
|
||||||
|
IDs: n.IDs,
|
||||||
|
Platforms: pp,
|
||||||
|
GCPolicy: n.GCPolicy,
|
||||||
|
Labels: n.Labels,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *Node) loadData(ctx context.Context, clientOpt ...client.ClientOpt) error {
|
||||||
if n.Driver == nil {
|
if n.Driver == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -179,7 +244,7 @@ func (n *Node) loadData(ctx context.Context) error {
|
|||||||
}
|
}
|
||||||
n.DriverInfo = info
|
n.DriverInfo = info
|
||||||
if n.DriverInfo.Status == driver.Running {
|
if n.DriverInfo.Status == driver.Running {
|
||||||
driverClient, err := n.Driver.Client(ctx)
|
driverClient, err := n.Driver.Client(ctx, clientOpt...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -188,12 +253,14 @@ func (n *Node) loadData(ctx context.Context) error {
|
|||||||
return errors.Wrap(err, "listing workers")
|
return errors.Wrap(err, "listing workers")
|
||||||
}
|
}
|
||||||
for idx, w := range workers {
|
for idx, w := range workers {
|
||||||
|
n.IDs = append(n.IDs, w.ID)
|
||||||
n.Platforms = append(n.Platforms, w.Platforms...)
|
n.Platforms = append(n.Platforms, w.Platforms...)
|
||||||
if idx == 0 {
|
if idx == 0 {
|
||||||
n.GCPolicy = w.GCPolicy
|
n.GCPolicy = w.GCPolicy
|
||||||
n.Labels = w.Labels
|
n.Labels = w.Labels
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
sort.Strings(n.IDs)
|
||||||
n.Platforms = platformutil.Dedupe(n.Platforms)
|
n.Platforms = platformutil.Dedupe(n.Platforms)
|
||||||
inf, err := driverClient.Info(ctx)
|
inf, err := driverClient.Info(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
@@ -15,6 +16,7 @@ import (
|
|||||||
cliflags "github.com/docker/cli/cli/flags"
|
cliflags "github.com/docker/cli/cli/flags"
|
||||||
"github.com/moby/buildkit/solver/errdefs"
|
"github.com/moby/buildkit/solver/errdefs"
|
||||||
"github.com/moby/buildkit/util/stack"
|
"github.com/moby/buildkit/util/stack"
|
||||||
|
"go.opentelemetry.io/otel"
|
||||||
|
|
||||||
//nolint:staticcheck // vendored dependencies may still use this
|
//nolint:staticcheck // vendored dependencies may still use this
|
||||||
"github.com/containerd/containerd/pkg/seed"
|
"github.com/containerd/containerd/pkg/seed"
|
||||||
@@ -38,10 +40,27 @@ func runStandalone(cmd *command.DockerCli) error {
|
|||||||
if err := cmd.Initialize(cliflags.NewClientOptions()); err != nil {
|
if err := cmd.Initialize(cliflags.NewClientOptions()); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
defer flushMetrics(cmd)
|
||||||
|
|
||||||
rootCmd := commands.NewRootCmd(os.Args[0], false, cmd)
|
rootCmd := commands.NewRootCmd(os.Args[0], false, cmd)
|
||||||
return rootCmd.Execute()
|
return rootCmd.Execute()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// flushMetrics will manually flush metrics from the configured
|
||||||
|
// meter provider. This is needed when running in standalone mode
|
||||||
|
// because the meter provider is initialized by the cli library,
|
||||||
|
// but the mechanism for forcing it to report is not presently
|
||||||
|
// exposed and not invoked when run in standalone mode.
|
||||||
|
// There are plans to fix that in the next release, but this is
|
||||||
|
// needed temporarily until the API for this is more thorough.
|
||||||
|
func flushMetrics(cmd *command.DockerCli) {
|
||||||
|
if mp, ok := cmd.MeterProvider().(command.MeterProvider); ok {
|
||||||
|
if err := mp.ForceFlush(context.Background()); err != nil {
|
||||||
|
otel.Handle(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func runPlugin(cmd *command.DockerCli) error {
|
func runPlugin(cmd *command.DockerCli) error {
|
||||||
rootCmd := commands.NewRootCmd("buildx", true, cmd)
|
rootCmd := commands.NewRootCmd("buildx", true, cmd)
|
||||||
return plugin.RunPlugin(cmd, rootCmd, manager.Metadata{
|
return plugin.RunPlugin(cmd, rootCmd, manager.Metadata{
|
||||||
|
|||||||
@@ -4,7 +4,6 @@ import (
|
|||||||
"github.com/moby/buildkit/util/tracing/detect"
|
"github.com/moby/buildkit/util/tracing/detect"
|
||||||
"go.opentelemetry.io/otel"
|
"go.opentelemetry.io/otel"
|
||||||
|
|
||||||
_ "github.com/moby/buildkit/util/tracing/detect/delegated"
|
|
||||||
_ "github.com/moby/buildkit/util/tracing/env"
|
_ "github.com/moby/buildkit/util/tracing/env"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@@ -1 +1,4 @@
|
|||||||
comment: false
|
comment: false
|
||||||
|
|
||||||
|
ignore:
|
||||||
|
- "**/*.pb.go"
|
||||||
|
|||||||
566
commands/bake.go
566
commands/bake.go
@@ -1,44 +1,65 @@
|
|||||||
package commands
|
package commands
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
|
"cmp"
|
||||||
"context"
|
"context"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
"os"
|
"os"
|
||||||
|
"slices"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"text/tabwriter"
|
||||||
|
|
||||||
"github.com/containerd/console"
|
"github.com/containerd/console"
|
||||||
"github.com/containerd/containerd/platforms"
|
"github.com/containerd/platforms"
|
||||||
"github.com/docker/buildx/bake"
|
"github.com/docker/buildx/bake"
|
||||||
|
"github.com/docker/buildx/bake/hclparser"
|
||||||
"github.com/docker/buildx/build"
|
"github.com/docker/buildx/build"
|
||||||
"github.com/docker/buildx/builder"
|
"github.com/docker/buildx/builder"
|
||||||
|
"github.com/docker/buildx/controller/pb"
|
||||||
|
"github.com/docker/buildx/localstate"
|
||||||
"github.com/docker/buildx/util/buildflags"
|
"github.com/docker/buildx/util/buildflags"
|
||||||
|
"github.com/docker/buildx/util/cobrautil"
|
||||||
"github.com/docker/buildx/util/cobrautil/completion"
|
"github.com/docker/buildx/util/cobrautil/completion"
|
||||||
"github.com/docker/buildx/util/confutil"
|
"github.com/docker/buildx/util/confutil"
|
||||||
"github.com/docker/buildx/util/desktop"
|
"github.com/docker/buildx/util/desktop"
|
||||||
"github.com/docker/buildx/util/dockerutil"
|
"github.com/docker/buildx/util/dockerutil"
|
||||||
|
"github.com/docker/buildx/util/osutil"
|
||||||
"github.com/docker/buildx/util/progress"
|
"github.com/docker/buildx/util/progress"
|
||||||
"github.com/docker/buildx/util/tracing"
|
"github.com/docker/buildx/util/tracing"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
"github.com/moby/buildkit/identity"
|
||||||
|
"github.com/moby/buildkit/util/progress/progressui"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
|
"go.opentelemetry.io/otel/attribute"
|
||||||
)
|
)
|
||||||
|
|
||||||
type bakeOptions struct {
|
type bakeOptions struct {
|
||||||
files []string
|
files []string
|
||||||
overrides []string
|
overrides []string
|
||||||
printOnly bool
|
printOnly bool
|
||||||
|
listTargets bool
|
||||||
|
listVars bool
|
||||||
sbom string
|
sbom string
|
||||||
provenance string
|
provenance string
|
||||||
|
allow []string
|
||||||
|
|
||||||
builder string
|
builder string
|
||||||
metadataFile string
|
metadataFile string
|
||||||
exportPush bool
|
exportPush bool
|
||||||
exportLoad bool
|
exportLoad bool
|
||||||
|
callFunc string
|
||||||
}
|
}
|
||||||
|
|
||||||
func runBake(dockerCli command.Cli, targets []string, in bakeOptions, cFlags commonFlags) (err error) {
|
func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in bakeOptions, cFlags commonFlags) (err error) {
|
||||||
ctx := appcontext.Context()
|
mp := dockerCli.MeterProvider()
|
||||||
|
|
||||||
ctx, end, err := tracing.TraceCurrentCommand(ctx, "bake")
|
ctx, end, err := tracing.TraceCurrentCommand(ctx, "bake")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -48,34 +69,25 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions, cFlags com
|
|||||||
end(err)
|
end(err)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
var url string
|
url, cmdContext, targets := bakeArgs(targets)
|
||||||
cmdContext := "cwd://"
|
|
||||||
|
|
||||||
if len(targets) > 0 {
|
|
||||||
if build.IsRemoteURL(targets[0]) {
|
|
||||||
url = targets[0]
|
|
||||||
targets = targets[1:]
|
|
||||||
if len(targets) > 0 {
|
|
||||||
if build.IsRemoteURL(targets[0]) {
|
|
||||||
cmdContext = targets[0]
|
|
||||||
targets = targets[1:]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(targets) == 0 {
|
if len(targets) == 0 {
|
||||||
targets = []string{"default"}
|
targets = []string{"default"}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
callFunc, err := buildflags.ParseCallFunc(in.callFunc)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
overrides := in.overrides
|
overrides := in.overrides
|
||||||
if in.exportPush {
|
if in.exportPush {
|
||||||
if in.exportLoad {
|
|
||||||
return errors.Errorf("push and load may not be set together at the moment")
|
|
||||||
}
|
|
||||||
overrides = append(overrides, "*.push=true")
|
overrides = append(overrides, "*.push=true")
|
||||||
} else if in.exportLoad {
|
}
|
||||||
overrides = append(overrides, "*.output=type=docker")
|
if in.exportLoad {
|
||||||
|
overrides = append(overrides, "*.load=true")
|
||||||
|
}
|
||||||
|
if callFunc != nil {
|
||||||
|
overrides = append(overrides, fmt.Sprintf("*.call=%s", callFunc.Name))
|
||||||
}
|
}
|
||||||
if cFlags.noCache != nil {
|
if cFlags.noCache != nil {
|
||||||
overrides = append(overrides, fmt.Sprintf("*.no-cache=%t", *cFlags.noCache))
|
overrides = append(overrides, fmt.Sprintf("*.no-cache=%t", *cFlags.noCache))
|
||||||
@@ -91,15 +103,19 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions, cFlags com
|
|||||||
}
|
}
|
||||||
contextPathHash, _ := os.Getwd()
|
contextPathHash, _ := os.Getwd()
|
||||||
|
|
||||||
|
ent, err := bake.ParseEntitlements(in.allow)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
ctx2, cancel := context.WithCancel(context.TODO())
|
ctx2, cancel := context.WithCancel(context.TODO())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
var nodes []builder.Node
|
var nodes []builder.Node
|
||||||
var files []bake.File
|
|
||||||
var inp *bake.Input
|
|
||||||
var progressConsoleDesc, progressTextDesc string
|
var progressConsoleDesc, progressTextDesc string
|
||||||
|
|
||||||
// instance only needed for reading remote bake files or building
|
// instance only needed for reading remote bake files or building
|
||||||
|
var driverType string
|
||||||
if url != "" || !in.printOnly {
|
if url != "" || !in.printOnly {
|
||||||
b, err := builder.New(dockerCli,
|
b, err := builder.New(dockerCli,
|
||||||
builder.WithName(in.builder),
|
builder.WithName(in.builder),
|
||||||
@@ -111,53 +127,72 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions, cFlags com
|
|||||||
if err = updateLastActivity(dockerCli, b.NodeGroup); err != nil {
|
if err = updateLastActivity(dockerCli, b.NodeGroup); err != nil {
|
||||||
return errors.Wrapf(err, "failed to update builder last activity time")
|
return errors.Wrapf(err, "failed to update builder last activity time")
|
||||||
}
|
}
|
||||||
nodes, err = b.LoadNodes(ctx, false)
|
nodes, err = b.LoadNodes(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
progressConsoleDesc = fmt.Sprintf("%s:%s", b.Driver, b.Name)
|
progressConsoleDesc = fmt.Sprintf("%s:%s", b.Driver, b.Name)
|
||||||
progressTextDesc = fmt.Sprintf("building with %q instance using %s driver", b.Name, b.Driver)
|
progressTextDesc = fmt.Sprintf("building with %q instance using %s driver", b.Name, b.Driver)
|
||||||
|
driverType = b.Driver
|
||||||
}
|
}
|
||||||
|
|
||||||
var term bool
|
var term bool
|
||||||
if _, err := console.ConsoleFromFile(os.Stderr); err == nil {
|
if _, err := console.ConsoleFromFile(os.Stderr); err == nil {
|
||||||
term = true
|
term = true
|
||||||
}
|
}
|
||||||
|
attributes := bakeMetricAttributes(dockerCli, driverType, url, cmdContext, targets, &in)
|
||||||
|
|
||||||
printer, err := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, cFlags.progress,
|
progressMode := progressui.DisplayMode(cFlags.progress)
|
||||||
|
var printer *progress.Printer
|
||||||
|
|
||||||
|
makePrinter := func() error {
|
||||||
|
var err error
|
||||||
|
printer, err = progress.NewPrinter(ctx2, os.Stderr, progressMode,
|
||||||
progress.WithDesc(progressTextDesc, progressConsoleDesc),
|
progress.WithDesc(progressTextDesc, progressConsoleDesc),
|
||||||
|
progress.WithMetrics(mp, attributes),
|
||||||
|
progress.WithOnClose(func() {
|
||||||
|
printWarnings(os.Stderr, printer.Warnings(), progressMode)
|
||||||
|
}),
|
||||||
)
|
)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := makePrinter(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
files, inp, err := readBakeFiles(ctx, nodes, url, in.files, dockerCli.In(), printer)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
defer func() {
|
if len(files) == 0 {
|
||||||
if printer != nil {
|
return errors.New("couldn't find a bake definition")
|
||||||
err1 := printer.Wait()
|
|
||||||
if err == nil {
|
|
||||||
err = err1
|
|
||||||
}
|
|
||||||
if err == nil && cFlags.progress != progress.PrinterModeQuiet {
|
|
||||||
desktop.PrintBuildDetails(os.Stderr, printer.BuildRefs(), term)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
if url != "" {
|
|
||||||
files, inp, err = bake.ReadRemoteFiles(ctx, nodes, url, in.files, printer)
|
|
||||||
} else {
|
|
||||||
files, err = bake.ReadLocalFiles(in.files, dockerCli.In())
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
tgts, grps, err := bake.ReadTargets(ctx, files, targets, overrides, map[string]string{
|
defaults := map[string]string{
|
||||||
// don't forget to update documentation if you add a new
|
// don't forget to update documentation if you add a new
|
||||||
// built-in variable: docs/bake-reference.md#built-in-variables
|
// built-in variable: docs/bake-reference.md#built-in-variables
|
||||||
"BAKE_CMD_CONTEXT": cmdContext,
|
"BAKE_CMD_CONTEXT": cmdContext,
|
||||||
"BAKE_LOCAL_PLATFORM": platforms.DefaultString(),
|
"BAKE_LOCAL_PLATFORM": platforms.Format(platforms.DefaultSpec()),
|
||||||
})
|
}
|
||||||
|
|
||||||
|
if in.listTargets || in.listVars {
|
||||||
|
cfg, pm, err := bake.ParseFiles(files, defaults)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err = printer.Wait(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if in.listTargets {
|
||||||
|
return printTargetList(dockerCli.Out(), cfg)
|
||||||
|
} else if in.listVars {
|
||||||
|
return printVars(dockerCli.Out(), pm.AllVariables)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
tgts, grps, err := bake.ReadTargets(ctx, files, targets, overrides, defaults)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -181,42 +216,192 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions, cFlags com
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if in.printOnly {
|
def := struct {
|
||||||
dt, err := json.MarshalIndent(struct {
|
|
||||||
Group map[string]*bake.Group `json:"group,omitempty"`
|
Group map[string]*bake.Group `json:"group,omitempty"`
|
||||||
Target map[string]*bake.Target `json:"target"`
|
Target map[string]*bake.Target `json:"target"`
|
||||||
}{
|
}{
|
||||||
grps,
|
Group: grps,
|
||||||
tgts,
|
Target: tgts,
|
||||||
}, "", " ")
|
}
|
||||||
|
|
||||||
|
if in.printOnly {
|
||||||
|
if err = printer.Wait(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
dtdef, err := json.MarshalIndent(def, "", " ")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
err = printer.Wait()
|
_, err = fmt.Fprintln(dockerCli.Out(), string(dtdef))
|
||||||
printer = nil
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, opt := range bo {
|
||||||
|
if opt.CallFunc != nil {
|
||||||
|
cf, err := buildflags.ParseCallFunc(opt.CallFunc.Name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
fmt.Fprintln(dockerCli.Out(), string(dt))
|
opt.CallFunc.Name = cf.Name
|
||||||
return nil
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, err := build.Build(ctx, nodes, bo, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), printer)
|
exp, err := ent.Validate(bo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return wrapBuildError(err, true)
|
return err
|
||||||
|
}
|
||||||
|
if err := exp.Prompt(ctx, &syncWriter{w: dockerCli.Err(), wait: printer.Wait}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if printer.IsDone() {
|
||||||
|
// init new printer as old one was stopped to show the prompt
|
||||||
|
if err := makePrinter(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if err := saveLocalStateGroup(dockerCli, in, targets, bo, overrides, def); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
done := timeBuildCommand(mp, attributes)
|
||||||
|
resp, retErr := build.Build(ctx, nodes, bo, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), printer)
|
||||||
|
if err := printer.Wait(); retErr == nil {
|
||||||
|
retErr = err
|
||||||
|
}
|
||||||
|
if retErr != nil {
|
||||||
|
err = wrapBuildError(retErr, true)
|
||||||
|
}
|
||||||
|
done(err)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if progressMode != progressui.QuietMode && progressMode != progressui.RawJSONMode {
|
||||||
|
desktop.PrintBuildDetails(os.Stderr, printer.BuildRefs(), term)
|
||||||
|
}
|
||||||
if len(in.metadataFile) > 0 {
|
if len(in.metadataFile) > 0 {
|
||||||
dt := make(map[string]interface{})
|
dt := make(map[string]interface{})
|
||||||
for t, r := range resp {
|
for t, r := range resp {
|
||||||
dt[t] = decodeExporterResponse(r.ExporterResponse)
|
dt[t] = decodeExporterResponse(r.ExporterResponse)
|
||||||
}
|
}
|
||||||
|
if callFunc == nil {
|
||||||
|
if warnings := printer.Warnings(); len(warnings) > 0 && confutil.MetadataWarningsEnabled() {
|
||||||
|
dt["buildx.build.warnings"] = warnings
|
||||||
|
}
|
||||||
|
}
|
||||||
if err := writeMetadataFile(in.metadataFile, dt); err != nil {
|
if err := writeMetadataFile(in.metadataFile, dt); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var callFormatJSON bool
|
||||||
|
jsonResults := map[string]map[string]any{}
|
||||||
|
if callFunc != nil {
|
||||||
|
callFormatJSON = callFunc.Format == "json"
|
||||||
|
}
|
||||||
|
var sep bool
|
||||||
|
var exitCode int
|
||||||
|
|
||||||
|
names := make([]string, 0, len(bo))
|
||||||
|
for name := range bo {
|
||||||
|
names = append(names, name)
|
||||||
|
}
|
||||||
|
slices.Sort(names)
|
||||||
|
|
||||||
|
for _, name := range names {
|
||||||
|
req := bo[name]
|
||||||
|
if req.CallFunc == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
pf := &pb.CallFunc{
|
||||||
|
Name: req.CallFunc.Name,
|
||||||
|
Format: req.CallFunc.Format,
|
||||||
|
IgnoreStatus: req.CallFunc.IgnoreStatus,
|
||||||
|
}
|
||||||
|
|
||||||
|
if callFunc != nil {
|
||||||
|
pf.Format = callFunc.Format
|
||||||
|
pf.IgnoreStatus = callFunc.IgnoreStatus
|
||||||
|
}
|
||||||
|
|
||||||
|
var res map[string]string
|
||||||
|
if sp, ok := resp[name]; ok {
|
||||||
|
res = sp.ExporterResponse
|
||||||
|
}
|
||||||
|
|
||||||
|
if callFormatJSON {
|
||||||
|
jsonResults[name] = map[string]any{}
|
||||||
|
buf := &bytes.Buffer{}
|
||||||
|
if code, err := printResult(buf, pf, res); err != nil {
|
||||||
|
jsonResults[name]["error"] = err.Error()
|
||||||
|
exitCode = 1
|
||||||
|
} else if code != 0 && exitCode == 0 {
|
||||||
|
exitCode = code
|
||||||
|
}
|
||||||
|
m := map[string]*json.RawMessage{}
|
||||||
|
if err := json.Unmarshal(buf.Bytes(), &m); err == nil {
|
||||||
|
for k, v := range m {
|
||||||
|
jsonResults[name][k] = v
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
jsonResults[name][pf.Name] = json.RawMessage(buf.Bytes())
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if sep {
|
||||||
|
fmt.Fprintln(dockerCli.Out())
|
||||||
|
} else {
|
||||||
|
sep = true
|
||||||
|
}
|
||||||
|
fmt.Fprintf(dockerCli.Out(), "%s\n", name)
|
||||||
|
if descr := tgts[name].Description; descr != "" {
|
||||||
|
fmt.Fprintf(dockerCli.Out(), "%s\n", descr)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprintln(dockerCli.Out())
|
||||||
|
if code, err := printResult(dockerCli.Out(), pf, res); err != nil {
|
||||||
|
fmt.Fprintf(dockerCli.Out(), "error: %v\n", err)
|
||||||
|
exitCode = 1
|
||||||
|
} else if code != 0 && exitCode == 0 {
|
||||||
|
exitCode = code
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if callFormatJSON {
|
||||||
|
out := struct {
|
||||||
|
Group map[string]*bake.Group `json:"group,omitempty"`
|
||||||
|
Target map[string]map[string]any `json:"target"`
|
||||||
|
}{
|
||||||
|
Group: grps,
|
||||||
|
Target: map[string]map[string]any{},
|
||||||
|
}
|
||||||
|
|
||||||
|
for name, def := range tgts {
|
||||||
|
out.Target[name] = map[string]any{
|
||||||
|
"build": def,
|
||||||
|
}
|
||||||
|
if res, ok := jsonResults[name]; ok {
|
||||||
|
printName := bo[name].CallFunc.Name
|
||||||
|
if printName == "lint" {
|
||||||
|
printName = "check"
|
||||||
|
}
|
||||||
|
out.Target[name][printName] = res
|
||||||
|
}
|
||||||
|
}
|
||||||
|
dt, err := json.MarshalIndent(out, "", " ")
|
||||||
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
}
|
||||||
|
fmt.Fprintln(dockerCli.Out(), string(dt))
|
||||||
|
}
|
||||||
|
|
||||||
|
if exitCode != 0 {
|
||||||
|
os.Exit(exitCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
||||||
@@ -238,7 +423,7 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
options.builder = rootOpts.builder
|
options.builder = rootOpts.builder
|
||||||
options.metadataFile = cFlags.metadataFile
|
options.metadataFile = cFlags.metadataFile
|
||||||
// Other common flags (noCache, pull and progress) are processed in runBake function.
|
// Other common flags (noCache, pull and progress) are processed in runBake function.
|
||||||
return runBake(dockerCli, args, options, cFlags)
|
return runBake(cmd.Context(), dockerCli, args, options, cFlags)
|
||||||
},
|
},
|
||||||
ValidArgsFunction: completion.BakeTargets(options.files),
|
ValidArgsFunction: completion.BakeTargets(options.files),
|
||||||
}
|
}
|
||||||
@@ -252,8 +437,263 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
flags.StringVar(&options.sbom, "sbom", "", `Shorthand for "--set=*.attest=type=sbom"`)
|
flags.StringVar(&options.sbom, "sbom", "", `Shorthand for "--set=*.attest=type=sbom"`)
|
||||||
flags.StringVar(&options.provenance, "provenance", "", `Shorthand for "--set=*.attest=type=provenance"`)
|
flags.StringVar(&options.provenance, "provenance", "", `Shorthand for "--set=*.attest=type=provenance"`)
|
||||||
flags.StringArrayVar(&options.overrides, "set", nil, `Override target value (e.g., "targetpattern.key=value")`)
|
flags.StringArrayVar(&options.overrides, "set", nil, `Override target value (e.g., "targetpattern.key=value")`)
|
||||||
|
flags.StringVar(&options.callFunc, "call", "build", `Set method for evaluating build ("check", "outline", "targets")`)
|
||||||
|
flags.StringArrayVar(&options.allow, "allow", nil, "Allow build to access specified resources")
|
||||||
|
|
||||||
|
flags.VarPF(callAlias(&options.callFunc, "check"), "check", "", `Shorthand for "--call=check"`)
|
||||||
|
flags.Lookup("check").NoOptDefVal = "true"
|
||||||
|
|
||||||
|
flags.BoolVar(&options.listTargets, "list-targets", false, "List available targets")
|
||||||
|
cobrautil.MarkFlagsExperimental(flags, "list-targets")
|
||||||
|
flags.MarkHidden("list-targets")
|
||||||
|
|
||||||
|
flags.BoolVar(&options.listVars, "list-variables", false, "List defined variables")
|
||||||
|
cobrautil.MarkFlagsExperimental(flags, "list-variables")
|
||||||
|
flags.MarkHidden("list-variables")
|
||||||
|
|
||||||
commonBuildFlags(&cFlags, flags)
|
commonBuildFlags(&cFlags, flags)
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func saveLocalStateGroup(dockerCli command.Cli, in bakeOptions, targets []string, bo map[string]build.Options, overrides []string, def any) error {
|
||||||
|
prm := confutil.MetadataProvenance()
|
||||||
|
if len(in.metadataFile) == 0 {
|
||||||
|
prm = confutil.MetadataProvenanceModeDisabled
|
||||||
|
}
|
||||||
|
groupRef := identity.NewID()
|
||||||
|
refs := make([]string, 0, len(bo))
|
||||||
|
for k, b := range bo {
|
||||||
|
b.Ref = identity.NewID()
|
||||||
|
b.GroupRef = groupRef
|
||||||
|
b.ProvenanceResponseMode = prm
|
||||||
|
refs = append(refs, b.Ref)
|
||||||
|
bo[k] = b
|
||||||
|
}
|
||||||
|
l, err := localstate.New(confutil.ConfigDir(dockerCli))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
dtdef, err := json.MarshalIndent(def, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return l.SaveGroup(groupRef, localstate.StateGroup{
|
||||||
|
Definition: dtdef,
|
||||||
|
Targets: targets,
|
||||||
|
Inputs: overrides,
|
||||||
|
Refs: refs,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// bakeArgs will retrieve the remote url, command context, and targets
|
||||||
|
// from the command line arguments.
|
||||||
|
func bakeArgs(args []string) (url, cmdContext string, targets []string) {
|
||||||
|
cmdContext, targets = "cwd://", args
|
||||||
|
if len(targets) == 0 || !build.IsRemoteURL(targets[0]) {
|
||||||
|
return url, cmdContext, targets
|
||||||
|
}
|
||||||
|
url, targets = targets[0], targets[1:]
|
||||||
|
if len(targets) == 0 || !build.IsRemoteURL(targets[0]) {
|
||||||
|
return url, cmdContext, targets
|
||||||
|
}
|
||||||
|
cmdContext, targets = targets[0], targets[1:]
|
||||||
|
return url, cmdContext, targets
|
||||||
|
}
|
||||||
|
|
||||||
|
func readBakeFiles(ctx context.Context, nodes []builder.Node, url string, names []string, stdin io.Reader, pw progress.Writer) (files []bake.File, inp *bake.Input, err error) {
|
||||||
|
var lnames []string // local
|
||||||
|
var rnames []string // remote
|
||||||
|
var anames []string // both
|
||||||
|
for _, v := range names {
|
||||||
|
if strings.HasPrefix(v, "cwd://") {
|
||||||
|
tname := strings.TrimPrefix(v, "cwd://")
|
||||||
|
lnames = append(lnames, tname)
|
||||||
|
anames = append(anames, tname)
|
||||||
|
} else {
|
||||||
|
rnames = append(rnames, v)
|
||||||
|
anames = append(anames, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if url != "" {
|
||||||
|
var rfiles []bake.File
|
||||||
|
rfiles, inp, err = bake.ReadRemoteFiles(ctx, nodes, url, rnames, pw)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
files = append(files, rfiles...)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(lnames) > 0 || url == "" {
|
||||||
|
var lfiles []bake.File
|
||||||
|
progress.Wrap("[internal] load local bake definitions", pw.Write, func(sub progress.SubLogger) error {
|
||||||
|
if url != "" {
|
||||||
|
lfiles, err = bake.ReadLocalFiles(lnames, stdin, sub)
|
||||||
|
} else {
|
||||||
|
lfiles, err = bake.ReadLocalFiles(anames, stdin, sub)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
files = append(files, lfiles...)
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func printVars(w io.Writer, vars []*hclparser.Variable) error {
|
||||||
|
slices.SortFunc(vars, func(a, b *hclparser.Variable) int {
|
||||||
|
return cmp.Compare(a.Name, b.Name)
|
||||||
|
})
|
||||||
|
tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
|
||||||
|
defer tw.Flush()
|
||||||
|
|
||||||
|
tw.Write([]byte("VARIABLE\tVALUE\tDESCRIPTION\n"))
|
||||||
|
|
||||||
|
for _, v := range vars {
|
||||||
|
var value string
|
||||||
|
if v.Value != nil {
|
||||||
|
value = *v.Value
|
||||||
|
} else {
|
||||||
|
value = "<null>"
|
||||||
|
}
|
||||||
|
fmt.Fprintf(tw, "%s\t%s\t%s\n", v.Name, value, v.Description)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func printTargetList(w io.Writer, cfg *bake.Config) error {
|
||||||
|
tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
|
||||||
|
defer tw.Flush()
|
||||||
|
|
||||||
|
tw.Write([]byte("TARGET\tDESCRIPTION\n"))
|
||||||
|
|
||||||
|
type targetOrGroup struct {
|
||||||
|
name string
|
||||||
|
target *bake.Target
|
||||||
|
group *bake.Group
|
||||||
|
}
|
||||||
|
|
||||||
|
list := make([]targetOrGroup, 0, len(cfg.Targets)+len(cfg.Groups))
|
||||||
|
for _, tgt := range cfg.Targets {
|
||||||
|
list = append(list, targetOrGroup{name: tgt.Name, target: tgt})
|
||||||
|
}
|
||||||
|
for _, grp := range cfg.Groups {
|
||||||
|
list = append(list, targetOrGroup{name: grp.Name, group: grp})
|
||||||
|
}
|
||||||
|
|
||||||
|
slices.SortFunc(list, func(a, b targetOrGroup) int {
|
||||||
|
return cmp.Compare(a.name, b.name)
|
||||||
|
})
|
||||||
|
|
||||||
|
for _, tgt := range list {
|
||||||
|
if strings.HasPrefix(tgt.name, "_") {
|
||||||
|
// convention for a private target
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
var descr string
|
||||||
|
if tgt.target != nil {
|
||||||
|
descr = tgt.target.Description
|
||||||
|
} else if tgt.group != nil {
|
||||||
|
descr = tgt.group.Description
|
||||||
|
|
||||||
|
if len(tgt.group.Targets) > 0 {
|
||||||
|
slices.Sort(tgt.group.Targets)
|
||||||
|
names := strings.Join(tgt.group.Targets, ", ")
|
||||||
|
if descr != "" {
|
||||||
|
descr += " (" + names + ")"
|
||||||
|
} else {
|
||||||
|
descr = names
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Fprintf(tw, "%s\t%s\n", tgt.name, descr)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func bakeMetricAttributes(dockerCli command.Cli, driverType, url, cmdContext string, targets []string, options *bakeOptions) attribute.Set {
|
||||||
|
return attribute.NewSet(
|
||||||
|
commandNameAttribute.String("bake"),
|
||||||
|
attribute.Stringer(string(commandOptionsHash), &bakeOptionsHash{
|
||||||
|
bakeOptions: options,
|
||||||
|
configDir: confutil.ConfigDir(dockerCli),
|
||||||
|
url: url,
|
||||||
|
cmdContext: cmdContext,
|
||||||
|
targets: targets,
|
||||||
|
}),
|
||||||
|
driverNameAttribute.String(options.builder),
|
||||||
|
driverTypeAttribute.String(driverType),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
type bakeOptionsHash struct {
|
||||||
|
*bakeOptions
|
||||||
|
configDir string
|
||||||
|
url string
|
||||||
|
cmdContext string
|
||||||
|
targets []string
|
||||||
|
result string
|
||||||
|
resultOnce sync.Once
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *bakeOptionsHash) String() string {
|
||||||
|
o.resultOnce.Do(func() {
|
||||||
|
url := o.url
|
||||||
|
cmdContext := o.cmdContext
|
||||||
|
if cmdContext == "cwd://" {
|
||||||
|
// Resolve the directory if the cmdContext is the current working directory.
|
||||||
|
cmdContext = osutil.GetWd()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort the inputs for files and targets since the ordering
|
||||||
|
// doesn't matter, but avoid modifying the original slice.
|
||||||
|
files := immutableSort(o.files)
|
||||||
|
targets := immutableSort(o.targets)
|
||||||
|
|
||||||
|
joinedFiles := strings.Join(files, ",")
|
||||||
|
joinedTargets := strings.Join(targets, ",")
|
||||||
|
salt := confutil.TryNodeIdentifier(o.configDir)
|
||||||
|
|
||||||
|
h := sha256.New()
|
||||||
|
for _, s := range []string{url, cmdContext, joinedFiles, joinedTargets, salt} {
|
||||||
|
_, _ = io.WriteString(h, s)
|
||||||
|
h.Write([]byte{0})
|
||||||
|
}
|
||||||
|
o.result = hex.EncodeToString(h.Sum(nil))
|
||||||
|
})
|
||||||
|
return o.result
|
||||||
|
}
|
||||||
|
|
||||||
|
// immutableSort will sort the entries in s without modifying the original slice.
|
||||||
|
func immutableSort(s []string) []string {
|
||||||
|
if !sort.StringsAreSorted(s) {
|
||||||
|
cpy := make([]string, len(s))
|
||||||
|
copy(cpy, s)
|
||||||
|
sort.Strings(cpy)
|
||||||
|
return cpy
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
type syncWriter struct {
|
||||||
|
w io.Writer
|
||||||
|
once sync.Once
|
||||||
|
wait func() error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *syncWriter) Write(p []byte) (n int, err error) {
|
||||||
|
w.once.Do(func() {
|
||||||
|
if w.wait != nil {
|
||||||
|
err = w.wait()
|
||||||
|
}
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return w.w.Write(p)
|
||||||
|
}
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -3,31 +3,15 @@ package commands
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"encoding/csv"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/url"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/docker/buildx/builder"
|
"github.com/docker/buildx/builder"
|
||||||
"github.com/docker/buildx/driver"
|
"github.com/docker/buildx/driver"
|
||||||
k8sutil "github.com/docker/buildx/driver/kubernetes/util"
|
|
||||||
remoteutil "github.com/docker/buildx/driver/remote/util"
|
|
||||||
"github.com/docker/buildx/localstate"
|
|
||||||
"github.com/docker/buildx/store"
|
|
||||||
"github.com/docker/buildx/store/storeutil"
|
"github.com/docker/buildx/store/storeutil"
|
||||||
"github.com/docker/buildx/util/cobrautil"
|
"github.com/docker/buildx/util/cobrautil"
|
||||||
"github.com/docker/buildx/util/cobrautil/completion"
|
"github.com/docker/buildx/util/cobrautil/completion"
|
||||||
"github.com/docker/buildx/util/confutil"
|
|
||||||
"github.com/docker/buildx/util/dockerutil"
|
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
dopts "github.com/docker/cli/opts"
|
|
||||||
"github.com/google/shlex"
|
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -39,266 +23,52 @@ type createOptions struct {
|
|||||||
actionAppend bool
|
actionAppend bool
|
||||||
actionLeave bool
|
actionLeave bool
|
||||||
use bool
|
use bool
|
||||||
flags string
|
|
||||||
configFile string
|
|
||||||
driverOpts []string
|
driverOpts []string
|
||||||
|
buildkitdFlags string
|
||||||
|
buildkitdConfigFile string
|
||||||
bootstrap bool
|
bootstrap bool
|
||||||
// upgrade bool // perform upgrade of the driver
|
// upgrade bool // perform upgrade of the driver
|
||||||
}
|
}
|
||||||
|
|
||||||
func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, args []string) error {
|
||||||
ctx := appcontext.Context()
|
|
||||||
|
|
||||||
if in.name == "default" {
|
|
||||||
return errors.Errorf("default is a reserved name and cannot be used to identify builder instance")
|
|
||||||
}
|
|
||||||
|
|
||||||
if in.actionLeave {
|
|
||||||
if in.name == "" {
|
|
||||||
return errors.Errorf("leave requires instance name")
|
|
||||||
}
|
|
||||||
if in.nodeName == "" {
|
|
||||||
return errors.Errorf("leave requires node name but --node not set")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if in.actionAppend {
|
|
||||||
if in.name == "" {
|
|
||||||
logrus.Warnf("append used without name, creating a new instance instead")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
txn, release, err := storeutil.GetStore(dockerCli)
|
txn, release, err := storeutil.GetStore(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
// Ensure the file lock gets released no matter what happens.
|
||||||
defer release()
|
defer release()
|
||||||
|
|
||||||
name := in.name
|
|
||||||
if name == "" {
|
|
||||||
name, err = store.GenerateName(txn)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !in.actionLeave && !in.actionAppend {
|
|
||||||
contexts, err := dockerCli.ContextStore().List()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, c := range contexts {
|
|
||||||
if c.Name == name {
|
|
||||||
logrus.Warnf("instance name %q already exists as context builder", name)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ng, err := txn.NodeGroupByName(name)
|
|
||||||
if err != nil {
|
|
||||||
if os.IsNotExist(errors.Cause(err)) {
|
|
||||||
if in.actionAppend && in.name != "" {
|
|
||||||
logrus.Warnf("failed to find %q for append, creating a new instance instead", in.name)
|
|
||||||
}
|
|
||||||
if in.actionLeave {
|
if in.actionLeave {
|
||||||
return errors.Errorf("failed to find instance %q for leave", in.name)
|
return builder.Leave(ctx, txn, dockerCli, builder.LeaveOpts{
|
||||||
}
|
Name: in.name,
|
||||||
} else {
|
NodeName: in.nodeName,
|
||||||
return err
|
})
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
buildkitHost := os.Getenv("BUILDKIT_HOST")
|
|
||||||
|
|
||||||
driverName := in.driver
|
|
||||||
if driverName == "" {
|
|
||||||
if ng != nil {
|
|
||||||
driverName = ng.Driver
|
|
||||||
} else if len(args) == 0 && buildkitHost != "" {
|
|
||||||
driverName = "remote"
|
|
||||||
} else {
|
|
||||||
var arg string
|
|
||||||
if len(args) > 0 {
|
|
||||||
arg = args[0]
|
|
||||||
}
|
|
||||||
f, err := driver.GetDefaultFactory(ctx, arg, dockerCli.Client(), true)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if f == nil {
|
|
||||||
return errors.Errorf("no valid drivers found")
|
|
||||||
}
|
|
||||||
driverName = f.Name()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if ng != nil {
|
|
||||||
if in.nodeName == "" && !in.actionAppend {
|
|
||||||
return errors.Errorf("existing instance for %q but no append mode, specify --node to make changes for existing instances", name)
|
|
||||||
}
|
|
||||||
if driverName != ng.Driver {
|
|
||||||
return errors.Errorf("existing instance for %q but has mismatched driver %q", name, ng.Driver)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := driver.GetFactory(driverName, true); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
ngOriginal := ng
|
|
||||||
if ngOriginal != nil {
|
|
||||||
ngOriginal = ngOriginal.Copy()
|
|
||||||
}
|
|
||||||
|
|
||||||
if ng == nil {
|
|
||||||
ng = &store.NodeGroup{
|
|
||||||
Name: name,
|
|
||||||
Driver: driverName,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var flags []string
|
|
||||||
if in.flags != "" {
|
|
||||||
flags, err = shlex.Split(in.flags)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "failed to parse buildkit flags")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var ep string
|
var ep string
|
||||||
var setEp bool
|
|
||||||
if in.actionLeave {
|
|
||||||
if err := ng.Leave(in.nodeName); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
ls, err := localstate.New(confutil.ConfigDir(dockerCli))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := ls.RemoveBuilderNode(ng.Name, in.nodeName); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
switch {
|
|
||||||
case driverName == "kubernetes":
|
|
||||||
if len(args) > 0 {
|
|
||||||
logrus.Warnf("kubernetes driver does not support endpoint args %q", args[0])
|
|
||||||
}
|
|
||||||
// generate node name if not provided to avoid duplicated endpoint
|
|
||||||
// error: https://github.com/docker/setup-buildx-action/issues/215
|
|
||||||
nodeName := in.nodeName
|
|
||||||
if nodeName == "" {
|
|
||||||
nodeName, err = k8sutil.GenerateNodeName(name, txn)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// naming endpoint to make --append works
|
|
||||||
ep = (&url.URL{
|
|
||||||
Scheme: driverName,
|
|
||||||
Path: "/" + name,
|
|
||||||
RawQuery: (&url.Values{
|
|
||||||
"deployment": {nodeName},
|
|
||||||
"kubeconfig": {os.Getenv("KUBECONFIG")},
|
|
||||||
}).Encode(),
|
|
||||||
}).String()
|
|
||||||
setEp = false
|
|
||||||
case driverName == "remote":
|
|
||||||
if len(args) > 0 {
|
if len(args) > 0 {
|
||||||
ep = args[0]
|
ep = args[0]
|
||||||
} else if buildkitHost != "" {
|
|
||||||
ep = buildkitHost
|
|
||||||
} else {
|
|
||||||
return errors.Errorf("no remote endpoint provided")
|
|
||||||
}
|
|
||||||
ep, err = validateBuildkitEndpoint(ep)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
setEp = true
|
|
||||||
case len(args) > 0:
|
|
||||||
ep, err = validateEndpoint(dockerCli, args[0])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
setEp = true
|
|
||||||
default:
|
|
||||||
if dockerCli.CurrentContext() == "default" && dockerCli.DockerEndpoint().TLSData != nil {
|
|
||||||
return errors.Errorf("could not create a builder instance with TLS data loaded from environment. Please use `docker context create <context-name>` to create a context for current environment and then create a builder instance with `docker buildx create <context-name>`")
|
|
||||||
}
|
|
||||||
ep, err = dockerutil.GetCurrentEndpoint(dockerCli)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
setEp = false
|
|
||||||
}
|
}
|
||||||
|
|
||||||
m, err := csvToMap(in.driverOpts)
|
b, err := builder.Create(ctx, txn, dockerCli, builder.CreateOpts{
|
||||||
|
Name: in.name,
|
||||||
|
Driver: in.driver,
|
||||||
|
NodeName: in.nodeName,
|
||||||
|
Platforms: in.platform,
|
||||||
|
DriverOpts: in.driverOpts,
|
||||||
|
BuildkitdFlags: in.buildkitdFlags,
|
||||||
|
BuildkitdConfigFile: in.buildkitdConfigFile,
|
||||||
|
Use: in.use,
|
||||||
|
Endpoint: ep,
|
||||||
|
Append: in.actionAppend,
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if in.configFile == "" {
|
// The store is no longer used from this point.
|
||||||
// if buildkit config is not provided, check if the default one is
|
// Release it so we aren't holding the file lock during the boot.
|
||||||
// available and use it
|
release()
|
||||||
if f, ok := confutil.DefaultConfigFile(dockerCli); ok {
|
|
||||||
logrus.Warnf("Using default BuildKit config in %s", f)
|
|
||||||
in.configFile = f
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := ng.Update(in.nodeName, ep, in.platform, setEp, in.actionAppend, flags, in.configFile, m); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := txn.Save(ng); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
b, err := builder.New(dockerCli,
|
|
||||||
builder.WithName(ng.Name),
|
|
||||||
builder.WithStore(txn),
|
|
||||||
builder.WithSkippedValidation(),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
nodes, err := b.LoadNodes(timeoutCtx, true)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, node := range nodes {
|
|
||||||
if err := node.Err; err != nil {
|
|
||||||
err := errors.Errorf("failed to initialize builder %s (%s): %s", ng.Name, node.Name, err)
|
|
||||||
var err2 error
|
|
||||||
if ngOriginal == nil {
|
|
||||||
err2 = txn.Remove(ng.Name)
|
|
||||||
} else {
|
|
||||||
err2 = txn.Save(ngOriginal)
|
|
||||||
}
|
|
||||||
if err2 != nil {
|
|
||||||
logrus.Warnf("Could not rollback to previous state: %s", err2)
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if in.use && ep != "" {
|
|
||||||
current, err := dockerutil.GetCurrentEndpoint(dockerCli)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := txn.SetCurrent(current, ng.Name, false, false); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if in.bootstrap {
|
if in.bootstrap {
|
||||||
if _, err = b.Boot(ctx); err != nil {
|
if _, err = b.Boot(ctx); err != nil {
|
||||||
@@ -306,7 +76,7 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("%s\n", ng.Name)
|
fmt.Printf("%s\n", b.Name)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -326,7 +96,7 @@ func createCmd(dockerCli command.Cli) *cobra.Command {
|
|||||||
Short: "Create a new builder instance",
|
Short: "Create a new builder instance",
|
||||||
Args: cli.RequiresMaxArgs(1),
|
Args: cli.RequiresMaxArgs(1),
|
||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
return runCreate(dockerCli, options, args)
|
return runCreate(cmd.Context(), dockerCli, options, args)
|
||||||
},
|
},
|
||||||
ValidArgsFunction: completion.Disable,
|
ValidArgsFunction: completion.Disable,
|
||||||
}
|
}
|
||||||
@@ -336,12 +106,16 @@ func createCmd(dockerCli command.Cli) *cobra.Command {
|
|||||||
flags.StringVar(&options.name, "name", "", "Builder instance name")
|
flags.StringVar(&options.name, "name", "", "Builder instance name")
|
||||||
flags.StringVar(&options.driver, "driver", "", fmt.Sprintf("Driver to use (available: %s)", drivers.String()))
|
flags.StringVar(&options.driver, "driver", "", fmt.Sprintf("Driver to use (available: %s)", drivers.String()))
|
||||||
flags.StringVar(&options.nodeName, "node", "", "Create/modify node with given name")
|
flags.StringVar(&options.nodeName, "node", "", "Create/modify node with given name")
|
||||||
flags.StringVar(&options.flags, "buildkitd-flags", "", "Flags for buildkitd daemon")
|
|
||||||
flags.StringVar(&options.configFile, "config", "", "BuildKit config file")
|
|
||||||
flags.StringArrayVar(&options.platform, "platform", []string{}, "Fixed platforms for current node")
|
flags.StringArrayVar(&options.platform, "platform", []string{}, "Fixed platforms for current node")
|
||||||
flags.StringArrayVar(&options.driverOpts, "driver-opt", []string{}, "Options for the driver")
|
flags.StringArrayVar(&options.driverOpts, "driver-opt", []string{}, "Options for the driver")
|
||||||
flags.BoolVar(&options.bootstrap, "bootstrap", false, "Boot builder after creation")
|
flags.StringVar(&options.buildkitdFlags, "buildkitd-flags", "", "BuildKit daemon flags")
|
||||||
|
|
||||||
|
// we allow for both "--config" and "--buildkitd-config", although the latter is the recommended way to avoid ambiguity.
|
||||||
|
flags.StringVar(&options.buildkitdConfigFile, "buildkitd-config", "", "BuildKit daemon config file")
|
||||||
|
flags.StringVar(&options.buildkitdConfigFile, "config", "", "BuildKit daemon config file")
|
||||||
|
flags.MarkHidden("config")
|
||||||
|
|
||||||
|
flags.BoolVar(&options.bootstrap, "bootstrap", false, "Boot builder after creation")
|
||||||
flags.BoolVar(&options.actionAppend, "append", false, "Append a node to builder instead of changing it")
|
flags.BoolVar(&options.actionAppend, "append", false, "Append a node to builder instead of changing it")
|
||||||
flags.BoolVar(&options.actionLeave, "leave", false, "Remove a node from builder instead of changing it")
|
flags.BoolVar(&options.actionLeave, "leave", false, "Remove a node from builder instead of changing it")
|
||||||
flags.BoolVar(&options.use, "use", false, "Set the current builder instance")
|
flags.BoolVar(&options.use, "use", false, "Set the current builder instance")
|
||||||
@@ -351,49 +125,3 @@ func createCmd(dockerCli command.Cli) *cobra.Command {
|
|||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|
||||||
func csvToMap(in []string) (map[string]string, error) {
|
|
||||||
if len(in) == 0 {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
m := make(map[string]string, len(in))
|
|
||||||
for _, s := range in {
|
|
||||||
csvReader := csv.NewReader(strings.NewReader(s))
|
|
||||||
fields, err := csvReader.Read()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
for _, v := range fields {
|
|
||||||
p := strings.SplitN(v, "=", 2)
|
|
||||||
if len(p) != 2 {
|
|
||||||
return nil, errors.Errorf("invalid value %q, expecting k=v", v)
|
|
||||||
}
|
|
||||||
m[p[0]] = p[1]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return m, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateEndpoint validates that endpoint is either a context or a docker host
|
|
||||||
func validateEndpoint(dockerCli command.Cli, ep string) (string, error) {
|
|
||||||
dem, err := dockerutil.GetDockerEndpoint(dockerCli, ep)
|
|
||||||
if err == nil && dem != nil {
|
|
||||||
if ep == "default" {
|
|
||||||
return dem.Host, nil
|
|
||||||
}
|
|
||||||
return ep, nil
|
|
||||||
}
|
|
||||||
h, err := dopts.ParseHost(true, ep)
|
|
||||||
if err != nil {
|
|
||||||
return "", errors.Wrapf(err, "failed to parse endpoint %s", ep)
|
|
||||||
}
|
|
||||||
return h, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateBuildkitEndpoint validates that endpoint is a valid buildkit host
|
|
||||||
func validateBuildkitEndpoint(ep string) (string, error) {
|
|
||||||
if err := remoteutil.IsValidEndpoint(ep); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return ep, nil
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,26 +0,0 @@
|
|||||||
package commands
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestCsvToMap(t *testing.T) {
|
|
||||||
d := []string{
|
|
||||||
"\"tolerations=key=foo,value=bar;key=foo2,value=bar2\",replicas=1",
|
|
||||||
"namespace=default",
|
|
||||||
}
|
|
||||||
r, err := csvToMap(d)
|
|
||||||
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
require.Contains(t, r, "tolerations")
|
|
||||||
require.Equal(t, r["tolerations"], "key=foo,value=bar;key=foo2,value=bar2")
|
|
||||||
|
|
||||||
require.Contains(t, r, "replicas")
|
|
||||||
require.Equal(t, r["replicas"], "1")
|
|
||||||
|
|
||||||
require.Contains(t, r, "namespace")
|
|
||||||
require.Equal(t, r["namespace"], "default")
|
|
||||||
}
|
|
||||||
@@ -1,70 +0,0 @@
|
|||||||
package commands
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"os"
|
|
||||||
"runtime"
|
|
||||||
|
|
||||||
"github.com/containerd/console"
|
|
||||||
"github.com/docker/buildx/controller"
|
|
||||||
"github.com/docker/buildx/controller/control"
|
|
||||||
controllerapi "github.com/docker/buildx/controller/pb"
|
|
||||||
"github.com/docker/buildx/monitor"
|
|
||||||
"github.com/docker/buildx/util/progress"
|
|
||||||
"github.com/docker/cli/cli/command"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"github.com/spf13/cobra"
|
|
||||||
)
|
|
||||||
|
|
||||||
func debugShellCmd(dockerCli command.Cli) *cobra.Command {
|
|
||||||
var options control.ControlOptions
|
|
||||||
var progressMode string
|
|
||||||
|
|
||||||
cmd := &cobra.Command{
|
|
||||||
Use: "debug-shell",
|
|
||||||
Short: "Start a monitor",
|
|
||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
|
||||||
printer, err := progress.NewPrinter(context.TODO(), os.Stderr, os.Stderr, progressMode)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := context.TODO()
|
|
||||||
c, err := controller.NewController(ctx, options, dockerCli, printer)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
if err := c.Close(); err != nil {
|
|
||||||
logrus.Warnf("failed to close server connection %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
con := console.Current()
|
|
||||||
if err := con.SetRaw(); err != nil {
|
|
||||||
return errors.Errorf("failed to configure terminal: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = monitor.RunMonitor(ctx, "", nil, controllerapi.InvokeConfig{
|
|
||||||
Tty: true,
|
|
||||||
}, c, dockerCli.In(), os.Stdout, os.Stderr, printer)
|
|
||||||
con.Reset()
|
|
||||||
return err
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
flags := cmd.Flags()
|
|
||||||
|
|
||||||
flags.StringVar(&options.Root, "root", "", "Specify root directory of server to connect [experimental]")
|
|
||||||
flags.BoolVar(&options.Detach, "detach", runtime.GOOS == "linux", "Detach buildx server (supported only on linux) [experimental]")
|
|
||||||
flags.StringVar(&options.ServerConfig, "server-config", "", "Specify buildx server config file (used only when launching new server) [experimental]")
|
|
||||||
flags.StringVar(&progressMode, "progress", "auto", `Set type of progress output ("auto", "plain", "tty"). Use plain to show container output`)
|
|
||||||
|
|
||||||
return cmd
|
|
||||||
}
|
|
||||||
|
|
||||||
func addDebugShellCommand(cmd *cobra.Command, dockerCli command.Cli) {
|
|
||||||
cmd.AddCommand(
|
|
||||||
debugShellCmd(dockerCli),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
92
commands/debug/root.go
Normal file
92
commands/debug/root.go
Normal file
@@ -0,0 +1,92 @@
|
|||||||
|
package debug
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"os"
|
||||||
|
"runtime"
|
||||||
|
|
||||||
|
"github.com/containerd/console"
|
||||||
|
"github.com/docker/buildx/controller"
|
||||||
|
"github.com/docker/buildx/controller/control"
|
||||||
|
controllerapi "github.com/docker/buildx/controller/pb"
|
||||||
|
"github.com/docker/buildx/monitor"
|
||||||
|
"github.com/docker/buildx/util/cobrautil"
|
||||||
|
"github.com/docker/buildx/util/progress"
|
||||||
|
"github.com/docker/cli/cli/command"
|
||||||
|
"github.com/moby/buildkit/util/progress/progressui"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DebugConfig is a user-specified configuration for the debugger.
|
||||||
|
type DebugConfig struct {
|
||||||
|
// InvokeFlag is a flag to configure the launched debugger and the commaned executed on the debugger.
|
||||||
|
InvokeFlag string
|
||||||
|
|
||||||
|
// OnFlag is a flag to configure the timing of launching the debugger.
|
||||||
|
OnFlag string
|
||||||
|
}
|
||||||
|
|
||||||
|
// DebuggableCmd is a command that supports debugger with recognizing the user-specified DebugConfig.
|
||||||
|
type DebuggableCmd interface {
|
||||||
|
// NewDebugger returns the new *cobra.Command with support for the debugger with recognizing DebugConfig.
|
||||||
|
NewDebugger(*DebugConfig) *cobra.Command
|
||||||
|
}
|
||||||
|
|
||||||
|
func RootCmd(dockerCli command.Cli, children ...DebuggableCmd) *cobra.Command {
|
||||||
|
var controlOptions control.ControlOptions
|
||||||
|
var progressMode string
|
||||||
|
var options DebugConfig
|
||||||
|
|
||||||
|
cmd := &cobra.Command{
|
||||||
|
Use: "debug",
|
||||||
|
Short: "Start debugger",
|
||||||
|
Args: cobra.NoArgs,
|
||||||
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
printer, err := progress.NewPrinter(context.TODO(), os.Stderr, progressui.DisplayMode(progressMode))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.TODO()
|
||||||
|
c, err := controller.NewController(ctx, controlOptions, dockerCli, printer)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if err := c.Close(); err != nil {
|
||||||
|
logrus.Warnf("failed to close server connection %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
con := console.Current()
|
||||||
|
if err := con.SetRaw(); err != nil {
|
||||||
|
return errors.Errorf("failed to configure terminal: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = monitor.RunMonitor(ctx, "", nil, controllerapi.InvokeConfig{
|
||||||
|
Tty: true,
|
||||||
|
}, c, dockerCli.In(), os.Stdout, os.Stderr, printer)
|
||||||
|
con.Reset()
|
||||||
|
return err
|
||||||
|
},
|
||||||
|
}
|
||||||
|
cobrautil.MarkCommandExperimental(cmd)
|
||||||
|
|
||||||
|
flags := cmd.Flags()
|
||||||
|
flags.StringVar(&options.InvokeFlag, "invoke", "", "Launch a monitor with executing specified command")
|
||||||
|
flags.StringVar(&options.OnFlag, "on", "error", "When to launch the monitor ([always, error])")
|
||||||
|
|
||||||
|
flags.StringVar(&controlOptions.Root, "root", "", "Specify root directory of server to connect for the monitor")
|
||||||
|
flags.BoolVar(&controlOptions.Detach, "detach", runtime.GOOS == "linux", "Detach buildx server for the monitor (supported only on linux)")
|
||||||
|
flags.StringVar(&controlOptions.ServerConfig, "server-config", "", "Specify buildx server config file for the monitor (used only when launching new server)")
|
||||||
|
flags.StringVar(&progressMode, "progress", "auto", `Set type of progress output ("auto", "plain", "tty", "rawjson") for the monitor. Use plain to show container output`)
|
||||||
|
|
||||||
|
cobrautil.MarkFlagsExperimental(flags, "invoke", "on", "root", "detach", "server-config")
|
||||||
|
|
||||||
|
for _, c := range children {
|
||||||
|
cmd.AddCommand(c.NewDebugger(&options))
|
||||||
|
}
|
||||||
|
|
||||||
|
return cmd
|
||||||
|
}
|
||||||
131
commands/dial_stdio.go
Normal file
131
commands/dial_stdio.go
Normal file
@@ -0,0 +1,131 @@
|
|||||||
|
package commands
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/containerd/platforms"
|
||||||
|
"github.com/docker/buildx/build"
|
||||||
|
"github.com/docker/buildx/builder"
|
||||||
|
"github.com/docker/buildx/util/progress"
|
||||||
|
"github.com/docker/cli/cli/command"
|
||||||
|
"github.com/moby/buildkit/util/appcontext"
|
||||||
|
"github.com/moby/buildkit/util/progress/progressui"
|
||||||
|
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
"golang.org/x/sync/errgroup"
|
||||||
|
)
|
||||||
|
|
||||||
|
type stdioOptions struct {
|
||||||
|
builder string
|
||||||
|
platform string
|
||||||
|
progress string
|
||||||
|
}
|
||||||
|
|
||||||
|
func runDialStdio(dockerCli command.Cli, opts stdioOptions) error {
|
||||||
|
ctx := appcontext.Context()
|
||||||
|
|
||||||
|
contextPathHash, _ := os.Getwd()
|
||||||
|
b, err := builder.New(dockerCli,
|
||||||
|
builder.WithName(opts.builder),
|
||||||
|
builder.WithContextPathHash(contextPathHash),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = updateLastActivity(dockerCli, b.NodeGroup); err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to update builder last activity time")
|
||||||
|
}
|
||||||
|
nodes, err := b.LoadNodes(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
printer, err := progress.NewPrinter(ctx, os.Stderr, progressui.DisplayMode(opts.progress), progress.WithPhase("dial-stdio"), progress.WithDesc("builder: "+b.Name, "builder:"+b.Name))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var p *v1.Platform
|
||||||
|
if opts.platform != "" {
|
||||||
|
pp, err := platforms.Parse(opts.platform)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "invalid platform %q", opts.platform)
|
||||||
|
}
|
||||||
|
p = &pp
|
||||||
|
}
|
||||||
|
|
||||||
|
defer printer.Wait()
|
||||||
|
|
||||||
|
return progress.Wrap("Proxying to builder", printer.Write, func(sub progress.SubLogger) error {
|
||||||
|
var conn net.Conn
|
||||||
|
|
||||||
|
err := sub.Wrap("Dialing builder", func() error {
|
||||||
|
conn, err = build.Dial(ctx, nodes, printer, p)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer conn.Close()
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
<-ctx.Done()
|
||||||
|
closeWrite(conn)
|
||||||
|
}()
|
||||||
|
|
||||||
|
var eg errgroup.Group
|
||||||
|
|
||||||
|
eg.Go(func() error {
|
||||||
|
_, err := io.Copy(conn, os.Stdin)
|
||||||
|
closeWrite(conn)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
eg.Go(func() error {
|
||||||
|
_, err := io.Copy(os.Stdout, conn)
|
||||||
|
closeRead(conn)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
return eg.Wait()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func closeRead(conn net.Conn) error {
|
||||||
|
if c, ok := conn.(interface{ CloseRead() error }); ok {
|
||||||
|
return c.CloseRead()
|
||||||
|
}
|
||||||
|
return conn.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func closeWrite(conn net.Conn) error {
|
||||||
|
if c, ok := conn.(interface{ CloseWrite() error }); ok {
|
||||||
|
return c.CloseWrite()
|
||||||
|
}
|
||||||
|
return conn.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func dialStdioCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
||||||
|
opts := stdioOptions{}
|
||||||
|
|
||||||
|
cmd := &cobra.Command{
|
||||||
|
Use: "dial-stdio",
|
||||||
|
Short: "Proxy current stdio streams to builder instance",
|
||||||
|
Args: cobra.NoArgs,
|
||||||
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
opts.builder = rootOpts.builder
|
||||||
|
return runDialStdio(dockerCli, opts)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
flags := cmd.Flags()
|
||||||
|
flags.StringVar(&opts.platform, "platform", os.Getenv("DOCKER_DEFAULT_PLATFORM"), "Target platform: this is used for node selection")
|
||||||
|
flags.StringVar(&opts.progress, "progress", "quiet", `Set type of progress output ("auto", "plain", "tty", "rawjson"). Use plain to show container output`)
|
||||||
|
return cmd
|
||||||
|
}
|
||||||
@@ -1,6 +1,7 @@
|
|||||||
package commands
|
package commands
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
@@ -15,7 +16,6 @@ import (
|
|||||||
"github.com/docker/cli/opts"
|
"github.com/docker/cli/opts"
|
||||||
"github.com/docker/go-units"
|
"github.com/docker/go-units"
|
||||||
"github.com/moby/buildkit/client"
|
"github.com/moby/buildkit/client"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
)
|
)
|
||||||
@@ -26,9 +26,7 @@ type duOptions struct {
|
|||||||
verbose bool
|
verbose bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func runDiskUsage(dockerCli command.Cli, opts duOptions) error {
|
func runDiskUsage(ctx context.Context, dockerCli command.Cli, opts duOptions) error {
|
||||||
ctx := appcontext.Context()
|
|
||||||
|
|
||||||
pi, err := toBuildkitPruneInfo(opts.filter.Value())
|
pi, err := toBuildkitPruneInfo(opts.filter.Value())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -39,7 +37,7 @@ func runDiskUsage(dockerCli command.Cli, opts duOptions) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
nodes, err := b.LoadNodes(ctx, false)
|
nodes, err := b.LoadNodes(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -114,7 +112,7 @@ func duCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
Args: cli.NoArgs,
|
Args: cli.NoArgs,
|
||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
options.builder = rootOpts.builder
|
options.builder = rootOpts.builder
|
||||||
return runDiskUsage(dockerCli, options)
|
return runDiskUsage(cmd.Context(), dockerCli, options)
|
||||||
},
|
},
|
||||||
ValidArgsFunction: completion.Disable,
|
ValidArgsFunction: completion.Disable,
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,13 +7,14 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/distribution/reference"
|
||||||
"github.com/docker/buildx/builder"
|
"github.com/docker/buildx/builder"
|
||||||
|
"github.com/docker/buildx/util/buildflags"
|
||||||
"github.com/docker/buildx/util/cobrautil/completion"
|
"github.com/docker/buildx/util/cobrautil/completion"
|
||||||
"github.com/docker/buildx/util/imagetools"
|
"github.com/docker/buildx/util/imagetools"
|
||||||
"github.com/docker/buildx/util/progress"
|
"github.com/docker/buildx/util/progress"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/docker/distribution/reference"
|
"github.com/moby/buildkit/util/progress/progressui"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
|
||||||
"github.com/opencontainers/go-digest"
|
"github.com/opencontainers/go-digest"
|
||||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
@@ -25,12 +26,14 @@ type createOptions struct {
|
|||||||
builder string
|
builder string
|
||||||
files []string
|
files []string
|
||||||
tags []string
|
tags []string
|
||||||
|
annotations []string
|
||||||
dryrun bool
|
dryrun bool
|
||||||
actionAppend bool
|
actionAppend bool
|
||||||
progress string
|
progress string
|
||||||
|
preferIndex bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, args []string) error {
|
||||||
if len(args) == 0 && len(in.files) == 0 {
|
if len(args) == 0 && len(in.files) == 0 {
|
||||||
return errors.Errorf("no sources specified")
|
return errors.Errorf("no sources specified")
|
||||||
}
|
}
|
||||||
@@ -111,8 +114,6 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx := appcontext.Context()
|
|
||||||
|
|
||||||
b, err := builder.New(dockerCli, builder.WithName(in.builder))
|
b, err := builder.New(dockerCli, builder.WithName(in.builder))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -154,7 +155,12 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
dt, desc, err := r.Combine(ctx, srcs)
|
annotations, err := buildflags.ParseAnnotations(in.annotations)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to parse annotations")
|
||||||
|
}
|
||||||
|
|
||||||
|
dt, desc, err := r.Combine(ctx, srcs, annotations, in.preferIndex)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -169,7 +175,7 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
|
|
||||||
ctx2, cancel := context.WithCancel(context.TODO())
|
ctx2, cancel := context.WithCancel(context.TODO())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
printer, err := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, in.progress)
|
printer, err := progress.NewPrinter(ctx2, os.Stderr, progressui.DisplayMode(in.progress))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -272,7 +278,7 @@ func createCmd(dockerCli command.Cli, opts RootOptions) *cobra.Command {
|
|||||||
Short: "Create a new image based on source images",
|
Short: "Create a new image based on source images",
|
||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
options.builder = *opts.Builder
|
options.builder = *opts.Builder
|
||||||
return runCreate(dockerCli, options, args)
|
return runCreate(cmd.Context(), dockerCli, options, args)
|
||||||
},
|
},
|
||||||
ValidArgsFunction: completion.Disable,
|
ValidArgsFunction: completion.Disable,
|
||||||
}
|
}
|
||||||
@@ -282,7 +288,9 @@ func createCmd(dockerCli command.Cli, opts RootOptions) *cobra.Command {
|
|||||||
flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, "Set reference for new image")
|
flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, "Set reference for new image")
|
||||||
flags.BoolVar(&options.dryrun, "dry-run", false, "Show final image instead of pushing")
|
flags.BoolVar(&options.dryrun, "dry-run", false, "Show final image instead of pushing")
|
||||||
flags.BoolVar(&options.actionAppend, "append", false, "Append to existing manifest")
|
flags.BoolVar(&options.actionAppend, "append", false, "Append to existing manifest")
|
||||||
flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty"). Use plain to show container output`)
|
flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty", "rawjson"). Use plain to show container output`)
|
||||||
|
flags.StringArrayVarP(&options.annotations, "annotation", "", []string{}, "Add annotation to the image")
|
||||||
|
flags.BoolVar(&options.preferIndex, "prefer-index", true, "When only a single source is specified, prefer outputting an image index or manifest list instead of performing a carbon copy")
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,13 +1,14 @@
|
|||||||
package commands
|
package commands
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
"github.com/docker/buildx/builder"
|
"github.com/docker/buildx/builder"
|
||||||
"github.com/docker/buildx/util/cobrautil/completion"
|
"github.com/docker/buildx/util/cobrautil/completion"
|
||||||
"github.com/docker/buildx/util/imagetools"
|
"github.com/docker/buildx/util/imagetools"
|
||||||
"github.com/docker/cli-docs-tool/annotation"
|
"github.com/docker/cli-docs-tool/annotation"
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
)
|
)
|
||||||
@@ -18,9 +19,7 @@ type inspectOptions struct {
|
|||||||
raw bool
|
raw bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func runInspect(dockerCli command.Cli, in inspectOptions, name string) error {
|
func runInspect(ctx context.Context, dockerCli command.Cli, in inspectOptions, name string) error {
|
||||||
ctx := appcontext.Context()
|
|
||||||
|
|
||||||
if in.format != "" && in.raw {
|
if in.format != "" && in.raw {
|
||||||
return errors.Errorf("format and raw cannot be used together")
|
return errors.Errorf("format and raw cannot be used together")
|
||||||
}
|
}
|
||||||
@@ -51,7 +50,7 @@ func inspectCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
|
|||||||
Args: cli.ExactArgs(1),
|
Args: cli.ExactArgs(1),
|
||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
options.builder = *rootOpts.Builder
|
options.builder = *rootOpts.Builder
|
||||||
return runInspect(dockerCli, options, args[0])
|
return runInspect(cmd.Context(), dockerCli, options, args[0])
|
||||||
},
|
},
|
||||||
ValidArgsFunction: completion.Disable,
|
ValidArgsFunction: completion.Disable,
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -17,7 +17,6 @@ import (
|
|||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/docker/cli/cli/debug"
|
"github.com/docker/cli/cli/debug"
|
||||||
"github.com/docker/go-units"
|
"github.com/docker/go-units"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -26,9 +25,7 @@ type inspectOptions struct {
|
|||||||
builder string
|
builder string
|
||||||
}
|
}
|
||||||
|
|
||||||
func runInspect(dockerCli command.Cli, in inspectOptions) error {
|
func runInspect(ctx context.Context, dockerCli command.Cli, in inspectOptions) error {
|
||||||
ctx := appcontext.Context()
|
|
||||||
|
|
||||||
b, err := builder.New(dockerCli,
|
b, err := builder.New(dockerCli,
|
||||||
builder.WithName(in.builder),
|
builder.WithName(in.builder),
|
||||||
builder.WithSkippedValidation(),
|
builder.WithSkippedValidation(),
|
||||||
@@ -40,7 +37,7 @@ func runInspect(dockerCli command.Cli, in inspectOptions) error {
|
|||||||
timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
|
timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
nodes, err := b.LoadNodes(timeoutCtx, true)
|
nodes, err := b.LoadNodes(timeoutCtx, builder.WithData())
|
||||||
if in.bootstrap {
|
if in.bootstrap {
|
||||||
var ok bool
|
var ok bool
|
||||||
ok, err = b.Boot(ctx)
|
ok, err = b.Boot(ctx)
|
||||||
@@ -48,7 +45,7 @@ func runInspect(dockerCli command.Cli, in inspectOptions) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if ok {
|
if ok {
|
||||||
nodes, err = b.LoadNodes(timeoutCtx, true)
|
nodes, err = b.LoadNodes(timeoutCtx, builder.WithData())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -87,13 +84,16 @@ func runInspect(dockerCli command.Cli, in inspectOptions) error {
|
|||||||
fmt.Fprintf(w, "Error:\t%s\n", err.Error())
|
fmt.Fprintf(w, "Error:\t%s\n", err.Error())
|
||||||
} else {
|
} else {
|
||||||
fmt.Fprintf(w, "Status:\t%s\n", nodes[i].DriverInfo.Status)
|
fmt.Fprintf(w, "Status:\t%s\n", nodes[i].DriverInfo.Status)
|
||||||
if len(n.Flags) > 0 {
|
if len(n.BuildkitdFlags) > 0 {
|
||||||
fmt.Fprintf(w, "Flags:\t%s\n", strings.Join(n.Flags, " "))
|
fmt.Fprintf(w, "BuildKit daemon flags:\t%s\n", strings.Join(n.BuildkitdFlags, " "))
|
||||||
}
|
}
|
||||||
if nodes[i].Version != "" {
|
if nodes[i].Version != "" {
|
||||||
fmt.Fprintf(w, "Buildkit:\t%s\n", nodes[i].Version)
|
fmt.Fprintf(w, "BuildKit version:\t%s\n", nodes[i].Version)
|
||||||
|
}
|
||||||
|
platforms := platformutil.FormatInGroups(n.Node.Platforms, n.Platforms)
|
||||||
|
if len(platforms) > 0 {
|
||||||
|
fmt.Fprintf(w, "Platforms:\t%s\n", strings.Join(platforms, ", "))
|
||||||
}
|
}
|
||||||
fmt.Fprintf(w, "Platforms:\t%s\n", strings.Join(platformutil.FormatInGroups(n.Node.Platforms, n.Platforms), ", "))
|
|
||||||
if debug.IsEnabled() {
|
if debug.IsEnabled() {
|
||||||
fmt.Fprintf(w, "Features:\n")
|
fmt.Fprintf(w, "Features:\n")
|
||||||
features := nodes[i].Driver.Features(ctx)
|
features := nodes[i].Driver.Features(ctx)
|
||||||
@@ -147,7 +147,7 @@ func inspectCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
if len(args) > 0 {
|
if len(args) > 0 {
|
||||||
options.builder = args[0]
|
options.builder = args[0]
|
||||||
}
|
}
|
||||||
return runInspect(dockerCli, options)
|
return runInspect(cmd.Context(), dockerCli, options)
|
||||||
},
|
},
|
||||||
ValidArgsFunction: completion.BuilderNames(dockerCli),
|
ValidArgsFunction: completion.BuilderNames(dockerCli),
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ import (
|
|||||||
type installOptions struct {
|
type installOptions struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func runInstall(dockerCli command.Cli, in installOptions) error {
|
func runInstall(_ command.Cli, _ installOptions) error {
|
||||||
dir := config.Dir()
|
dir := config.Dir()
|
||||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||||
return errors.Wrap(err, "could not create docker config")
|
return errors.Wrap(err, "could not create docker config")
|
||||||
|
|||||||
235
commands/ls.go
235
commands/ls.go
@@ -2,30 +2,43 @@ package commands
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
"text/tabwriter"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/docker/buildx/builder"
|
"github.com/docker/buildx/builder"
|
||||||
|
"github.com/docker/buildx/store"
|
||||||
"github.com/docker/buildx/store/storeutil"
|
"github.com/docker/buildx/store/storeutil"
|
||||||
"github.com/docker/buildx/util/cobrautil"
|
"github.com/docker/buildx/util/cobrautil"
|
||||||
"github.com/docker/buildx/util/cobrautil/completion"
|
"github.com/docker/buildx/util/cobrautil/completion"
|
||||||
"github.com/docker/buildx/util/platformutil"
|
"github.com/docker/buildx/util/platformutil"
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
"github.com/docker/cli/cli/command/formatter"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
lsNameNodeHeader = "NAME/NODE"
|
||||||
|
lsDriverEndpointHeader = "DRIVER/ENDPOINT"
|
||||||
|
lsStatusHeader = "STATUS"
|
||||||
|
lsLastActivityHeader = "LAST ACTIVITY"
|
||||||
|
lsBuildkitHeader = "BUILDKIT"
|
||||||
|
lsPlatformsHeader = "PLATFORMS"
|
||||||
|
|
||||||
|
lsIndent = ` \_ `
|
||||||
|
|
||||||
|
lsDefaultTableFormat = "table {{.Name}}\t{{.DriverEndpoint}}\t{{.Status}}\t{{.Buildkit}}\t{{.Platforms}}"
|
||||||
|
)
|
||||||
|
|
||||||
type lsOptions struct {
|
type lsOptions struct {
|
||||||
|
format string
|
||||||
}
|
}
|
||||||
|
|
||||||
func runLs(dockerCli command.Cli, in lsOptions) error {
|
func runLs(ctx context.Context, dockerCli command.Cli, in lsOptions) error {
|
||||||
ctx := appcontext.Context()
|
|
||||||
|
|
||||||
txn, release, err := storeutil.GetStore(dockerCli)
|
txn, release, err := storeutil.GetStore(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -49,7 +62,7 @@ func runLs(dockerCli command.Cli, in lsOptions) error {
|
|||||||
for _, b := range builders {
|
for _, b := range builders {
|
||||||
func(b *builder.Builder) {
|
func(b *builder.Builder) {
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
_, _ = b.LoadNodes(timeoutCtx, true)
|
_, _ = b.LoadNodes(timeoutCtx, builder.WithData())
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
}(b)
|
}(b)
|
||||||
@@ -59,22 +72,9 @@ func runLs(dockerCli command.Cli, in lsOptions) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
w := tabwriter.NewWriter(dockerCli.Out(), 0, 0, 1, ' ', 0)
|
if hasErrors, err := lsPrint(dockerCli, current, builders, in.format); err != nil {
|
||||||
fmt.Fprintf(w, "NAME/NODE\tDRIVER/ENDPOINT\tSTATUS\tBUILDKIT\tPLATFORMS\n")
|
return err
|
||||||
|
} else if hasErrors {
|
||||||
printErr := false
|
|
||||||
for _, b := range builders {
|
|
||||||
if current.Name == b.Name {
|
|
||||||
b.Name += " *"
|
|
||||||
}
|
|
||||||
if ok := printBuilder(w, b); !ok {
|
|
||||||
printErr = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
w.Flush()
|
|
||||||
|
|
||||||
if printErr {
|
|
||||||
_, _ = fmt.Fprintf(dockerCli.Err(), "\n")
|
_, _ = fmt.Fprintf(dockerCli.Err(), "\n")
|
||||||
for _, b := range builders {
|
for _, b := range builders {
|
||||||
if b.Err() != nil {
|
if b.Err() != nil {
|
||||||
@@ -92,31 +92,6 @@ func runLs(dockerCli command.Cli, in lsOptions) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func printBuilder(w io.Writer, b *builder.Builder) (ok bool) {
|
|
||||||
ok = true
|
|
||||||
var err string
|
|
||||||
if b.Err() != nil {
|
|
||||||
ok = false
|
|
||||||
err = "error"
|
|
||||||
}
|
|
||||||
fmt.Fprintf(w, "%s\t%s\t%s\t\t\n", b.Name, b.Driver, err)
|
|
||||||
if b.Err() == nil {
|
|
||||||
for _, n := range b.Nodes() {
|
|
||||||
var status string
|
|
||||||
if n.DriverInfo != nil {
|
|
||||||
status = n.DriverInfo.Status.String()
|
|
||||||
}
|
|
||||||
if n.Err != nil {
|
|
||||||
ok = false
|
|
||||||
fmt.Fprintf(w, " %s\t%s\t%s\t\t\n", n.Name, n.Endpoint, "error")
|
|
||||||
} else {
|
|
||||||
fmt.Fprintf(w, " %s\t%s\t%s\t%s\t%s\n", n.Name, n.Endpoint, status, n.Version, strings.Join(platformutil.FormatInGroups(n.Node.Platforms, n.Platforms), ", "))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func lsCmd(dockerCli command.Cli) *cobra.Command {
|
func lsCmd(dockerCli command.Cli) *cobra.Command {
|
||||||
var options lsOptions
|
var options lsOptions
|
||||||
|
|
||||||
@@ -125,13 +100,175 @@ func lsCmd(dockerCli command.Cli) *cobra.Command {
|
|||||||
Short: "List builder instances",
|
Short: "List builder instances",
|
||||||
Args: cli.ExactArgs(0),
|
Args: cli.ExactArgs(0),
|
||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
return runLs(dockerCli, options)
|
return runLs(cmd.Context(), dockerCli, options)
|
||||||
},
|
},
|
||||||
ValidArgsFunction: completion.Disable,
|
ValidArgsFunction: completion.Disable,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
flags := cmd.Flags()
|
||||||
|
flags.StringVar(&options.format, "format", formatter.TableFormatKey, "Format the output")
|
||||||
|
|
||||||
// hide builder persistent flag for this command
|
// hide builder persistent flag for this command
|
||||||
cobrautil.HideInheritedFlags(cmd, "builder")
|
cobrautil.HideInheritedFlags(cmd, "builder")
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func lsPrint(dockerCli command.Cli, current *store.NodeGroup, builders []*builder.Builder, format string) (hasErrors bool, _ error) {
|
||||||
|
if format == formatter.TableFormatKey {
|
||||||
|
format = lsDefaultTableFormat
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := formatter.Context{
|
||||||
|
Output: dockerCli.Out(),
|
||||||
|
Format: formatter.Format(format),
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.SliceStable(builders, func(i, j int) bool {
|
||||||
|
ierr := builders[i].Err() != nil
|
||||||
|
jerr := builders[j].Err() != nil
|
||||||
|
if ierr && !jerr {
|
||||||
|
return false
|
||||||
|
} else if !ierr && jerr {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return i < j
|
||||||
|
})
|
||||||
|
|
||||||
|
render := func(format func(subContext formatter.SubContext) error) error {
|
||||||
|
for _, b := range builders {
|
||||||
|
if err := format(&lsContext{
|
||||||
|
Builder: &lsBuilder{
|
||||||
|
Builder: b,
|
||||||
|
Current: b.Name == current.Name,
|
||||||
|
},
|
||||||
|
format: ctx.Format,
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if b.Err() != nil {
|
||||||
|
if ctx.Format.IsTable() {
|
||||||
|
hasErrors = true
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for _, n := range b.Nodes() {
|
||||||
|
if n.Err != nil {
|
||||||
|
if ctx.Format.IsTable() {
|
||||||
|
hasErrors = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := format(&lsContext{
|
||||||
|
format: ctx.Format,
|
||||||
|
Builder: &lsBuilder{
|
||||||
|
Builder: b,
|
||||||
|
Current: b.Name == current.Name,
|
||||||
|
},
|
||||||
|
node: n,
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
lsCtx := lsContext{}
|
||||||
|
lsCtx.Header = formatter.SubHeaderContext{
|
||||||
|
"Name": lsNameNodeHeader,
|
||||||
|
"DriverEndpoint": lsDriverEndpointHeader,
|
||||||
|
"LastActivity": lsLastActivityHeader,
|
||||||
|
"Status": lsStatusHeader,
|
||||||
|
"Buildkit": lsBuildkitHeader,
|
||||||
|
"Platforms": lsPlatformsHeader,
|
||||||
|
}
|
||||||
|
|
||||||
|
return hasErrors, ctx.Write(&lsCtx, render)
|
||||||
|
}
|
||||||
|
|
||||||
|
type lsBuilder struct {
|
||||||
|
*builder.Builder
|
||||||
|
Current bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type lsContext struct {
|
||||||
|
formatter.HeaderContext
|
||||||
|
Builder *lsBuilder
|
||||||
|
|
||||||
|
format formatter.Format
|
||||||
|
node builder.Node
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *lsContext) MarshalJSON() ([]byte, error) {
|
||||||
|
return json.Marshal(c.Builder)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *lsContext) Name() string {
|
||||||
|
if c.node.Name == "" {
|
||||||
|
name := c.Builder.Name
|
||||||
|
if c.Builder.Current && c.format.IsTable() {
|
||||||
|
name += "*"
|
||||||
|
}
|
||||||
|
return name
|
||||||
|
}
|
||||||
|
if c.format.IsTable() {
|
||||||
|
return lsIndent + c.node.Name
|
||||||
|
}
|
||||||
|
return c.node.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *lsContext) DriverEndpoint() string {
|
||||||
|
if c.node.Name == "" {
|
||||||
|
return c.Builder.Driver
|
||||||
|
}
|
||||||
|
if c.format.IsTable() {
|
||||||
|
return lsIndent + c.node.Endpoint
|
||||||
|
}
|
||||||
|
return c.node.Endpoint
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *lsContext) LastActivity() string {
|
||||||
|
if c.node.Name != "" || c.Builder.LastActivity.IsZero() {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return c.Builder.LastActivity.UTC().Format(time.RFC3339)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *lsContext) Status() string {
|
||||||
|
if c.node.Name == "" {
|
||||||
|
if c.Builder.Err() != nil {
|
||||||
|
return "error"
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
if c.node.Err != nil {
|
||||||
|
return "error"
|
||||||
|
}
|
||||||
|
if c.node.DriverInfo != nil {
|
||||||
|
return c.node.DriverInfo.Status.String()
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *lsContext) Buildkit() string {
|
||||||
|
if c.node.Name == "" {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return c.node.Version
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *lsContext) Platforms() string {
|
||||||
|
if c.node.Name == "" {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return strings.Join(platformutil.FormatInGroups(c.node.Node.Platforms, c.node.Platforms), ", ")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *lsContext) Error() string {
|
||||||
|
if c.node.Name != "" && c.node.Err != nil {
|
||||||
|
return c.node.Err.Error()
|
||||||
|
} else if err := c.Builder.Err(); err != nil {
|
||||||
|
return err.Error()
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
package commands
|
package commands
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
@@ -15,7 +16,6 @@ import (
|
|||||||
"github.com/docker/docker/api/types/filters"
|
"github.com/docker/docker/api/types/filters"
|
||||||
"github.com/docker/go-units"
|
"github.com/docker/go-units"
|
||||||
"github.com/moby/buildkit/client"
|
"github.com/moby/buildkit/client"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
@@ -35,9 +35,7 @@ const (
|
|||||||
allCacheWarning = `WARNING! This will remove all build cache. Are you sure you want to continue?`
|
allCacheWarning = `WARNING! This will remove all build cache. Are you sure you want to continue?`
|
||||||
)
|
)
|
||||||
|
|
||||||
func runPrune(dockerCli command.Cli, opts pruneOptions) error {
|
func runPrune(ctx context.Context, dockerCli command.Cli, opts pruneOptions) error {
|
||||||
ctx := appcontext.Context()
|
|
||||||
|
|
||||||
pruneFilters := opts.filter.Value()
|
pruneFilters := opts.filter.Value()
|
||||||
pruneFilters = command.PruneFilters(dockerCli, pruneFilters)
|
pruneFilters = command.PruneFilters(dockerCli, pruneFilters)
|
||||||
|
|
||||||
@@ -51,16 +49,20 @@ func runPrune(dockerCli command.Cli, opts pruneOptions) error {
|
|||||||
warning = allCacheWarning
|
warning = allCacheWarning
|
||||||
}
|
}
|
||||||
|
|
||||||
if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) {
|
if !opts.force {
|
||||||
|
if ok, err := prompt(ctx, dockerCli.In(), dockerCli.Out(), warning); err != nil {
|
||||||
|
return err
|
||||||
|
} else if !ok {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
|
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
nodes, err := b.LoadNodes(ctx, false)
|
nodes, err := b.LoadNodes(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -138,7 +140,7 @@ func pruneCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
Args: cli.NoArgs,
|
Args: cli.NoArgs,
|
||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
options.builder = rootOpts.builder
|
options.builder = rootOpts.builder
|
||||||
return runPrune(dockerCli, options)
|
return runPrune(cmd.Context(), dockerCli, options)
|
||||||
},
|
},
|
||||||
ValidArgsFunction: completion.Disable,
|
ValidArgsFunction: completion.Disable,
|
||||||
}
|
}
|
||||||
@@ -193,6 +195,8 @@ func toBuildkitPruneInfo(f filters.Args) (*client.PruneInfo, error) {
|
|||||||
case 1:
|
case 1:
|
||||||
if filterKey == "id" {
|
if filterKey == "id" {
|
||||||
filters = append(filters, filterKey+"~="+values[0])
|
filters = append(filters, filterKey+"~="+values[0])
|
||||||
|
} else if strings.HasSuffix(filterKey, "!") || strings.HasSuffix(filterKey, "~") {
|
||||||
|
filters = append(filters, filterKey+"="+values[0])
|
||||||
} else {
|
} else {
|
||||||
filters = append(filters, filterKey+"=="+values[0])
|
filters = append(filters, filterKey+"=="+values[0])
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -9,16 +9,14 @@ import (
|
|||||||
"github.com/docker/buildx/store"
|
"github.com/docker/buildx/store"
|
||||||
"github.com/docker/buildx/store/storeutil"
|
"github.com/docker/buildx/store/storeutil"
|
||||||
"github.com/docker/buildx/util/cobrautil/completion"
|
"github.com/docker/buildx/util/cobrautil/completion"
|
||||||
"github.com/docker/cli/cli"
|
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
)
|
)
|
||||||
|
|
||||||
type rmOptions struct {
|
type rmOptions struct {
|
||||||
builder string
|
builders []string
|
||||||
keepState bool
|
keepState bool
|
||||||
keepDaemon bool
|
keepDaemon bool
|
||||||
allInactive bool
|
allInactive bool
|
||||||
@@ -29,12 +27,14 @@ const (
|
|||||||
rmInactiveWarning = `WARNING! This will remove all builders that are not in running state. Are you sure you want to continue?`
|
rmInactiveWarning = `WARNING! This will remove all builders that are not in running state. Are you sure you want to continue?`
|
||||||
)
|
)
|
||||||
|
|
||||||
func runRm(dockerCli command.Cli, in rmOptions) error {
|
func runRm(ctx context.Context, dockerCli command.Cli, in rmOptions) error {
|
||||||
ctx := appcontext.Context()
|
if in.allInactive && !in.force {
|
||||||
|
if ok, err := prompt(ctx, dockerCli.In(), dockerCli.Out(), rmInactiveWarning); err != nil {
|
||||||
if in.allInactive && !in.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), rmInactiveWarning) {
|
return err
|
||||||
|
} else if !ok {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
txn, release, err := storeutil.GetStore(dockerCli)
|
txn, release, err := storeutil.GetStore(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -46,8 +46,20 @@ func runRm(dockerCli command.Cli, in rmOptions) error {
|
|||||||
return rmAllInactive(ctx, txn, dockerCli, in)
|
return rmAllInactive(ctx, txn, dockerCli, in)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
eg, _ := errgroup.WithContext(ctx)
|
||||||
|
for _, name := range in.builders {
|
||||||
|
func(name string) {
|
||||||
|
eg.Go(func() (err error) {
|
||||||
|
defer func() {
|
||||||
|
if err == nil {
|
||||||
|
_, _ = fmt.Fprintf(dockerCli.Err(), "%s removed\n", name)
|
||||||
|
} else {
|
||||||
|
_, _ = fmt.Fprintf(dockerCli.Err(), "failed to remove %s: %v\n", name, err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
b, err := builder.New(dockerCli,
|
b, err := builder.New(dockerCli,
|
||||||
builder.WithName(in.builder),
|
builder.WithName(name),
|
||||||
builder.WithStore(txn),
|
builder.WithStore(txn),
|
||||||
builder.WithSkippedValidation(),
|
builder.WithSkippedValidation(),
|
||||||
)
|
)
|
||||||
@@ -55,7 +67,7 @@ func runRm(dockerCli command.Cli, in rmOptions) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
nodes, err := b.LoadNodes(ctx, false)
|
nodes, err := b.LoadNodes(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -72,7 +84,14 @@ func runRm(dockerCli command.Cli, in rmOptions) error {
|
|||||||
return err1
|
return err1
|
||||||
}
|
}
|
||||||
|
|
||||||
_, _ = fmt.Fprintf(dockerCli.Err(), "%s removed\n", b.Name)
|
return nil
|
||||||
|
})
|
||||||
|
}(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := eg.Wait(); err != nil {
|
||||||
|
return errors.New("failed to remove one or more builders")
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -80,25 +99,24 @@ func rmCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
var options rmOptions
|
var options rmOptions
|
||||||
|
|
||||||
cmd := &cobra.Command{
|
cmd := &cobra.Command{
|
||||||
Use: "rm [NAME]",
|
Use: "rm [OPTIONS] [NAME] [NAME...]",
|
||||||
Short: "Remove a builder instance",
|
Short: "Remove one or more builder instances",
|
||||||
Args: cli.RequiresMaxArgs(1),
|
|
||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
options.builder = rootOpts.builder
|
options.builders = []string{rootOpts.builder}
|
||||||
if len(args) > 0 {
|
if len(args) > 0 {
|
||||||
if options.allInactive {
|
if options.allInactive {
|
||||||
return errors.New("cannot specify builder name when --all-inactive is set")
|
return errors.New("cannot specify builder name when --all-inactive is set")
|
||||||
}
|
}
|
||||||
options.builder = args[0]
|
options.builders = args
|
||||||
}
|
}
|
||||||
return runRm(dockerCli, options)
|
return runRm(cmd.Context(), dockerCli, options)
|
||||||
},
|
},
|
||||||
ValidArgsFunction: completion.BuilderNames(dockerCli),
|
ValidArgsFunction: completion.BuilderNames(dockerCli),
|
||||||
}
|
}
|
||||||
|
|
||||||
flags := cmd.Flags()
|
flags := cmd.Flags()
|
||||||
flags.BoolVar(&options.keepState, "keep-state", false, "Keep BuildKit state")
|
flags.BoolVar(&options.keepState, "keep-state", false, "Keep BuildKit state")
|
||||||
flags.BoolVar(&options.keepDaemon, "keep-daemon", false, "Keep the buildkitd daemon running")
|
flags.BoolVar(&options.keepDaemon, "keep-daemon", false, "Keep the BuildKit daemon running")
|
||||||
flags.BoolVar(&options.allInactive, "all-inactive", false, "Remove all inactive builders")
|
flags.BoolVar(&options.allInactive, "all-inactive", false, "Remove all inactive builders")
|
||||||
flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation")
|
flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation")
|
||||||
|
|
||||||
@@ -139,7 +157,7 @@ func rmAllInactive(ctx context.Context, txn *store.Txn, dockerCli command.Cli, i
|
|||||||
for _, b := range builders {
|
for _, b := range builders {
|
||||||
func(b *builder.Builder) {
|
func(b *builder.Builder) {
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
nodes, err := b.LoadNodes(timeoutCtx, true)
|
nodes, err := b.LoadNodes(timeoutCtx, builder.WithData())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrapf(err, "cannot load %s", b.Name)
|
return errors.Wrapf(err, "cannot load %s", b.Name)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,20 +3,25 @@ package commands
|
|||||||
import (
|
import (
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
|
debugcmd "github.com/docker/buildx/commands/debug"
|
||||||
imagetoolscmd "github.com/docker/buildx/commands/imagetools"
|
imagetoolscmd "github.com/docker/buildx/commands/imagetools"
|
||||||
"github.com/docker/buildx/controller/remote"
|
"github.com/docker/buildx/controller/remote"
|
||||||
"github.com/docker/buildx/util/cobrautil/completion"
|
"github.com/docker/buildx/util/cobrautil/completion"
|
||||||
|
"github.com/docker/buildx/util/confutil"
|
||||||
"github.com/docker/buildx/util/logutil"
|
"github.com/docker/buildx/util/logutil"
|
||||||
"github.com/docker/cli-docs-tool/annotation"
|
"github.com/docker/cli-docs-tool/annotation"
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli-plugins/plugin"
|
"github.com/docker/cli/cli-plugins/plugin"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
|
"github.com/docker/cli/cli/debug"
|
||||||
|
"github.com/moby/buildkit/util/appcontext"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"github.com/spf13/pflag"
|
"github.com/spf13/pflag"
|
||||||
)
|
)
|
||||||
|
|
||||||
func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Command {
|
func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Command {
|
||||||
|
var opt rootOptions
|
||||||
cmd := &cobra.Command{
|
cmd := &cobra.Command{
|
||||||
Short: "Docker Buildx",
|
Short: "Docker Buildx",
|
||||||
Long: `Extended build capabilities with BuildKit`,
|
Long: `Extended build capabilities with BuildKit`,
|
||||||
@@ -27,12 +32,19 @@ func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Comman
|
|||||||
CompletionOptions: cobra.CompletionOptions{
|
CompletionOptions: cobra.CompletionOptions{
|
||||||
HiddenDefaultCmd: true,
|
HiddenDefaultCmd: true,
|
||||||
},
|
},
|
||||||
|
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
if opt.debug {
|
||||||
|
debug.Enable()
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd.SetContext(appcontext.Context())
|
||||||
|
if !isPlugin {
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
if isPlugin {
|
|
||||||
cmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {
|
|
||||||
return plugin.PersistentPreRunE(cmd, args)
|
return plugin.PersistentPreRunE(cmd, args)
|
||||||
|
},
|
||||||
}
|
}
|
||||||
} else {
|
if !isPlugin {
|
||||||
// match plugin behavior for standalone mode
|
// match plugin behavior for standalone mode
|
||||||
// https://github.com/docker/cli/blob/6c9eb708fa6d17765d71965f90e1c59cea686ee9/cli-plugins/plugin/plugin.go#L117-L127
|
// https://github.com/docker/cli/blob/6c9eb708fa6d17765d71965f90e1c59cea686ee9/cli-plugins/plugin/plugin.go#L117-L127
|
||||||
cmd.SilenceUsage = true
|
cmd.SilenceUsage = true
|
||||||
@@ -52,33 +64,27 @@ func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Comman
|
|||||||
"using default config store",
|
"using default config store",
|
||||||
))
|
))
|
||||||
|
|
||||||
// filter out useless commandConn.CloseWrite warning message that can occur
|
if !confutil.IsExperimental() {
|
||||||
// when listing builder instances with "buildx ls" for those that are
|
cmd.SetHelpTemplate(cmd.HelpTemplate() + "\nExperimental commands and flags are hidden. Set BUILDX_EXPERIMENTAL=1 to show them.\n")
|
||||||
// unreachable: "commandConn.CloseWrite: commandconn: failed to wait: signal: killed"
|
}
|
||||||
// https://github.com/docker/cli/blob/3fb4fb83dfb5db0c0753a8316f21aea54dab32c5/cli/connhelper/commandconn/commandconn.go#L203-L214
|
|
||||||
logrus.AddHook(logutil.NewFilter([]logrus.Level{
|
|
||||||
logrus.WarnLevel,
|
|
||||||
},
|
|
||||||
"commandConn.CloseWrite:",
|
|
||||||
"commandConn.CloseRead:",
|
|
||||||
))
|
|
||||||
|
|
||||||
addCommands(cmd, dockerCli)
|
addCommands(cmd, &opt, dockerCli)
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|
||||||
type rootOptions struct {
|
type rootOptions struct {
|
||||||
builder string
|
builder string
|
||||||
|
debug bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func addCommands(cmd *cobra.Command, dockerCli command.Cli) {
|
func addCommands(cmd *cobra.Command, opts *rootOptions, dockerCli command.Cli) {
|
||||||
opts := &rootOptions{}
|
|
||||||
rootFlags(opts, cmd.PersistentFlags())
|
rootFlags(opts, cmd.PersistentFlags())
|
||||||
|
|
||||||
cmd.AddCommand(
|
cmd.AddCommand(
|
||||||
buildCmd(dockerCli, opts),
|
buildCmd(dockerCli, opts, nil),
|
||||||
bakeCmd(dockerCli, opts),
|
bakeCmd(dockerCli, opts),
|
||||||
createCmd(dockerCli),
|
createCmd(dockerCli),
|
||||||
|
dialStdioCmd(dockerCli, opts),
|
||||||
rmCmd(dockerCli, opts),
|
rmCmd(dockerCli, opts),
|
||||||
lsCmd(dockerCli),
|
lsCmd(dockerCli),
|
||||||
useCmd(dockerCli, opts),
|
useCmd(dockerCli, opts),
|
||||||
@@ -91,9 +97,11 @@ func addCommands(cmd *cobra.Command, dockerCli command.Cli) {
|
|||||||
duCmd(dockerCli, opts),
|
duCmd(dockerCli, opts),
|
||||||
imagetoolscmd.RootCmd(dockerCli, imagetoolscmd.RootOptions{Builder: &opts.builder}),
|
imagetoolscmd.RootCmd(dockerCli, imagetoolscmd.RootOptions{Builder: &opts.builder}),
|
||||||
)
|
)
|
||||||
if isExperimental() {
|
if confutil.IsExperimental() {
|
||||||
|
cmd.AddCommand(debugcmd.RootCmd(dockerCli,
|
||||||
|
newDebuggableBuild(dockerCli, opts),
|
||||||
|
))
|
||||||
remote.AddControllerCommands(cmd, dockerCli)
|
remote.AddControllerCommands(cmd, dockerCli)
|
||||||
addDebugShellCommand(cmd, dockerCli)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
cmd.RegisterFlagCompletionFunc( //nolint:errcheck
|
cmd.RegisterFlagCompletionFunc( //nolint:errcheck
|
||||||
@@ -104,4 +112,5 @@ func addCommands(cmd *cobra.Command, dockerCli command.Cli) {
|
|||||||
|
|
||||||
func rootFlags(options *rootOptions, flags *pflag.FlagSet) {
|
func rootFlags(options *rootOptions, flags *pflag.FlagSet) {
|
||||||
flags.StringVar(&options.builder, "builder", os.Getenv("BUILDX_BUILDER"), "Override the configured builder instance")
|
flags.StringVar(&options.builder, "builder", os.Getenv("BUILDX_BUILDER"), "Override the configured builder instance")
|
||||||
|
flags.BoolVarP(&options.debug, "debug", "D", debug.IsEnabled(), "Enable debug logging")
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,7 +7,6 @@ import (
|
|||||||
"github.com/docker/buildx/util/cobrautil/completion"
|
"github.com/docker/buildx/util/cobrautil/completion"
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -15,9 +14,7 @@ type stopOptions struct {
|
|||||||
builder string
|
builder string
|
||||||
}
|
}
|
||||||
|
|
||||||
func runStop(dockerCli command.Cli, in stopOptions) error {
|
func runStop(ctx context.Context, dockerCli command.Cli, in stopOptions) error {
|
||||||
ctx := appcontext.Context()
|
|
||||||
|
|
||||||
b, err := builder.New(dockerCli,
|
b, err := builder.New(dockerCli,
|
||||||
builder.WithName(in.builder),
|
builder.WithName(in.builder),
|
||||||
builder.WithSkippedValidation(),
|
builder.WithSkippedValidation(),
|
||||||
@@ -25,7 +22,7 @@ func runStop(dockerCli command.Cli, in stopOptions) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
nodes, err := b.LoadNodes(ctx, false)
|
nodes, err := b.LoadNodes(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -45,7 +42,7 @@ func stopCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
if len(args) > 0 {
|
if len(args) > 0 {
|
||||||
options.builder = args[0]
|
options.builder = args[0]
|
||||||
}
|
}
|
||||||
return runStop(dockerCli, options)
|
return runStop(cmd.Context(), dockerCli, options)
|
||||||
},
|
},
|
||||||
ValidArgsFunction: completion.BuilderNames(dockerCli),
|
ValidArgsFunction: completion.BuilderNames(dockerCli),
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ import (
|
|||||||
type uninstallOptions struct {
|
type uninstallOptions struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func runUninstall(dockerCli command.Cli, in uninstallOptions) error {
|
func runUninstall(_ command.Cli, _ uninstallOptions) error {
|
||||||
dir := config.Dir()
|
dir := config.Dir()
|
||||||
cfg, err := config.Load(dir)
|
cfg, err := config.Load(dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@@ -35,10 +35,7 @@ func runUse(dockerCli command.Cli, in useOptions) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := txn.SetCurrent(ep, "", false, false); err != nil {
|
return txn.SetCurrent(ep, "", false, false)
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
list, err := dockerCli.ContextStore().List()
|
list, err := dockerCli.ContextStore().List()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -58,11 +55,7 @@ func runUse(dockerCli command.Cli, in useOptions) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := txn.SetCurrent(ep, in.builder, in.isGlobal, in.isDefault); err != nil {
|
return txn.SetCurrent(ep, in.builder, in.isGlobal, in.isDefault)
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func useCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
func useCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
||||||
|
|||||||
57
commands/util.go
Normal file
57
commands/util.go
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
package commands
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/docker/cli/cli/streams"
|
||||||
|
)
|
||||||
|
|
||||||
|
func prompt(ctx context.Context, ins io.Reader, out io.Writer, msg string) (bool, error) {
|
||||||
|
done := make(chan struct{})
|
||||||
|
var ok bool
|
||||||
|
go func() {
|
||||||
|
ok = promptForConfirmation(ins, out, msg)
|
||||||
|
close(done)
|
||||||
|
}()
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return false, context.Cause(ctx)
|
||||||
|
case <-done:
|
||||||
|
return ok, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// promptForConfirmation requests and checks confirmation from user.
|
||||||
|
// This will display the provided message followed by ' [y/N] '. If
|
||||||
|
// the user input 'y' or 'Y' it returns true other false. If no
|
||||||
|
// message is provided "Are you sure you want to proceed? [y/N] "
|
||||||
|
// will be used instead.
|
||||||
|
//
|
||||||
|
// Copied from github.com/docker/cli since the upstream version changed
|
||||||
|
// recently with an incompatible change.
|
||||||
|
//
|
||||||
|
// See https://github.com/docker/buildx/pull/2359#discussion_r1544736494
|
||||||
|
// for discussion on the issue.
|
||||||
|
func promptForConfirmation(ins io.Reader, outs io.Writer, message string) bool {
|
||||||
|
if message == "" {
|
||||||
|
message = "Are you sure you want to proceed?"
|
||||||
|
}
|
||||||
|
message += " [y/N] "
|
||||||
|
|
||||||
|
_, _ = fmt.Fprint(outs, message)
|
||||||
|
|
||||||
|
// On Windows, force the use of the regular OS stdin stream.
|
||||||
|
if runtime.GOOS == "windows" {
|
||||||
|
ins = streams.NewIn(os.Stdin)
|
||||||
|
}
|
||||||
|
|
||||||
|
reader := bufio.NewReader(ins)
|
||||||
|
answer, _, _ := reader.ReadLine()
|
||||||
|
return strings.ToLower(string(answer)) == "y"
|
||||||
|
}
|
||||||
@@ -11,7 +11,7 @@ import (
|
|||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
)
|
)
|
||||||
|
|
||||||
func runVersion(dockerCli command.Cli) error {
|
func runVersion(_ command.Cli) error {
|
||||||
fmt.Println(version.Package, version.Version, version.Revision)
|
fmt.Println(version.Package, version.Version, version.Revision)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,7 +3,6 @@ package build
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
@@ -19,9 +18,8 @@ import (
|
|||||||
"github.com/docker/buildx/util/platformutil"
|
"github.com/docker/buildx/util/platformutil"
|
||||||
"github.com/docker/buildx/util/progress"
|
"github.com/docker/buildx/util/progress"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/docker/cli/cli/config"
|
|
||||||
dockeropts "github.com/docker/cli/opts"
|
dockeropts "github.com/docker/cli/opts"
|
||||||
"github.com/docker/go-units"
|
"github.com/docker/docker/api/types/container"
|
||||||
"github.com/moby/buildkit/client"
|
"github.com/moby/buildkit/client"
|
||||||
"github.com/moby/buildkit/session/auth/authprovider"
|
"github.com/moby/buildkit/session/auth/authprovider"
|
||||||
"github.com/moby/buildkit/util/grpcerrors"
|
"github.com/moby/buildkit/util/grpcerrors"
|
||||||
@@ -50,10 +48,12 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
|
|||||||
Inputs: build.Inputs{
|
Inputs: build.Inputs{
|
||||||
ContextPath: in.ContextPath,
|
ContextPath: in.ContextPath,
|
||||||
DockerfilePath: in.DockerfileName,
|
DockerfilePath: in.DockerfileName,
|
||||||
InStream: inStream,
|
InStream: build.NewSyncMultiReader(inStream),
|
||||||
NamedContexts: contexts,
|
NamedContexts: contexts,
|
||||||
},
|
},
|
||||||
|
Ref: in.Ref,
|
||||||
BuildArgs: in.BuildArgs,
|
BuildArgs: in.BuildArgs,
|
||||||
|
CgroupParent: in.CgroupParent,
|
||||||
ExtraHosts: in.ExtraHosts,
|
ExtraHosts: in.ExtraHosts,
|
||||||
Labels: in.Labels,
|
Labels: in.Labels,
|
||||||
NetworkMode: in.NetworkMode,
|
NetworkMode: in.NetworkMode,
|
||||||
@@ -64,6 +64,8 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
|
|||||||
Tags: in.Tags,
|
Tags: in.Tags,
|
||||||
Target: in.Target,
|
Target: in.Target,
|
||||||
Ulimits: controllerUlimitOpt2DockerUlimit(in.Ulimits),
|
Ulimits: controllerUlimitOpt2DockerUlimit(in.Ulimits),
|
||||||
|
GroupRef: in.GroupRef,
|
||||||
|
ProvenanceResponseMode: confutil.ParseMetadataProvenance(in.ProvenanceResponseMode),
|
||||||
}
|
}
|
||||||
|
|
||||||
platforms, err := platformutil.Parse(in.Platforms)
|
platforms, err := platformutil.Parse(in.Platforms)
|
||||||
@@ -72,8 +74,8 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
|
|||||||
}
|
}
|
||||||
opts.Platforms = platforms
|
opts.Platforms = platforms
|
||||||
|
|
||||||
dockerConfig := config.LoadDefaultConfigFile(os.Stderr)
|
dockerConfig := dockerCli.ConfigFile()
|
||||||
opts.Session = append(opts.Session, authprovider.NewDockerAuthProvider(dockerConfig))
|
opts.Session = append(opts.Session, authprovider.NewDockerAuthProvider(dockerConfig, nil))
|
||||||
|
|
||||||
secrets, err := controllerapi.CreateSecrets(in.Secrets)
|
secrets, err := controllerapi.CreateSecrets(in.Secrets)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -96,39 +98,51 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
|
|||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
if in.ExportPush {
|
if in.ExportPush {
|
||||||
if in.ExportLoad {
|
var pushUsed bool
|
||||||
return nil, nil, errors.Errorf("push and load may not be set together at the moment")
|
for i := range outputs {
|
||||||
|
if outputs[i].Type == client.ExporterImage {
|
||||||
|
outputs[i].Attrs["push"] = "true"
|
||||||
|
pushUsed = true
|
||||||
}
|
}
|
||||||
if len(outputs) == 0 {
|
}
|
||||||
outputs = []client.ExportEntry{{
|
if !pushUsed {
|
||||||
Type: "image",
|
outputs = append(outputs, client.ExportEntry{
|
||||||
|
Type: client.ExporterImage,
|
||||||
Attrs: map[string]string{
|
Attrs: map[string]string{
|
||||||
"push": "true",
|
"push": "true",
|
||||||
},
|
},
|
||||||
}}
|
})
|
||||||
} else {
|
|
||||||
switch outputs[0].Type {
|
|
||||||
case "image":
|
|
||||||
outputs[0].Attrs["push"] = "true"
|
|
||||||
default:
|
|
||||||
return nil, nil, errors.Errorf("push and %q output can't be used together", outputs[0].Type)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if in.ExportLoad {
|
if in.ExportLoad {
|
||||||
if len(outputs) == 0 {
|
var loadUsed bool
|
||||||
outputs = []client.ExportEntry{{
|
for i := range outputs {
|
||||||
Type: "docker",
|
if outputs[i].Type == client.ExporterDocker {
|
||||||
|
if _, ok := outputs[i].Attrs["dest"]; !ok {
|
||||||
|
loadUsed = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !loadUsed {
|
||||||
|
outputs = append(outputs, client.ExportEntry{
|
||||||
|
Type: client.ExporterDocker,
|
||||||
Attrs: map[string]string{},
|
Attrs: map[string]string{},
|
||||||
}}
|
})
|
||||||
} else {
|
|
||||||
switch outputs[0].Type {
|
|
||||||
case "docker":
|
|
||||||
default:
|
|
||||||
return nil, nil, errors.Errorf("load and %q output can't be used together", outputs[0].Type)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
annotations, err := buildflags.ParseAnnotations(in.Annotations)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, errors.Wrap(err, "parse annotations")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
for _, o := range outputs {
|
||||||
|
for k, v := range annotations {
|
||||||
|
o.Attrs[k.String()] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
opts.Exports = outputs
|
opts.Exports = outputs
|
||||||
|
|
||||||
opts.CacheFrom = controllerapi.CreateCaches(in.CacheFrom)
|
opts.CacheFrom = controllerapi.CreateCaches(in.CacheFrom)
|
||||||
@@ -144,10 +158,11 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
|
|||||||
}
|
}
|
||||||
opts.Allow = allow
|
opts.Allow = allow
|
||||||
|
|
||||||
if in.PrintFunc != nil {
|
if in.CallFunc != nil {
|
||||||
opts.PrintFunc = &build.PrintFunc{
|
opts.CallFunc = &build.CallFunc{
|
||||||
Name: in.PrintFunc.Name,
|
Name: in.CallFunc.Name,
|
||||||
Format: in.PrintFunc.Format,
|
Format: in.CallFunc.Format,
|
||||||
|
IgnoreStatus: in.CallFunc.IgnoreStatus,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -168,12 +183,12 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
|
|||||||
if err = updateLastActivity(dockerCli, b.NodeGroup); err != nil {
|
if err = updateLastActivity(dockerCli, b.NodeGroup); err != nil {
|
||||||
return nil, nil, errors.Wrapf(err, "failed to update builder last activity time")
|
return nil, nil, errors.Wrapf(err, "failed to update builder last activity time")
|
||||||
}
|
}
|
||||||
nodes, err := b.LoadNodes(ctx, false)
|
nodes, err := b.LoadNodes(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, res, err := buildTargets(ctx, dockerCli, b.NodeGroup, nodes, map[string]build.Options{defaultTargetName: opts}, progress, generateResult)
|
resp, res, err := buildTargets(ctx, dockerCli, nodes, map[string]build.Options{defaultTargetName: opts}, progress, generateResult)
|
||||||
err = wrapBuildError(err, false)
|
err = wrapBuildError(err, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// NOTE: buildTargets can return *build.ResultHandle even on error.
|
// NOTE: buildTargets can return *build.ResultHandle even on error.
|
||||||
@@ -187,7 +202,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
|
|||||||
// NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultHandle,
|
// NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultHandle,
|
||||||
// this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can
|
// this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can
|
||||||
// inspect the result and debug the cause of that error.
|
// inspect the result and debug the cause of that error.
|
||||||
func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, nodes []builder.Node, opts map[string]build.Options, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultHandle, error) {
|
func buildTargets(ctx context.Context, dockerCli command.Cli, nodes []builder.Node, opts map[string]build.Options, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultHandle, error) {
|
||||||
var res *build.ResultHandle
|
var res *build.ResultHandle
|
||||||
var resp map[string]*client.SolveResponse
|
var resp map[string]*client.SolveResponse
|
||||||
var err error
|
var err error
|
||||||
@@ -254,9 +269,9 @@ func controllerUlimitOpt2DockerUlimit(u *controllerapi.UlimitOpt) *dockeropts.Ul
|
|||||||
if u == nil {
|
if u == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
values := make(map[string]*units.Ulimit)
|
values := make(map[string]*container.Ulimit)
|
||||||
for k, v := range u.Values {
|
for k, v := range u.Values {
|
||||||
values[k] = &units.Ulimit{
|
values[k] = &container.Ulimit{
|
||||||
Name: v.Name,
|
Name: v.Name,
|
||||||
Hard: v.Hard,
|
Hard: v.Hard,
|
||||||
Soft: v.Soft,
|
Soft: v.Soft,
|
||||||
|
|||||||
@@ -273,7 +273,7 @@ func (m *BuildRequest) GetOptions() *BuildOptions {
|
|||||||
type BuildOptions struct {
|
type BuildOptions struct {
|
||||||
ContextPath string `protobuf:"bytes,1,opt,name=ContextPath,proto3" json:"ContextPath,omitempty"`
|
ContextPath string `protobuf:"bytes,1,opt,name=ContextPath,proto3" json:"ContextPath,omitempty"`
|
||||||
DockerfileName string `protobuf:"bytes,2,opt,name=DockerfileName,proto3" json:"DockerfileName,omitempty"`
|
DockerfileName string `protobuf:"bytes,2,opt,name=DockerfileName,proto3" json:"DockerfileName,omitempty"`
|
||||||
PrintFunc *PrintFunc `protobuf:"bytes,3,opt,name=PrintFunc,proto3" json:"PrintFunc,omitempty"`
|
CallFunc *CallFunc `protobuf:"bytes,3,opt,name=CallFunc,proto3" json:"CallFunc,omitempty"`
|
||||||
NamedContexts map[string]string `protobuf:"bytes,4,rep,name=NamedContexts,proto3" json:"NamedContexts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
NamedContexts map[string]string `protobuf:"bytes,4,rep,name=NamedContexts,proto3" json:"NamedContexts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||||
Allow []string `protobuf:"bytes,5,rep,name=Allow,proto3" json:"Allow,omitempty"`
|
Allow []string `protobuf:"bytes,5,rep,name=Allow,proto3" json:"Allow,omitempty"`
|
||||||
Attests []*Attest `protobuf:"bytes,6,rep,name=Attests,proto3" json:"Attests,omitempty"`
|
Attests []*Attest `protobuf:"bytes,6,rep,name=Attests,proto3" json:"Attests,omitempty"`
|
||||||
@@ -299,6 +299,10 @@ type BuildOptions struct {
|
|||||||
ExportPush bool `protobuf:"varint,26,opt,name=ExportPush,proto3" json:"ExportPush,omitempty"`
|
ExportPush bool `protobuf:"varint,26,opt,name=ExportPush,proto3" json:"ExportPush,omitempty"`
|
||||||
ExportLoad bool `protobuf:"varint,27,opt,name=ExportLoad,proto3" json:"ExportLoad,omitempty"`
|
ExportLoad bool `protobuf:"varint,27,opt,name=ExportLoad,proto3" json:"ExportLoad,omitempty"`
|
||||||
SourcePolicy *pb.Policy `protobuf:"bytes,28,opt,name=SourcePolicy,proto3" json:"SourcePolicy,omitempty"`
|
SourcePolicy *pb.Policy `protobuf:"bytes,28,opt,name=SourcePolicy,proto3" json:"SourcePolicy,omitempty"`
|
||||||
|
Ref string `protobuf:"bytes,29,opt,name=Ref,proto3" json:"Ref,omitempty"`
|
||||||
|
GroupRef string `protobuf:"bytes,30,opt,name=GroupRef,proto3" json:"GroupRef,omitempty"`
|
||||||
|
Annotations []string `protobuf:"bytes,31,rep,name=Annotations,proto3" json:"Annotations,omitempty"`
|
||||||
|
ProvenanceResponseMode string `protobuf:"bytes,32,opt,name=ProvenanceResponseMode,proto3" json:"ProvenanceResponseMode,omitempty"`
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_unrecognized []byte `json:"-"`
|
||||||
XXX_sizecache int32 `json:"-"`
|
XXX_sizecache int32 `json:"-"`
|
||||||
@@ -342,9 +346,9 @@ func (m *BuildOptions) GetDockerfileName() string {
|
|||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *BuildOptions) GetPrintFunc() *PrintFunc {
|
func (m *BuildOptions) GetCallFunc() *CallFunc {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
return m.PrintFunc
|
return m.CallFunc
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -524,6 +528,34 @@ func (m *BuildOptions) GetSourcePolicy() *pb.Policy {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (m *BuildOptions) GetRef() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Ref
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *BuildOptions) GetGroupRef() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.GroupRef
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *BuildOptions) GetAnnotations() []string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Annotations
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *BuildOptions) GetProvenanceResponseMode() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.ProvenanceResponseMode
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
type ExportEntry struct {
|
type ExportEntry struct {
|
||||||
Type string `protobuf:"bytes,1,opt,name=Type,proto3" json:"Type,omitempty"`
|
Type string `protobuf:"bytes,1,opt,name=Type,proto3" json:"Type,omitempty"`
|
||||||
Attrs map[string]string `protobuf:"bytes,2,rep,name=Attrs,proto3" json:"Attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
Attrs map[string]string `protobuf:"bytes,2,rep,name=Attrs,proto3" json:"Attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||||
@@ -778,52 +810,60 @@ func (m *Secret) GetEnv() string {
|
|||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
type PrintFunc struct {
|
type CallFunc struct {
|
||||||
Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"`
|
Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"`
|
||||||
Format string `protobuf:"bytes,2,opt,name=Format,proto3" json:"Format,omitempty"`
|
Format string `protobuf:"bytes,2,opt,name=Format,proto3" json:"Format,omitempty"`
|
||||||
|
IgnoreStatus bool `protobuf:"varint,3,opt,name=IgnoreStatus,proto3" json:"IgnoreStatus,omitempty"`
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_unrecognized []byte `json:"-"`
|
||||||
XXX_sizecache int32 `json:"-"`
|
XXX_sizecache int32 `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *PrintFunc) Reset() { *m = PrintFunc{} }
|
func (m *CallFunc) Reset() { *m = CallFunc{} }
|
||||||
func (m *PrintFunc) String() string { return proto.CompactTextString(m) }
|
func (m *CallFunc) String() string { return proto.CompactTextString(m) }
|
||||||
func (*PrintFunc) ProtoMessage() {}
|
func (*CallFunc) ProtoMessage() {}
|
||||||
func (*PrintFunc) Descriptor() ([]byte, []int) {
|
func (*CallFunc) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_ed7f10298fa1d90f, []int{12}
|
return fileDescriptor_ed7f10298fa1d90f, []int{12}
|
||||||
}
|
}
|
||||||
func (m *PrintFunc) XXX_Unmarshal(b []byte) error {
|
func (m *CallFunc) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_PrintFunc.Unmarshal(m, b)
|
return xxx_messageInfo_CallFunc.Unmarshal(m, b)
|
||||||
}
|
}
|
||||||
func (m *PrintFunc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
func (m *CallFunc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
return xxx_messageInfo_PrintFunc.Marshal(b, m, deterministic)
|
return xxx_messageInfo_CallFunc.Marshal(b, m, deterministic)
|
||||||
}
|
}
|
||||||
func (m *PrintFunc) XXX_Merge(src proto.Message) {
|
func (m *CallFunc) XXX_Merge(src proto.Message) {
|
||||||
xxx_messageInfo_PrintFunc.Merge(m, src)
|
xxx_messageInfo_CallFunc.Merge(m, src)
|
||||||
}
|
}
|
||||||
func (m *PrintFunc) XXX_Size() int {
|
func (m *CallFunc) XXX_Size() int {
|
||||||
return xxx_messageInfo_PrintFunc.Size(m)
|
return xxx_messageInfo_CallFunc.Size(m)
|
||||||
}
|
}
|
||||||
func (m *PrintFunc) XXX_DiscardUnknown() {
|
func (m *CallFunc) XXX_DiscardUnknown() {
|
||||||
xxx_messageInfo_PrintFunc.DiscardUnknown(m)
|
xxx_messageInfo_CallFunc.DiscardUnknown(m)
|
||||||
}
|
}
|
||||||
|
|
||||||
var xxx_messageInfo_PrintFunc proto.InternalMessageInfo
|
var xxx_messageInfo_CallFunc proto.InternalMessageInfo
|
||||||
|
|
||||||
func (m *PrintFunc) GetName() string {
|
func (m *CallFunc) GetName() string {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
return m.Name
|
return m.Name
|
||||||
}
|
}
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *PrintFunc) GetFormat() string {
|
func (m *CallFunc) GetFormat() string {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
return m.Format
|
return m.Format
|
||||||
}
|
}
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (m *CallFunc) GetIgnoreStatus() bool {
|
||||||
|
if m != nil {
|
||||||
|
return m.IgnoreStatus
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
type InspectRequest struct {
|
type InspectRequest struct {
|
||||||
Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"`
|
Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"`
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
@@ -1527,6 +1567,7 @@ func (m *InitMessage) GetInvokeConfig() *InvokeConfig {
|
|||||||
type InvokeConfig struct {
|
type InvokeConfig struct {
|
||||||
Entrypoint []string `protobuf:"bytes,1,rep,name=Entrypoint,proto3" json:"Entrypoint,omitempty"`
|
Entrypoint []string `protobuf:"bytes,1,rep,name=Entrypoint,proto3" json:"Entrypoint,omitempty"`
|
||||||
Cmd []string `protobuf:"bytes,2,rep,name=Cmd,proto3" json:"Cmd,omitempty"`
|
Cmd []string `protobuf:"bytes,2,rep,name=Cmd,proto3" json:"Cmd,omitempty"`
|
||||||
|
NoCmd bool `protobuf:"varint,11,opt,name=NoCmd,proto3" json:"NoCmd,omitempty"`
|
||||||
Env []string `protobuf:"bytes,3,rep,name=Env,proto3" json:"Env,omitempty"`
|
Env []string `protobuf:"bytes,3,rep,name=Env,proto3" json:"Env,omitempty"`
|
||||||
User string `protobuf:"bytes,4,opt,name=User,proto3" json:"User,omitempty"`
|
User string `protobuf:"bytes,4,opt,name=User,proto3" json:"User,omitempty"`
|
||||||
NoUser bool `protobuf:"varint,5,opt,name=NoUser,proto3" json:"NoUser,omitempty"`
|
NoUser bool `protobuf:"varint,5,opt,name=NoUser,proto3" json:"NoUser,omitempty"`
|
||||||
@@ -1578,6 +1619,13 @@ func (m *InvokeConfig) GetCmd() []string {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (m *InvokeConfig) GetNoCmd() bool {
|
||||||
|
if m != nil {
|
||||||
|
return m.NoCmd
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
func (m *InvokeConfig) GetEnv() []string {
|
func (m *InvokeConfig) GetEnv() []string {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
return m.Env
|
return m.Env
|
||||||
@@ -2014,7 +2062,7 @@ func init() {
|
|||||||
proto.RegisterType((*Attest)(nil), "buildx.controller.v1.Attest")
|
proto.RegisterType((*Attest)(nil), "buildx.controller.v1.Attest")
|
||||||
proto.RegisterType((*SSH)(nil), "buildx.controller.v1.SSH")
|
proto.RegisterType((*SSH)(nil), "buildx.controller.v1.SSH")
|
||||||
proto.RegisterType((*Secret)(nil), "buildx.controller.v1.Secret")
|
proto.RegisterType((*Secret)(nil), "buildx.controller.v1.Secret")
|
||||||
proto.RegisterType((*PrintFunc)(nil), "buildx.controller.v1.PrintFunc")
|
proto.RegisterType((*CallFunc)(nil), "buildx.controller.v1.CallFunc")
|
||||||
proto.RegisterType((*InspectRequest)(nil), "buildx.controller.v1.InspectRequest")
|
proto.RegisterType((*InspectRequest)(nil), "buildx.controller.v1.InspectRequest")
|
||||||
proto.RegisterType((*InspectResponse)(nil), "buildx.controller.v1.InspectResponse")
|
proto.RegisterType((*InspectResponse)(nil), "buildx.controller.v1.InspectResponse")
|
||||||
proto.RegisterType((*UlimitOpt)(nil), "buildx.controller.v1.UlimitOpt")
|
proto.RegisterType((*UlimitOpt)(nil), "buildx.controller.v1.UlimitOpt")
|
||||||
@@ -2046,125 +2094,130 @@ func init() {
|
|||||||
func init() { proto.RegisterFile("controller.proto", fileDescriptor_ed7f10298fa1d90f) }
|
func init() { proto.RegisterFile("controller.proto", fileDescriptor_ed7f10298fa1d90f) }
|
||||||
|
|
||||||
var fileDescriptor_ed7f10298fa1d90f = []byte{
|
var fileDescriptor_ed7f10298fa1d90f = []byte{
|
||||||
// 1881 bytes of a gzipped FileDescriptorProto
|
// 1961 bytes of a gzipped FileDescriptorProto
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0x5f, 0x6f, 0xdb, 0xc8,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0x5f, 0x73, 0x1b, 0xb7,
|
||||||
0x11, 0x2f, 0x25, 0x59, 0x7f, 0x46, 0x96, 0xe3, 0x6c, 0x9d, 0x74, 0xc3, 0xa4, 0x17, 0x87, 0x49,
|
0x11, 0xef, 0x91, 0x14, 0xff, 0x2c, 0x45, 0xd9, 0x46, 0x6d, 0x17, 0x3e, 0x3b, 0xb6, 0x7c, 0xb6,
|
||||||
0xae, 0x42, 0x53, 0x48, 0x77, 0xbe, 0xa6, 0xbe, 0x5c, 0xee, 0x80, 0xda, 0xb2, 0x05, 0xfb, 0x90,
|
0x53, 0x4e, 0xdd, 0xa1, 0x12, 0xa5, 0x8e, 0xe3, 0x38, 0x9d, 0xa9, 0x44, 0x89, 0x95, 0x32, 0xb6,
|
||||||
0xd8, 0xc6, 0xca, 0xc9, 0xa1, 0x2d, 0xd0, 0x80, 0x92, 0xd6, 0x32, 0x21, 0x8a, 0xab, 0x72, 0x57,
|
0xa4, 0x01, 0x65, 0x67, 0x9a, 0xce, 0x34, 0x73, 0x22, 0x21, 0xea, 0x46, 0xa7, 0x03, 0x7b, 0x00,
|
||||||
0xb6, 0xd5, 0xa7, 0xbe, 0xf4, 0xad, 0xe8, 0xf7, 0x28, 0xfa, 0x11, 0xfa, 0xd2, 0x7e, 0xa1, 0xa2,
|
0xf5, 0xa7, 0x4f, 0x7d, 0x68, 0xdf, 0x3a, 0xfd, 0x1e, 0x9d, 0x7e, 0x84, 0x3e, 0xf5, 0xa1, 0xdf,
|
||||||
0x1f, 0xa1, 0xd8, 0x3f, 0xa4, 0x48, 0x4b, 0x94, 0xed, 0xf6, 0x49, 0x3b, 0xc3, 0xdf, 0x6f, 0x76,
|
0xa7, 0x1f, 0xa1, 0x83, 0x05, 0xee, 0x78, 0x14, 0x79, 0x94, 0xd4, 0x3c, 0x11, 0xbb, 0xf8, 0xed,
|
||||||
0x67, 0x38, 0x3b, 0x33, 0x14, 0xac, 0xf7, 0x58, 0x20, 0x42, 0xe6, 0xfb, 0x34, 0x6c, 0x8c, 0x43,
|
0x2e, 0x76, 0x6f, 0xb1, 0xbb, 0x20, 0xdc, 0xee, 0x89, 0x48, 0xc5, 0x22, 0x0c, 0x79, 0xdc, 0x1a,
|
||||||
0x26, 0x18, 0xda, 0xe8, 0x4e, 0x3c, 0xbf, 0x7f, 0xd5, 0x48, 0x3c, 0xb8, 0xf8, 0xd2, 0x7e, 0x3b,
|
0xc6, 0x42, 0x09, 0x72, 0xf7, 0x60, 0x14, 0x84, 0xfd, 0xf3, 0x56, 0x66, 0xe3, 0xf4, 0x73, 0xf7,
|
||||||
0xf0, 0xc4, 0xf9, 0xa4, 0xdb, 0xe8, 0xb1, 0x51, 0x73, 0xc4, 0xba, 0xd3, 0xa6, 0x42, 0x0d, 0x3d,
|
0xed, 0x20, 0x50, 0x47, 0xa3, 0x83, 0x56, 0x4f, 0x9c, 0xac, 0x9c, 0x88, 0x83, 0x8b, 0x15, 0x44,
|
||||||
0xd1, 0x74, 0xc7, 0x5e, 0x93, 0xd3, 0xf0, 0xc2, 0xeb, 0x51, 0xde, 0x34, 0xa4, 0xe8, 0x57, 0x9b,
|
0x1d, 0x07, 0x6a, 0xc5, 0x1f, 0x06, 0x2b, 0x92, 0xc7, 0xa7, 0x41, 0x8f, 0xcb, 0x15, 0x2b, 0x94,
|
||||||
0xb4, 0x5f, 0x67, 0x92, 0x39, 0x9b, 0x84, 0x3d, 0x3a, 0x66, 0xbe, 0xd7, 0x9b, 0x36, 0xc7, 0xdd,
|
0xfc, 0x1a, 0x95, 0xee, 0xab, 0x5c, 0x61, 0x29, 0x46, 0x71, 0x8f, 0x0f, 0x45, 0x18, 0xf4, 0x2e,
|
||||||
0xa6, 0x5e, 0x69, 0x9a, 0x53, 0x87, 0x8d, 0x77, 0x1e, 0x17, 0x27, 0x21, 0xeb, 0x51, 0xce, 0x29,
|
0x56, 0x86, 0x07, 0x2b, 0x66, 0x65, 0xc4, 0xbc, 0x26, 0xdc, 0x7d, 0x17, 0x48, 0xb5, 0x17, 0x8b,
|
||||||
0x27, 0xf4, 0x0f, 0x13, 0xca, 0x05, 0x5a, 0x87, 0x3c, 0xa1, 0x67, 0xd8, 0xda, 0xb4, 0xea, 0x15,
|
0x1e, 0x97, 0x92, 0x4b, 0xc6, 0xff, 0x38, 0xe2, 0x52, 0x91, 0xdb, 0x50, 0x64, 0xfc, 0x90, 0x3a,
|
||||||
0x22, 0x97, 0xce, 0x09, 0x3c, 0xb8, 0x86, 0xe4, 0x63, 0x16, 0x70, 0x8a, 0xb6, 0x61, 0xe5, 0x30,
|
0xcb, 0x4e, 0xb3, 0xc6, 0xf4, 0xd2, 0xdb, 0x83, 0x7b, 0x97, 0x90, 0x72, 0x28, 0x22, 0xc9, 0xc9,
|
||||||
0x38, 0x63, 0x1c, 0x5b, 0x9b, 0xf9, 0x7a, 0x75, 0xeb, 0x59, 0x63, 0x91, 0x73, 0x0d, 0xc3, 0x93,
|
0x6b, 0x58, 0xd8, 0x8e, 0x0e, 0x85, 0xa4, 0xce, 0x72, 0xb1, 0x59, 0x5f, 0x7d, 0xda, 0x9a, 0xe5,
|
||||||
0x48, 0xa2, 0xf1, 0x0e, 0x87, 0x6a, 0x42, 0x8b, 0x9e, 0x40, 0x25, 0x12, 0xf7, 0xcc, 0xc6, 0x33,
|
0x5c, 0xcb, 0xca, 0x69, 0x24, 0x33, 0x78, 0x4f, 0x42, 0x3d, 0xc3, 0x25, 0x8f, 0xa0, 0x96, 0x90,
|
||||||
0x05, 0x6a, 0xc3, 0xea, 0x61, 0x70, 0xc1, 0x86, 0xb4, 0xc5, 0x82, 0x33, 0x6f, 0x80, 0x73, 0x9b,
|
0x1b, 0xd6, 0xf0, 0x98, 0x41, 0x3a, 0xb0, 0xb8, 0x1d, 0x9d, 0x8a, 0x63, 0xde, 0x16, 0xd1, 0x61,
|
||||||
0x56, 0xbd, 0xba, 0xe5, 0x2c, 0xde, 0x2c, 0x89, 0x24, 0x29, 0x9e, 0xf3, 0x3d, 0xe0, 0x3d, 0x8f,
|
0x30, 0xa0, 0x85, 0x65, 0xa7, 0x59, 0x5f, 0xf5, 0x66, 0x1b, 0xcb, 0x22, 0xd9, 0x84, 0x9c, 0xf7,
|
||||||
0xf7, 0x58, 0x10, 0xd0, 0x5e, 0xe4, 0x4c, 0xa6, 0xd3, 0xe9, 0x33, 0xe5, 0xae, 0x9d, 0xc9, 0x79,
|
0x2d, 0xd0, 0x8d, 0x40, 0xf6, 0x44, 0x14, 0xf1, 0x5e, 0xe2, 0x4c, 0xae, 0xd3, 0x93, 0x67, 0x2a,
|
||||||
0x0c, 0x8f, 0x16, 0xd8, 0xd2, 0x61, 0x71, 0x7e, 0x0f, 0xab, 0xbb, 0xf2, 0x6c, 0xd9, 0xc6, 0xbf,
|
0x5c, 0x3a, 0x93, 0xf7, 0x10, 0x1e, 0xcc, 0xd0, 0x65, 0xc2, 0xe2, 0xfd, 0x01, 0x16, 0xd7, 0xf5,
|
||||||
0x85, 0xd2, 0xf1, 0x58, 0x78, 0x2c, 0xe0, 0xcb, 0xbd, 0x51, 0x66, 0x0c, 0x92, 0x44, 0x14, 0xe7,
|
0xd9, 0xf2, 0x95, 0x7f, 0x03, 0x95, 0xdd, 0xa1, 0x0a, 0x44, 0x24, 0xe7, 0x7b, 0x83, 0x6a, 0x2c,
|
||||||
0x9f, 0x55, 0xb3, 0x81, 0x51, 0xa0, 0x4d, 0xa8, 0xb6, 0x58, 0x20, 0xe8, 0x95, 0x38, 0x71, 0xc5,
|
0x92, 0x25, 0x22, 0xde, 0x7f, 0x16, 0xad, 0x01, 0xcb, 0x20, 0xcb, 0x50, 0x6f, 0x8b, 0x48, 0xf1,
|
||||||
0xb9, 0xd9, 0x28, 0xa9, 0x42, 0x9f, 0xc3, 0xda, 0x1e, 0xeb, 0x0d, 0x69, 0x78, 0xe6, 0xf9, 0xf4,
|
0x73, 0xb5, 0xe7, 0xab, 0x23, 0x6b, 0x28, 0xcb, 0x22, 0x9f, 0xc2, 0xd2, 0x86, 0xe8, 0x1d, 0xf3,
|
||||||
0xc8, 0x1d, 0x51, 0xe3, 0xd2, 0x35, 0x2d, 0xfa, 0x4e, 0x7a, 0xed, 0x05, 0xa2, 0x3d, 0x09, 0x7a,
|
0xf8, 0x30, 0x08, 0xf9, 0x8e, 0x7f, 0xc2, 0xad, 0x4b, 0x97, 0xb8, 0xe4, 0x6b, 0xa8, 0xb6, 0xfd,
|
||||||
0x38, 0xaf, 0x8e, 0xf6, 0x34, 0xeb, 0xad, 0x1a, 0x18, 0x99, 0x31, 0xd0, 0xef, 0xa0, 0x26, 0xcd,
|
0x30, 0xec, 0x8c, 0xa2, 0x1e, 0x2d, 0xe2, 0xc9, 0x1e, 0xcf, 0x3e, 0x59, 0x82, 0x62, 0x29, 0x9e,
|
||||||
0xf4, 0xcd, 0xd6, 0x1c, 0x17, 0x54, 0x62, 0xbc, 0xbe, 0xd9, 0xbb, 0x46, 0x8a, 0xb7, 0x1f, 0x88,
|
0xfc, 0x1e, 0x1a, 0x5a, 0x47, 0xdf, 0xda, 0x95, 0xb4, 0x84, 0x59, 0xf1, 0xea, 0x6a, 0xd7, 0x5a,
|
||||||
0x70, 0x4a, 0xd2, 0xb6, 0xd0, 0x06, 0xac, 0xec, 0xf8, 0x3e, 0xbb, 0xc4, 0x2b, 0x9b, 0xf9, 0x7a,
|
0x13, 0x72, 0x9b, 0x91, 0x8a, 0x2f, 0xd8, 0xa4, 0x2e, 0x72, 0x17, 0x16, 0xd6, 0xc2, 0x50, 0x9c,
|
||||||
0x85, 0x68, 0x01, 0xfd, 0x0a, 0x4a, 0x3b, 0x42, 0x50, 0x2e, 0x38, 0x2e, 0xaa, 0xcd, 0x9e, 0x2c,
|
0xd1, 0x85, 0xe5, 0x62, 0xb3, 0xc6, 0x0c, 0x41, 0xbe, 0x84, 0xca, 0x9a, 0x52, 0x5c, 0x2a, 0x49,
|
||||||
0xde, 0x4c, 0x83, 0x48, 0x04, 0x46, 0xc7, 0x50, 0x51, 0xfb, 0xef, 0x84, 0x03, 0x8e, 0x4b, 0x8a,
|
0xcb, 0x68, 0xec, 0xd1, 0x6c, 0x63, 0x06, 0xc4, 0x12, 0x30, 0xd9, 0x85, 0x1a, 0xda, 0x5f, 0x8b,
|
||||||
0xf9, 0xe5, 0x2d, 0x8e, 0x19, 0x73, 0xf4, 0x11, 0x67, 0x36, 0xd0, 0x3e, 0x54, 0x5a, 0x6e, 0xef,
|
0x07, 0x92, 0x56, 0x50, 0xf2, 0xf3, 0x6b, 0x1c, 0x33, 0x95, 0x31, 0x47, 0x1c, 0xeb, 0x20, 0x9b,
|
||||||
0x9c, 0xb6, 0x43, 0x36, 0xc2, 0x65, 0x65, 0xf0, 0x67, 0x8b, 0x0d, 0x2a, 0x98, 0x31, 0x68, 0xcc,
|
0x50, 0x6b, 0xfb, 0xbd, 0x23, 0xde, 0x89, 0xc5, 0x09, 0xad, 0xa2, 0xc2, 0x9f, 0xe7, 0x05, 0xae,
|
||||||
0xc4, 0x4c, 0xb4, 0x03, 0x25, 0x25, 0x9c, 0x32, 0x5c, 0xb9, 0x9b, 0x91, 0x88, 0x87, 0x1c, 0x58,
|
0x77, 0xc4, 0xad, 0x42, 0xab, 0x26, 0x95, 0x24, 0x6b, 0x50, 0x41, 0x62, 0x5f, 0xd0, 0xda, 0xcd,
|
||||||
0x6d, 0x0d, 0x42, 0x36, 0x19, 0x9f, 0xb8, 0x21, 0x0d, 0x04, 0x06, 0xf5, 0xaa, 0x53, 0x3a, 0xf4,
|
0x94, 0x24, 0x72, 0xc4, 0x83, 0xc5, 0xf6, 0x20, 0x16, 0xa3, 0xe1, 0x9e, 0x1f, 0xf3, 0x48, 0x51,
|
||||||
0x16, 0x4a, 0xfb, 0x57, 0x63, 0x16, 0x0a, 0x8e, 0xab, 0xcb, 0x2e, 0xaf, 0x06, 0x99, 0x0d, 0x0c,
|
0xc0, 0xef, 0x3c, 0xc1, 0x23, 0x6f, 0xa1, 0xb2, 0x79, 0x3e, 0x14, 0xb1, 0x92, 0xb4, 0x3e, 0xef,
|
||||||
0x03, 0x7d, 0x06, 0xb0, 0x7f, 0x25, 0x42, 0xf7, 0x80, 0xc9, 0xb0, 0xaf, 0xaa, 0xd7, 0x91, 0xd0,
|
0xe6, 0x1a, 0x90, 0x35, 0x60, 0x25, 0xc8, 0x63, 0x80, 0xcd, 0x73, 0x15, 0xfb, 0x5b, 0x42, 0x87,
|
||||||
0xa0, 0x36, 0x14, 0xdf, 0xb9, 0x5d, 0xea, 0x73, 0x5c, 0x53, 0xb6, 0x1b, 0xb7, 0x08, 0xac, 0x26,
|
0x7d, 0x11, 0x3f, 0x47, 0x86, 0x43, 0x3a, 0x50, 0x7e, 0xe7, 0x1f, 0xf0, 0x50, 0xd2, 0x06, 0xea,
|
||||||
0xe8, 0x8d, 0x0c, 0x5b, 0xe6, 0xf5, 0x11, 0x15, 0x97, 0x2c, 0x1c, 0xbe, 0x67, 0x7d, 0x8a, 0xd7,
|
0x6e, 0x5d, 0x23, 0xb0, 0x46, 0xc0, 0x18, 0xb2, 0xd2, 0x3a, 0xa9, 0x77, 0xb8, 0x3a, 0x13, 0xf1,
|
||||||
0x74, 0x5e, 0x27, 0x54, 0xe8, 0x05, 0xd4, 0x8e, 0x98, 0x0e, 0x9e, 0xe7, 0x0b, 0x1a, 0xe2, 0x7b,
|
0xf1, 0x7b, 0xd1, 0xe7, 0x74, 0xc9, 0x24, 0x75, 0x86, 0x45, 0x9e, 0x43, 0x63, 0x47, 0x98, 0xe0,
|
||||||
0xea, 0x30, 0x69, 0xa5, 0xba, 0xcb, 0xbe, 0x2b, 0xce, 0x58, 0x38, 0xe2, 0x78, 0x5d, 0x21, 0x66,
|
0x05, 0xa1, 0xe2, 0x31, 0xbd, 0x85, 0x87, 0x99, 0x64, 0xe2, 0x45, 0x0e, 0x7d, 0x75, 0x28, 0xe2,
|
||||||
0x0a, 0x99, 0x41, 0x1d, 0xda, 0x0b, 0xa9, 0xe0, 0xf8, 0xfe, 0xb2, 0x0c, 0xd2, 0x20, 0x12, 0x81,
|
0x13, 0x49, 0x6f, 0x23, 0x62, 0xcc, 0xd0, 0x19, 0xd4, 0xe5, 0xbd, 0x98, 0x2b, 0x49, 0xef, 0xcc,
|
||||||
0x11, 0x86, 0x52, 0xe7, 0x7c, 0xd4, 0xf1, 0xfe, 0x48, 0x31, 0xda, 0xb4, 0xea, 0x79, 0x12, 0x89,
|
0xcb, 0x20, 0x03, 0x62, 0x09, 0x98, 0x50, 0xa8, 0x74, 0x8f, 0x4e, 0xba, 0xc1, 0x9f, 0x38, 0x25,
|
||||||
0xe8, 0x15, 0xe4, 0x3b, 0x9d, 0x03, 0xfc, 0x63, 0x65, 0xed, 0x51, 0x86, 0xb5, 0xce, 0x01, 0x91,
|
0xcb, 0x4e, 0xb3, 0xc8, 0x12, 0x92, 0xbc, 0x84, 0x62, 0xb7, 0xbb, 0x45, 0x7f, 0x8a, 0xda, 0x1e,
|
||||||
0x28, 0x84, 0xa0, 0x70, 0xea, 0x0e, 0x38, 0xde, 0x50, 0xe7, 0x52, 0x6b, 0xf4, 0x10, 0x8a, 0xa7,
|
0xe4, 0x68, 0xeb, 0x6e, 0x31, 0x8d, 0x22, 0x04, 0x4a, 0xfb, 0xfe, 0x40, 0xd2, 0xbb, 0x78, 0x2e,
|
||||||
0x6e, 0x38, 0xa0, 0x02, 0x3f, 0x50, 0x3e, 0x1b, 0x09, 0xbd, 0x81, 0xd2, 0x07, 0xdf, 0x1b, 0x79,
|
0x5c, 0x93, 0xfb, 0x50, 0xde, 0xf7, 0xe3, 0x01, 0x57, 0xf4, 0x1e, 0xfa, 0x6c, 0x29, 0xf2, 0x06,
|
||||||
0x82, 0xe3, 0x87, 0xcb, 0x2e, 0xa7, 0x06, 0x1d, 0x8f, 0x05, 0x89, 0xf0, 0xf2, 0xb4, 0x2a, 0xde,
|
0x2a, 0x1f, 0xc2, 0xe0, 0x24, 0x50, 0x92, 0xde, 0xc7, 0xab, 0xf9, 0x64, 0xb6, 0x72, 0x03, 0xda,
|
||||||
0x34, 0xc4, 0x3f, 0x51, 0x36, 0x23, 0x51, 0x3e, 0x31, 0xe1, 0xc2, 0x78, 0xd3, 0xaa, 0x97, 0x49,
|
0x1d, 0x2a, 0x96, 0xe0, 0xf5, 0x69, 0x31, 0xde, 0x3c, 0xa6, 0x3f, 0x43, 0x9d, 0x09, 0xa9, 0x77,
|
||||||
0x24, 0xca, 0xa3, 0x9d, 0x4c, 0x7c, 0x1f, 0x3f, 0x52, 0x6a, 0xb5, 0xd6, 0xef, 0x5e, 0xa6, 0xc1,
|
0x6c, 0xb8, 0x28, 0x5d, 0x76, 0x9a, 0x55, 0x96, 0x90, 0xfa, 0x68, 0x7b, 0xa3, 0x30, 0xa4, 0x0f,
|
||||||
0xc9, 0x84, 0x9f, 0x63, 0x5b, 0x3d, 0x49, 0x68, 0x66, 0xcf, 0xdf, 0x31, 0xb7, 0x8f, 0x1f, 0x27,
|
0x90, 0x8d, 0x6b, 0xf3, 0xed, 0x75, 0x1a, 0xec, 0x8d, 0xe4, 0x11, 0x75, 0x71, 0x27, 0xc3, 0x19,
|
||||||
0x9f, 0x4b, 0x0d, 0x3a, 0x84, 0xd5, 0x8e, 0x6a, 0x4b, 0x27, 0xaa, 0x19, 0xe1, 0x27, 0xca, 0x8f,
|
0xef, 0xbf, 0x13, 0x7e, 0x9f, 0x3e, 0xcc, 0xee, 0x6b, 0x0e, 0xd9, 0x86, 0xc5, 0x2e, 0xf6, 0xa4,
|
||||||
0x97, 0x0d, 0xd9, 0xb9, 0x1a, 0x51, 0xe7, 0x92, 0x3e, 0x24, 0x9b, 0x57, 0x43, 0x83, 0x49, 0x8a,
|
0x3d, 0xec, 0x44, 0xf4, 0x11, 0xfa, 0xf1, 0xa2, 0xa5, 0xdb, 0x56, 0x2b, 0x69, 0x5b, 0xda, 0x87,
|
||||||
0x6a, 0xff, 0x1a, 0xd0, 0x7c, 0xd5, 0x90, 0xd5, 0x76, 0x48, 0xa7, 0x51, 0xb5, 0x1d, 0xd2, 0xa9,
|
0x6c, 0xe7, 0x6a, 0x19, 0x30, 0x9b, 0x10, 0x4d, 0x8a, 0xea, 0x27, 0xe3, 0xa2, 0xea, 0x42, 0xf5,
|
||||||
0x2c, 0x1c, 0x17, 0xae, 0x3f, 0x89, 0x6a, 0x9e, 0x16, 0xbe, 0xc9, 0x7d, 0x6d, 0xd9, 0xdf, 0xc2,
|
0xb7, 0x3a, 0xc9, 0x35, 0xfb, 0x31, 0xb2, 0x53, 0x5a, 0x27, 0xd3, 0x5a, 0x14, 0x09, 0xe5, 0x9b,
|
||||||
0x5a, 0xfa, 0x42, 0xdf, 0x89, 0xfd, 0x06, 0xaa, 0x89, 0xac, 0xbd, 0x0b, 0xd5, 0xf9, 0x97, 0x05,
|
0xa2, 0xfb, 0x04, 0xc3, 0x9d, 0x65, 0x91, 0x2f, 0xe1, 0xfe, 0x5e, 0x2c, 0x4e, 0x79, 0xe4, 0x47,
|
||||||
0xd5, 0xc4, 0xd5, 0x52, 0x49, 0x30, 0x1d, 0x53, 0x43, 0x56, 0x6b, 0xb4, 0x0b, 0x2b, 0x3b, 0x42,
|
0x3d, 0x9e, 0x94, 0x72, 0xcc, 0xbc, 0x65, 0xd4, 0x95, 0xb3, 0xeb, 0xfe, 0x06, 0xc8, 0x74, 0xf5,
|
||||||
0x84, 0xb2, 0x45, 0xc8, 0x3c, 0xfa, 0xc5, 0x8d, 0x17, 0xb4, 0xa1, 0xe0, 0xfa, 0x0a, 0x69, 0xaa,
|
0xd2, 0xa7, 0x3b, 0xe6, 0x17, 0x49, 0xc9, 0x3f, 0xe6, 0x17, 0xba, 0x80, 0x9d, 0xfa, 0xe1, 0x28,
|
||||||
0xbc, 0x41, 0x7b, 0x94, 0x0b, 0x2f, 0x70, 0xe5, 0x2d, 0x53, 0x15, 0xbd, 0x42, 0x92, 0x2a, 0xfb,
|
0x29, 0xbc, 0x86, 0xf8, 0xba, 0xf0, 0x95, 0xe3, 0x7e, 0x03, 0x4b, 0x93, 0x85, 0xe5, 0x46, 0xd2,
|
||||||
0x6b, 0x80, 0x19, 0xed, 0x4e, 0x3e, 0xfc, 0xdd, 0x82, 0xfb, 0x73, 0x55, 0x68, 0xa1, 0x27, 0x07,
|
0x6f, 0xa0, 0x9e, 0xb9, 0x3d, 0x37, 0x11, 0xf5, 0xfe, 0xed, 0x40, 0x3d, 0x73, 0xc5, 0x31, 0x19,
|
||||||
0x69, 0x4f, 0xb6, 0x6e, 0x59, 0xd1, 0xe6, 0xfd, 0xf9, 0x3f, 0x4e, 0x7b, 0x04, 0x45, 0x5d, 0xfa,
|
0x2f, 0x86, 0xdc, 0x0a, 0xe3, 0x9a, 0xac, 0xc3, 0xc2, 0x9a, 0x52, 0xb1, 0xee, 0x53, 0x3a, 0x9f,
|
||||||
0x17, 0x9e, 0xd0, 0x86, 0xf2, 0x9e, 0xc7, 0xdd, 0xae, 0x4f, 0xfb, 0x8a, 0x5a, 0x26, 0xb1, 0xac,
|
0x7f, 0x79, 0x65, 0xa1, 0x68, 0x21, 0xdc, 0x5c, 0x65, 0x23, 0xaa, 0x83, 0xbf, 0xc1, 0xa5, 0x0a,
|
||||||
0xfa, 0x8e, 0x3a, 0xbd, 0x8e, 0x9e, 0x16, 0x1c, 0x7d, 0xc7, 0xd1, 0x1a, 0xe4, 0xe2, 0x99, 0x25,
|
0x22, 0x0c, 0x35, 0xf6, 0x95, 0x1a, 0xcb, 0xb2, 0xdc, 0xaf, 0x00, 0xc6, 0x62, 0x37, 0xf2, 0xe1,
|
||||||
0x77, 0xb8, 0x27, 0xc1, 0xb2, 0xe1, 0x6a, 0x57, 0x2b, 0x44, 0x0b, 0x4e, 0x1b, 0x8a, 0xba, 0x6a,
|
0x9f, 0x0e, 0xdc, 0x99, 0xaa, 0x86, 0x33, 0x3d, 0xd9, 0x9a, 0xf4, 0x64, 0xf5, 0x9a, 0x95, 0x75,
|
||||||
0xcc, 0xe1, 0x6d, 0x28, 0xb7, 0x3d, 0x9f, 0xaa, 0xbe, 0xad, 0xcf, 0x1c, 0xcb, 0xd2, 0xbd, 0xfd,
|
0xda, 0x9f, 0x1f, 0x71, 0xda, 0x1d, 0x28, 0x9b, 0x16, 0x34, 0xf3, 0x84, 0x2e, 0x54, 0x37, 0x02,
|
||||||
0xe0, 0xc2, 0x6c, 0x2b, 0x97, 0xce, 0x76, 0xa2, 0x3d, 0x4b, 0x3f, 0x54, 0x27, 0x37, 0x7e, 0xa8,
|
0xe9, 0x1f, 0x84, 0xbc, 0x8f, 0xa2, 0x55, 0x96, 0xd2, 0xd8, 0xff, 0xf0, 0xf4, 0x26, 0x7a, 0x86,
|
||||||
0xfe, 0xfd, 0x10, 0x8a, 0x6d, 0x16, 0x8e, 0x5c, 0x61, 0x8c, 0x19, 0xc9, 0x71, 0x60, 0xed, 0x30,
|
0xf0, 0x4c, 0xad, 0x21, 0x4b, 0x50, 0x48, 0x07, 0xa7, 0xc2, 0xf6, 0x86, 0x06, 0xeb, 0xae, 0x6f,
|
||||||
0xe0, 0x63, 0xda, 0x13, 0xd9, 0x63, 0xde, 0x31, 0xdc, 0x8b, 0x31, 0x66, 0xc0, 0x4b, 0xcc, 0x29,
|
0x5c, 0xad, 0x31, 0x43, 0x78, 0x1d, 0x28, 0x9b, 0xea, 0x35, 0x85, 0x77, 0xa1, 0xda, 0x09, 0x42,
|
||||||
0xd6, 0xdd, 0xe7, 0x94, 0xbf, 0x59, 0x50, 0x89, 0x2b, 0x11, 0x6a, 0x41, 0x51, 0xbd, 0x8d, 0x68,
|
0x8e, 0xc3, 0x83, 0x39, 0x73, 0x4a, 0x6b, 0xf7, 0x36, 0xa3, 0x53, 0x6b, 0x56, 0x2f, 0xbd, 0xef,
|
||||||
0x5a, 0x7c, 0x75, 0x43, 0xe9, 0x6a, 0x7c, 0x54, 0x68, 0xd3, 0x11, 0x34, 0xd5, 0xfe, 0x01, 0xaa,
|
0xc7, 0x33, 0x82, 0x76, 0x03, 0xa7, 0x09, 0xeb, 0x06, 0xce, 0x10, 0xf7, 0xa1, 0xdc, 0x11, 0xf1,
|
||||||
0x09, 0xf5, 0x82, 0x04, 0xd8, 0x4a, 0x26, 0x40, 0x66, 0x29, 0xd7, 0x9b, 0x24, 0xd3, 0x63, 0x0f,
|
0x89, 0xaf, 0xac, 0x2e, 0x4b, 0xe9, 0xce, 0xb4, 0x3d, 0x88, 0x44, 0xcc, 0xbb, 0xca, 0x57, 0x23,
|
||||||
0x8a, 0x5a, 0xb9, 0x30, 0xac, 0x08, 0x0a, 0x07, 0x6e, 0xa8, 0x53, 0x23, 0x4f, 0xd4, 0x5a, 0xea,
|
0xe3, 0x49, 0x95, 0x4d, 0xf0, 0x3c, 0x0f, 0x96, 0xb6, 0x23, 0x39, 0xe4, 0x3d, 0x95, 0x3f, 0x8e,
|
||||||
0x3a, 0xec, 0x4c, 0xa8, 0xd7, 0x93, 0x27, 0x6a, 0xed, 0xfc, 0xc3, 0x82, 0x9a, 0x19, 0xfd, 0x4c,
|
0xee, 0xc2, 0xad, 0x14, 0x63, 0x07, 0xd1, 0xcc, 0x3c, 0xe5, 0xdc, 0x7c, 0x9e, 0xfa, 0x87, 0x03,
|
||||||
0x04, 0x29, 0xac, 0xeb, 0x1b, 0x4a, 0xc3, 0x48, 0x67, 0xfc, 0x7f, 0xb3, 0x24, 0x94, 0x11, 0xb4,
|
0xb5, 0xb4, 0x68, 0x92, 0x36, 0x94, 0xf1, 0x83, 0x25, 0x53, 0xed, 0xcb, 0x2b, 0xaa, 0x6c, 0xeb,
|
||||||
0x71, 0x9d, 0xab, 0xa3, 0x31, 0x67, 0xd2, 0x6e, 0xc1, 0x83, 0x85, 0xd0, 0x3b, 0x5d, 0x91, 0x97,
|
0x23, 0xa2, 0x6d, 0xf3, 0x32, 0xa2, 0xee, 0x77, 0x50, 0xcf, 0xb0, 0x67, 0xe4, 0xc8, 0x6a, 0x36,
|
||||||
0x70, 0x7f, 0x36, 0xd4, 0x66, 0xe7, 0xc9, 0x06, 0xa0, 0x24, 0xcc, 0x0c, 0xbd, 0x4f, 0xa1, 0x2a,
|
0x47, 0x72, 0xbb, 0x8e, 0x31, 0x92, 0xcd, 0xa0, 0x0d, 0x28, 0x1b, 0xe6, 0xcc, 0xd0, 0x13, 0x28,
|
||||||
0x3f, 0x12, 0xb2, 0x69, 0x0e, 0xac, 0x6a, 0x80, 0x89, 0x0c, 0x82, 0xc2, 0x90, 0x4e, 0x75, 0x36,
|
0x6d, 0xf9, 0xb1, 0xc9, 0x9e, 0x22, 0xc3, 0xb5, 0xe6, 0x75, 0xc5, 0xa1, 0xc2, 0x70, 0x17, 0x19,
|
||||||
0x54, 0x88, 0x5a, 0x3b, 0x7f, 0xb5, 0xe4, 0xac, 0x3f, 0x9e, 0x88, 0xf7, 0x94, 0x73, 0x77, 0x20,
|
0xae, 0xbd, 0x7f, 0x39, 0xd0, 0xb0, 0x23, 0xaa, 0x8d, 0x20, 0x87, 0xdb, 0xe6, 0x12, 0xf3, 0x38,
|
||||||
0x13, 0xb0, 0x70, 0x18, 0x78, 0xc2, 0x64, 0xdf, 0xe7, 0x59, 0x33, 0xff, 0x78, 0x22, 0x24, 0xcc,
|
0xe1, 0x59, 0xff, 0xdf, 0xcc, 0x09, 0x65, 0x02, 0x6d, 0x5d, 0x96, 0x35, 0xd1, 0x98, 0x52, 0xe9,
|
||||||
0xb0, 0x0e, 0x7e, 0x44, 0x14, 0x0b, 0x6d, 0x43, 0x61, 0xcf, 0x15, 0xae, 0xc9, 0x85, 0x8c, 0x09,
|
0xb6, 0xe1, 0xde, 0x4c, 0xe8, 0x8d, 0x6e, 0xd1, 0x0b, 0xb8, 0x33, 0x1e, 0xbe, 0xf3, 0xf3, 0xe4,
|
||||||
0x47, 0x22, 0x12, 0x44, 0x29, 0xee, 0x96, 0xe4, 0x87, 0xcd, 0x78, 0x22, 0x9c, 0x17, 0xb0, 0x7e,
|
0x2e, 0x90, 0x2c, 0xcc, 0x0e, 0xe7, 0x4f, 0xa0, 0xae, 0x1f, 0x33, 0xf9, 0x62, 0x1e, 0x2c, 0x1a,
|
||||||
0xdd, 0xfa, 0x02, 0xd7, 0xbe, 0x82, 0x6a, 0xc2, 0x8a, 0xba, 0xb7, 0xc7, 0x6d, 0x05, 0x28, 0x13,
|
0x80, 0x8d, 0x0c, 0x81, 0xd2, 0x31, 0xbf, 0x30, 0xd9, 0x50, 0x63, 0xb8, 0xf6, 0xfe, 0xee, 0xe8,
|
||||||
0xb9, 0x94, 0xbe, 0xc6, 0x07, 0x59, 0xd5, 0x7b, 0x38, 0xf7, 0xa0, 0xa6, 0x4c, 0xc7, 0x11, 0xfc,
|
0x37, 0xc9, 0x70, 0xa4, 0xde, 0x73, 0x29, 0xfd, 0x81, 0x4e, 0xc0, 0xd2, 0x76, 0x14, 0x28, 0x9b,
|
||||||
0x53, 0x0e, 0x4a, 0x91, 0x89, 0xed, 0x94, 0xdf, 0xcf, 0xb2, 0xfc, 0x9e, 0x77, 0xf9, 0x35, 0x14,
|
0x7d, 0x9f, 0xe6, 0xbd, 0x4d, 0x86, 0x23, 0xa5, 0x61, 0x56, 0x6a, 0xeb, 0x27, 0x0c, 0xa5, 0xc8,
|
||||||
0x64, 0xfd, 0x30, 0x2e, 0x67, 0x8c, 0x07, 0xed, 0x7e, 0x82, 0x26, 0xe1, 0xe8, 0x3b, 0x28, 0x12,
|
0x6b, 0x28, 0x6d, 0xf8, 0xca, 0xb7, 0xb9, 0x90, 0x33, 0x8c, 0x69, 0x44, 0x46, 0x50, 0x93, 0xeb,
|
||||||
0xca, 0xe5, 0x28, 0xa3, 0x87, 0xfe, 0xe7, 0x8b, 0x89, 0x1a, 0x33, 0x23, 0x1b, 0x92, 0xa4, 0x77,
|
0x15, 0xfd, 0x00, 0x1b, 0x8e, 0x94, 0xf7, 0x1c, 0x6e, 0x5f, 0xd6, 0x3e, 0xc3, 0xb5, 0x2f, 0xa0,
|
||||||
0xbc, 0x41, 0xe0, 0xfa, 0xb8, 0xb0, 0x8c, 0xae, 0x31, 0x09, 0xba, 0x56, 0xcc, 0xc2, 0xfd, 0x67,
|
0x9e, 0xd1, 0x82, 0x57, 0x7b, 0xb7, 0x83, 0x80, 0x2a, 0xd3, 0x4b, 0xed, 0x6b, 0x7a, 0x90, 0x45,
|
||||||
0x0b, 0xaa, 0x4b, 0x43, 0xbd, 0xfc, 0xb3, 0x6c, 0xee, 0x53, 0x31, 0xff, 0x3f, 0x7e, 0x2a, 0xfe,
|
0x63, 0xc3, 0xbb, 0x05, 0x0d, 0x54, 0x9d, 0x46, 0xf0, 0xcf, 0x05, 0xa8, 0x24, 0x2a, 0x5e, 0x4f,
|
||||||
0xdb, 0x4a, 0x1b, 0x52, 0x53, 0x8d, 0xbc, 0x4f, 0x63, 0xe6, 0x05, 0xc2, 0xa4, 0x6c, 0x42, 0x23,
|
0xf8, 0xfd, 0x34, 0xcf, 0xef, 0x69, 0x97, 0x5f, 0x41, 0x49, 0x97, 0x18, 0xeb, 0x72, 0xce, 0x24,
|
||||||
0x0f, 0xda, 0x1a, 0xf5, 0x4d, 0xd1, 0x97, 0xcb, 0x59, 0xf1, 0xce, 0x9b, 0xe2, 0x2d, 0x93, 0xe0,
|
0xd3, 0xe9, 0x67, 0xc4, 0x34, 0x9c, 0xfc, 0x1a, 0xca, 0x8c, 0x4b, 0x3d, 0x75, 0x99, 0xd7, 0xc9,
|
||||||
0x03, 0xa7, 0xa1, 0x0a, 0x51, 0x85, 0xa8, 0xb5, 0xac, 0xd7, 0x47, 0x4c, 0x69, 0x57, 0x54, 0xb6,
|
0xb3, 0xd9, 0x82, 0x06, 0x33, 0x16, 0xb6, 0x42, 0x5a, 0xbc, 0x1b, 0x0c, 0x22, 0x3f, 0xa4, 0xa5,
|
||||||
0x18, 0x49, 0xd9, 0xbb, 0xec, 0xe3, 0xa2, 0x76, 0xbc, 0x75, 0xa9, 0xba, 0xd0, 0x11, 0x93, 0xba,
|
0x79, 0xe2, 0x06, 0x93, 0x11, 0x37, 0x8c, 0x71, 0xb8, 0xff, 0xea, 0x40, 0x7d, 0x6e, 0xa8, 0xe7,
|
||||||
0x92, 0x02, 0x6a, 0x41, 0xe2, 0x4e, 0xc5, 0x14, 0x97, 0x75, 0xaa, 0x9d, 0x8a, 0xa9, 0x6c, 0x28,
|
0x3f, 0x1f, 0xa7, 0x9e, 0xb4, 0xc5, 0xff, 0xf3, 0x49, 0xfb, 0x97, 0xc2, 0xa4, 0x22, 0x1c, 0xc0,
|
||||||
0x84, 0xf9, 0x7e, 0xd7, 0xed, 0x0d, 0x71, 0x45, 0x77, 0xb2, 0x48, 0x96, 0x93, 0x9e, 0x8c, 0xae,
|
0xf4, 0x7d, 0x1a, 0x8a, 0x20, 0x52, 0x36, 0x65, 0x33, 0x1c, 0x7d, 0xd0, 0xf6, 0x49, 0xdf, 0xf6,
|
||||||
0xe7, 0xfa, 0xea, 0x9b, 0xa0, 0x4c, 0x22, 0xd1, 0xd9, 0x81, 0x4a, 0x9c, 0x14, 0xb2, 0x47, 0xb5,
|
0x05, 0xbd, 0xd4, 0xd7, 0x6c, 0x47, 0x68, 0x5e, 0x1d, 0xd3, 0xc0, 0x10, 0xe3, 0xaa, 0x5f, 0xb4,
|
||||||
0xfb, 0x2a, 0xe8, 0x35, 0x92, 0x6b, 0xf7, 0xa3, 0x7c, 0xce, 0xcd, 0xe7, 0x73, 0x3e, 0x91, 0xcf,
|
0x55, 0x5f, 0xa7, 0xc6, 0x07, 0xc9, 0x63, 0x0c, 0x5c, 0x8d, 0xe1, 0x5a, 0x57, 0xfa, 0x1d, 0x81,
|
||||||
0xdb, 0x50, 0x4b, 0xa5, 0x87, 0x04, 0x11, 0x76, 0xc9, 0x8d, 0x21, 0xb5, 0x96, 0xba, 0x16, 0xf3,
|
0xdc, 0x05, 0x14, 0xb6, 0x14, 0x5a, 0x39, 0xeb, 0xd3, 0xb2, 0x09, 0x47, 0xfb, 0x2c, 0xb1, 0x72,
|
||||||
0xf5, 0x57, 0x6f, 0x8d, 0xa8, 0xb5, 0xf3, 0x1c, 0x6a, 0xa9, 0xc4, 0x58, 0x54, 0x81, 0x9d, 0x67,
|
0xd6, 0xa7, 0x95, 0xd4, 0xca, 0x19, 0x5a, 0xd9, 0x57, 0x17, 0xb4, 0x6a, 0x12, 0x70, 0x5f, 0x5d,
|
||||||
0x50, 0xeb, 0x08, 0x57, 0x4c, 0x96, 0xfc, 0x4d, 0xf1, 0x1f, 0x0b, 0xd6, 0x22, 0x8c, 0xa9, 0x31,
|
0xe8, 0x4e, 0xc4, 0x44, 0x18, 0x1e, 0xf8, 0xbd, 0x63, 0x5a, 0x33, 0x2d, 0x30, 0xa1, 0xf5, 0xa8,
|
||||||
0xbf, 0x84, 0xf2, 0x05, 0x0d, 0x05, 0xbd, 0x8a, 0xbb, 0x0e, 0x9e, 0x1f, 0x34, 0x3f, 0x2a, 0x04,
|
0xaa, 0x63, 0x1e, 0xf8, 0x21, 0x3e, 0x6a, 0xaa, 0x2c, 0x21, 0xbd, 0x35, 0xa8, 0xa5, 0xa9, 0xa2,
|
||||||
0x89, 0x91, 0xe8, 0x1b, 0x28, 0x73, 0x65, 0x87, 0x46, 0x13, 0xcb, 0x67, 0x59, 0x2c, 0xb3, 0x5f,
|
0x9b, 0x5b, 0xa7, 0x8f, 0x9f, 0xa2, 0xc1, 0x0a, 0x9d, 0x7e, 0x92, 0xe5, 0x85, 0xe9, 0x2c, 0x2f,
|
||||||
0x8c, 0x47, 0x4d, 0x28, 0xf8, 0x6c, 0xc0, 0xd5, 0x7b, 0xaf, 0x6e, 0x3d, 0xce, 0xe2, 0xbd, 0x63,
|
0x66, 0xb2, 0xfc, 0x35, 0x34, 0x26, 0x92, 0x46, 0x83, 0x98, 0x38, 0x93, 0x56, 0x11, 0xae, 0x35,
|
||||||
0x03, 0xa2, 0x80, 0xe8, 0x2d, 0x94, 0x2f, 0xdd, 0x30, 0xf0, 0x82, 0x41, 0xf4, 0xb5, 0xfc, 0x34,
|
0xaf, 0x2d, 0x42, 0xf3, 0x66, 0x6f, 0x30, 0x5c, 0x7b, 0xcf, 0xa0, 0x31, 0x91, 0x2e, 0xb3, 0xea,
|
||||||
0x8b, 0xf4, 0x83, 0xc6, 0x91, 0x98, 0xe0, 0xd4, 0xe4, 0x75, 0x39, 0x63, 0x26, 0x26, 0xce, 0x6f,
|
0xb2, 0xf7, 0x14, 0x1a, 0xa6, 0xc1, 0xe5, 0x97, 0x9d, 0xff, 0x3a, 0xb0, 0x94, 0x60, 0x6c, 0xe5,
|
||||||
0x64, 0xd6, 0x4a, 0xd1, 0xb8, 0x7f, 0x08, 0x35, 0x9d, 0xf9, 0x1f, 0x69, 0xc8, 0xe5, 0xfc, 0x67,
|
0xf9, 0x15, 0x54, 0x4f, 0x79, 0xac, 0xf8, 0x79, 0xda, 0x8b, 0xe8, 0xf4, 0xa4, 0xfc, 0x11, 0x11,
|
||||||
0x2d, 0xbb, 0x9d, 0xbb, 0x49, 0x28, 0x49, 0x33, 0x9d, 0x4f, 0xa6, 0xb1, 0x45, 0x0a, 0x99, 0x4b,
|
0x2c, 0x45, 0xea, 0x27, 0xbc, 0x44, 0x3d, 0x3c, 0x19, 0x75, 0x1e, 0xe7, 0x49, 0x59, 0x7b, 0x29,
|
||||||
0x63, 0xb7, 0x37, 0x74, 0x07, 0xd1, 0x7b, 0x8a, 0x44, 0xf9, 0xe4, 0xc2, 0xec, 0xa7, 0x2f, 0x68,
|
0x9e, 0xac, 0x40, 0x29, 0x14, 0x03, 0x89, 0xdf, 0xbd, 0xbe, 0xfa, 0x30, 0x4f, 0xee, 0x9d, 0x18,
|
||||||
0x24, 0xca, 0xdc, 0x0c, 0xe9, 0x85, 0xc7, 0x67, 0xa3, 0x68, 0x2c, 0x6f, 0xfd, 0xa5, 0x04, 0xd0,
|
0x30, 0x04, 0x92, 0xb7, 0x50, 0x3d, 0xf3, 0xe3, 0x28, 0x88, 0x06, 0xc9, 0x73, 0xff, 0x49, 0x9e,
|
||||||
0x8a, 0xcf, 0x83, 0x4e, 0x60, 0x45, 0xed, 0x87, 0x9c, 0xa5, 0x6d, 0x52, 0xf9, 0x6d, 0x3f, 0xbf,
|
0xd0, 0x77, 0x06, 0xc7, 0x52, 0x01, 0xaf, 0xa1, 0x2f, 0xd1, 0xa1, 0xb0, 0x31, 0xf1, 0x7e, 0xa7,
|
||||||
0x45, 0x2b, 0x45, 0x1f, 0x65, 0xf2, 0xab, 0xf1, 0x06, 0xbd, 0xc8, 0x2a, 0x08, 0xc9, 0x09, 0xc9,
|
0x73, 0x59, 0x93, 0xd6, 0xfd, 0x6d, 0x68, 0x98, 0xfb, 0xf0, 0x91, 0xc7, 0x52, 0x0f, 0x8e, 0xce,
|
||||||
0x7e, 0x79, 0x03, 0xca, 0xd8, 0xfd, 0x00, 0x45, 0x9d, 0x05, 0x28, 0xab, 0xea, 0x25, 0xf3, 0xd6,
|
0xbc, 0x3b, 0xbb, 0x9e, 0x85, 0xb2, 0x49, 0x49, 0xef, 0x07, 0xdb, 0xee, 0x12, 0x86, 0xce, 0xa5,
|
||||||
0x7e, 0xb1, 0x1c, 0xa4, 0x8d, 0x7e, 0x61, 0x21, 0x62, 0x6a, 0x22, 0x72, 0x96, 0x34, 0x3d, 0x73,
|
0xa1, 0xdf, 0x3b, 0xf6, 0x07, 0xc9, 0x77, 0x4a, 0x48, 0xbd, 0x73, 0x6a, 0xed, 0x99, 0x6b, 0x9b,
|
||||||
0x63, 0xb2, 0x02, 0x90, 0xea, 0x2f, 0x75, 0x0b, 0x7d, 0x0f, 0x45, 0x5d, 0xd5, 0xd0, 0x4f, 0x17,
|
0x90, 0x3a, 0x37, 0x63, 0x7e, 0x1a, 0xc8, 0xf1, 0x0c, 0x9b, 0xd2, 0xab, 0x7f, 0xab, 0x00, 0xb4,
|
||||||
0x13, 0x22, 0x7b, 0xcb, 0x1f, 0xd7, 0xad, 0x2f, 0x2c, 0xf4, 0x1e, 0x0a, 0xb2, 0x9d, 0xa3, 0x8c,
|
0xd3, 0xf3, 0x90, 0x3d, 0x58, 0x40, 0x7b, 0xc4, 0x9b, 0xdb, 0x3c, 0xd1, 0x6f, 0xf7, 0xd9, 0x35,
|
||||||
0xde, 0x94, 0x98, 0x05, 0x6c, 0x67, 0x19, 0xc4, 0x44, 0xf1, 0x13, 0xc0, 0x6c, 0xa8, 0x40, 0x19,
|
0x1a, 0x2c, 0xf9, 0xa8, 0x93, 0x1f, 0x87, 0x1e, 0xf2, 0x3c, 0xaf, 0x4c, 0x64, 0xe7, 0x26, 0xf7,
|
||||||
0xff, 0x79, 0xcc, 0x4d, 0x27, 0x76, 0xfd, 0x66, 0xa0, 0xd9, 0xe0, 0xbd, 0xec, 0xa8, 0x67, 0x0c,
|
0xc5, 0x15, 0x28, 0xab, 0xf7, 0x03, 0x94, 0x4d, 0x16, 0x90, 0xbc, 0x5a, 0x98, 0xcd, 0x5b, 0xf7,
|
||||||
0x65, 0xf6, 0xd2, 0xf8, 0x1a, 0xd9, 0xce, 0x32, 0x88, 0x31, 0x77, 0x0e, 0xb5, 0xd4, 0x7f, 0xa2,
|
0xf9, 0x7c, 0x90, 0x51, 0xfa, 0x99, 0x43, 0x98, 0xad, 0x94, 0xc4, 0x9b, 0xd3, 0x0a, 0xed, 0x8d,
|
||||||
0xe8, 0xe7, 0xd9, 0x4e, 0x5e, 0xff, 0x8b, 0xd5, 0x7e, 0x75, 0x2b, 0xac, 0xd9, 0x49, 0x24, 0xa7,
|
0xc9, 0x0b, 0xc0, 0x44, 0xd7, 0x69, 0x3a, 0xe4, 0x5b, 0x28, 0x9b, 0x5a, 0x47, 0x3e, 0x99, 0x2d,
|
||||||
0x32, 0xf3, 0x18, 0x35, 0x6e, 0xf2, 0x3b, 0xfd, 0xff, 0xa6, 0xdd, 0xbc, 0x35, 0x5e, 0xef, 0xba,
|
0x90, 0xe8, 0x9b, 0xbf, 0xdd, 0x74, 0x3e, 0x73, 0xc8, 0x7b, 0x28, 0xe9, 0x26, 0x4f, 0x72, 0x3a,
|
||||||
0x5b, 0xf8, 0x6d, 0x6e, 0xdc, 0xed, 0x16, 0xd5, 0x5f, 0xc5, 0x5f, 0xfd, 0x37, 0x00, 0x00, 0xff,
|
0x56, 0x66, 0x42, 0x70, 0xbd, 0x79, 0x10, 0x1b, 0xc5, 0x1f, 0x00, 0xc6, 0xa3, 0x06, 0xc9, 0xf9,
|
||||||
0xff, 0xc1, 0x4b, 0x2d, 0x65, 0xc8, 0x16, 0x00, 0x00,
|
0xd3, 0x66, 0x6a, 0x66, 0x71, 0x9b, 0x57, 0x03, 0xad, 0x81, 0xf7, 0xba, 0xcf, 0x1e, 0x0a, 0x92,
|
||||||
|
0xdb, 0x61, 0xd3, 0x6b, 0xe4, 0x7a, 0xf3, 0x20, 0x56, 0xdd, 0x11, 0x34, 0x26, 0xfe, 0xd1, 0x25,
|
||||||
|
0xbf, 0xc8, 0x77, 0xf2, 0xf2, 0x1f, 0xc4, 0xee, 0xcb, 0x6b, 0x61, 0xad, 0x25, 0x95, 0x9d, 0xd5,
|
||||||
|
0xec, 0x36, 0x69, 0x5d, 0xe5, 0xf7, 0xe4, 0xbf, 0xb3, 0xee, 0xca, 0xb5, 0xf1, 0xc6, 0xea, 0x7a,
|
||||||
|
0xe9, 0xfb, 0xc2, 0xf0, 0xe0, 0xa0, 0x8c, 0x7f, 0x74, 0x7f, 0xf1, 0xbf, 0x00, 0x00, 0x00, 0xff,
|
||||||
|
0xff, 0xc9, 0xe6, 0x4b, 0xb6, 0x86, 0x17, 0x00, 0x00,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
|||||||
@@ -49,7 +49,7 @@ message BuildRequest {
|
|||||||
message BuildOptions {
|
message BuildOptions {
|
||||||
string ContextPath = 1;
|
string ContextPath = 1;
|
||||||
string DockerfileName = 2;
|
string DockerfileName = 2;
|
||||||
PrintFunc PrintFunc = 3;
|
CallFunc CallFunc = 3;
|
||||||
map<string, string> NamedContexts = 4;
|
map<string, string> NamedContexts = 4;
|
||||||
|
|
||||||
repeated string Allow = 5;
|
repeated string Allow = 5;
|
||||||
@@ -77,6 +77,10 @@ message BuildOptions {
|
|||||||
bool ExportPush = 26;
|
bool ExportPush = 26;
|
||||||
bool ExportLoad = 27;
|
bool ExportLoad = 27;
|
||||||
moby.buildkit.v1.sourcepolicy.Policy SourcePolicy = 28;
|
moby.buildkit.v1.sourcepolicy.Policy SourcePolicy = 28;
|
||||||
|
string Ref = 29;
|
||||||
|
string GroupRef = 30;
|
||||||
|
repeated string Annotations = 31;
|
||||||
|
string ProvenanceResponseMode = 32;
|
||||||
}
|
}
|
||||||
|
|
||||||
message ExportEntry {
|
message ExportEntry {
|
||||||
@@ -107,9 +111,10 @@ message Secret {
|
|||||||
string Env = 3;
|
string Env = 3;
|
||||||
}
|
}
|
||||||
|
|
||||||
message PrintFunc {
|
message CallFunc {
|
||||||
string Name = 1;
|
string Name = 1;
|
||||||
string Format = 2;
|
string Format = 2;
|
||||||
|
bool IgnoreStatus = 3;
|
||||||
}
|
}
|
||||||
|
|
||||||
message InspectRequest {
|
message InspectRequest {
|
||||||
@@ -192,6 +197,7 @@ message InitMessage {
|
|||||||
message InvokeConfig {
|
message InvokeConfig {
|
||||||
repeated string Entrypoint = 1;
|
repeated string Entrypoint = 1;
|
||||||
repeated string Cmd = 2;
|
repeated string Cmd = 2;
|
||||||
|
bool NoCmd = 11; // Do not set cmd but use the image's default
|
||||||
repeated string Env = 3;
|
repeated string Env = 3;
|
||||||
string User = 4;
|
string User = 4;
|
||||||
bool NoUser = 5; // Do not set user but use the image's default
|
bool NoUser = 5; // Do not set user but use the image's default
|
||||||
|
|||||||
@@ -15,6 +15,7 @@ func CreateExports(entries []*ExportEntry) ([]client.ExportEntry, error) {
|
|||||||
if len(entries) == 0 {
|
if len(entries) == 0 {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
var stdoutUsed bool
|
||||||
for _, entry := range entries {
|
for _, entry := range entries {
|
||||||
if entry.Type == "" {
|
if entry.Type == "" {
|
||||||
return nil, errors.Errorf("type is required for output")
|
return nil, errors.Errorf("type is required for output")
|
||||||
@@ -68,10 +69,14 @@ func CreateExports(entries []*ExportEntry) ([]client.ExportEntry, error) {
|
|||||||
entry.Destination = "-"
|
entry.Destination = "-"
|
||||||
}
|
}
|
||||||
if entry.Destination == "-" {
|
if entry.Destination == "-" {
|
||||||
|
if stdoutUsed {
|
||||||
|
return nil, errors.Errorf("multiple outputs configured to write to stdout")
|
||||||
|
}
|
||||||
if _, err := console.ConsoleFromFile(os.Stdout); err == nil {
|
if _, err := console.ConsoleFromFile(os.Stdout); err == nil {
|
||||||
return nil, errors.Errorf("dest file is required for %s exporter. refusing to write to console", out.Type)
|
return nil, errors.Errorf("dest file is required for %s exporter. refusing to write to console", out.Type)
|
||||||
}
|
}
|
||||||
out.Output = wrapWriteCloser(os.Stdout)
|
out.Output = wrapWriteCloser(os.Stdout)
|
||||||
|
stdoutUsed = true
|
||||||
} else if entry.Destination != "" {
|
} else if entry.Destination != "" {
|
||||||
fi, err := os.Stat(entry.Destination)
|
fi, err := os.Stat(entry.Destination)
|
||||||
if err != nil && !os.IsNotExist(err) {
|
if err != nil && !os.IsNotExist(err) {
|
||||||
|
|||||||
@@ -4,7 +4,6 @@ import (
|
|||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/docker/docker/builder/remotecontext/urlutil"
|
|
||||||
"github.com/moby/buildkit/util/gitutil"
|
"github.com/moby/buildkit/util/gitutil"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -22,7 +21,7 @@ func ResolveOptionPaths(options *BuildOptions) (_ *BuildOptions, err error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if options.DockerfileName != "" && options.DockerfileName != "-" {
|
if options.DockerfileName != "" && options.DockerfileName != "-" {
|
||||||
if localContext && !urlutil.IsURL(options.DockerfileName) {
|
if localContext && !isHTTPURL(options.DockerfileName) {
|
||||||
options.DockerfileName, err = filepath.Abs(options.DockerfileName)
|
options.DockerfileName, err = filepath.Abs(options.DockerfileName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -164,8 +163,15 @@ func ResolveOptionPaths(options *BuildOptions) (_ *BuildOptions, err error) {
|
|||||||
return options, nil
|
return options, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// isHTTPURL returns true if the provided str is an HTTP(S) URL by checking if it
|
||||||
|
// has a http:// or https:// scheme. No validation is performed to verify if the
|
||||||
|
// URL is well-formed.
|
||||||
|
func isHTTPURL(str string) bool {
|
||||||
|
return strings.HasPrefix(str, "https://") || strings.HasPrefix(str, "http://")
|
||||||
|
}
|
||||||
|
|
||||||
func isRemoteURL(c string) bool {
|
func isRemoteURL(c string) bool {
|
||||||
if urlutil.IsURL(c) {
|
if isHTTPURL(c) {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
if _, err := gitutil.ParseGitRef(c); err == nil {
|
if _, err := gitutil.ParseGitRef(c); err == nil {
|
||||||
|
|||||||
@@ -236,6 +236,7 @@ func TestResolvePaths(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
|
tt := tt
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
got, err := ResolveOptionPaths(&tt.options)
|
got, err := ResolveOptionPaths(&tt.options)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|||||||
@@ -137,7 +137,7 @@ func (m *Manager) StartProcess(pid string, resultCtx *build.ResultHandle, cfg *p
|
|||||||
go func() {
|
go func() {
|
||||||
var err error
|
var err error
|
||||||
if err = ctr.Exec(ctx, cfg, in.Stdin, in.Stdout, in.Stderr); err != nil {
|
if err = ctr.Exec(ctx, cfg, in.Stdin, in.Stdout, in.Stderr); err != nil {
|
||||||
logrus.Errorf("failed to exec process: %v", err)
|
logrus.Debugf("process error: %v", err)
|
||||||
}
|
}
|
||||||
logrus.Debugf("finished process %s %v", pid, cfg.Entrypoint)
|
logrus.Debugf("finished process %s %v", pid, cfg.Entrypoint)
|
||||||
m.processes.Delete(pid)
|
m.processes.Delete(pid)
|
||||||
|
|||||||
@@ -210,7 +210,7 @@ func (c *Client) build(ctx context.Context, ref string, options pb.BuildOptions,
|
|||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
} else if n > 0 {
|
} else if n > 0 {
|
||||||
if stream.Send(&pb.InputMessage{
|
if err := stream.Send(&pb.InputMessage{
|
||||||
Input: &pb.InputMessage_Data{
|
Input: &pb.InputMessage_Data{
|
||||||
Data: &pb.DataMessage{
|
Data: &pb.DataMessage{
|
||||||
Data: buf[:n],
|
Data: buf[:n],
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ import (
|
|||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/containerd/containerd/log"
|
"github.com/containerd/log"
|
||||||
"github.com/docker/buildx/build"
|
"github.com/docker/buildx/build"
|
||||||
cbuild "github.com/docker/buildx/controller/build"
|
cbuild "github.com/docker/buildx/controller/build"
|
||||||
"github.com/docker/buildx/controller/control"
|
"github.com/docker/buildx/controller/control"
|
||||||
|
|||||||
@@ -207,6 +207,7 @@ func attachIO(ctx context.Context, stream msgStream, initMessage *pb.InitMessage
|
|||||||
|
|
||||||
if cfg.signal != nil {
|
if cfg.signal != nil {
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
|
names := signalNames()
|
||||||
for {
|
for {
|
||||||
var sig syscall.Signal
|
var sig syscall.Signal
|
||||||
select {
|
select {
|
||||||
@@ -216,7 +217,7 @@ func attachIO(ctx context.Context, stream msgStream, initMessage *pb.InitMessage
|
|||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
name := sigToName[sig]
|
name := names[sig]
|
||||||
if name == "" {
|
if name == "" {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -358,7 +359,7 @@ func copyToStream(fd uint32, snd msgStream, r io.Reader) error {
|
|||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
} else if n > 0 {
|
} else if n > 0 {
|
||||||
if snd.Send(&pb.Message{
|
if err := snd.Send(&pb.Message{
|
||||||
Input: &pb.Message_File{
|
Input: &pb.Message_File{
|
||||||
File: &pb.FdMessage{
|
File: &pb.FdMessage{
|
||||||
Fd: fd,
|
Fd: fd,
|
||||||
@@ -380,12 +381,12 @@ func copyToStream(fd uint32, snd msgStream, r io.Reader) error {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
var sigToName = map[syscall.Signal]string{}
|
func signalNames() map[syscall.Signal]string {
|
||||||
|
m := make(map[syscall.Signal]string, len(signal.SignalMap))
|
||||||
func init() {
|
|
||||||
for name, value := range signal.SignalMap {
|
for name, value := range signal.SignalMap {
|
||||||
sigToName[value] = name
|
m[value] = name
|
||||||
}
|
}
|
||||||
|
return m
|
||||||
}
|
}
|
||||||
|
|
||||||
type debugStream struct {
|
type debugStream struct {
|
||||||
|
|||||||
@@ -57,9 +57,7 @@ func (m *Server) ListProcesses(ctx context.Context, req *pb.ListProcessesRequest
|
|||||||
return nil, errors.Errorf("unknown ref %q", req.Ref)
|
return nil, errors.Errorf("unknown ref %q", req.Ref)
|
||||||
}
|
}
|
||||||
res = new(pb.ListProcessesResponse)
|
res = new(pb.ListProcessesResponse)
|
||||||
for _, p := range s.processes.ListProcesses() {
|
res.Infos = append(res.Infos, s.processes.ListProcesses()...)
|
||||||
res.Infos = append(res.Infos, p)
|
|
||||||
}
|
|
||||||
return res, nil
|
return res, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
variable "GO_VERSION" {
|
variable "GO_VERSION" {
|
||||||
default = "1.20"
|
default = null
|
||||||
}
|
}
|
||||||
variable "DOCS_FORMATS" {
|
variable "DOCS_FORMATS" {
|
||||||
default = "md"
|
default = "md"
|
||||||
@@ -7,6 +7,12 @@ variable "DOCS_FORMATS" {
|
|||||||
variable "DESTDIR" {
|
variable "DESTDIR" {
|
||||||
default = "./bin"
|
default = "./bin"
|
||||||
}
|
}
|
||||||
|
variable "TEST_COVERAGE" {
|
||||||
|
default = null
|
||||||
|
}
|
||||||
|
variable "GOLANGCI_LINT_MULTIPLATFORM" {
|
||||||
|
default = ""
|
||||||
|
}
|
||||||
|
|
||||||
# Special target: https://github.com/docker/metadata-action#bake-definition
|
# Special target: https://github.com/docker/metadata-action#bake-definition
|
||||||
target "meta-helper" {
|
target "meta-helper" {
|
||||||
@@ -25,13 +31,37 @@ group "default" {
|
|||||||
}
|
}
|
||||||
|
|
||||||
group "validate" {
|
group "validate" {
|
||||||
targets = ["lint", "validate-vendor", "validate-docs"]
|
targets = ["lint", "lint-gopls", "validate-golangci", "validate-vendor", "validate-docs"]
|
||||||
}
|
}
|
||||||
|
|
||||||
target "lint" {
|
target "lint" {
|
||||||
inherits = ["_common"]
|
inherits = ["_common"]
|
||||||
dockerfile = "./hack/dockerfiles/lint.Dockerfile"
|
dockerfile = "./hack/dockerfiles/lint.Dockerfile"
|
||||||
output = ["type=cacheonly"]
|
output = ["type=cacheonly"]
|
||||||
|
platforms = GOLANGCI_LINT_MULTIPLATFORM != "" ? [
|
||||||
|
"darwin/amd64",
|
||||||
|
"darwin/arm64",
|
||||||
|
"linux/amd64",
|
||||||
|
"linux/arm64",
|
||||||
|
"linux/s390x",
|
||||||
|
"linux/ppc64le",
|
||||||
|
"linux/riscv64",
|
||||||
|
"windows/amd64",
|
||||||
|
"windows/arm64"
|
||||||
|
] : []
|
||||||
|
}
|
||||||
|
|
||||||
|
target "validate-golangci" {
|
||||||
|
description = "Validate .golangci.yml schema (does not run Go linter)"
|
||||||
|
inherits = ["_common"]
|
||||||
|
dockerfile = "./hack/dockerfiles/lint.Dockerfile"
|
||||||
|
target = "validate-golangci"
|
||||||
|
output = ["type=cacheonly"]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "lint-gopls" {
|
||||||
|
inherits = ["lint"]
|
||||||
|
target = "gopls-analyze"
|
||||||
}
|
}
|
||||||
|
|
||||||
target "validate-vendor" {
|
target "validate-vendor" {
|
||||||
@@ -166,13 +196,18 @@ variable "HTTPS_PROXY" {
|
|||||||
variable "NO_PROXY" {
|
variable "NO_PROXY" {
|
||||||
default = ""
|
default = ""
|
||||||
}
|
}
|
||||||
|
variable "TEST_BUILDKIT_TAG" {
|
||||||
|
default = null
|
||||||
|
}
|
||||||
|
|
||||||
target "integration-test-base" {
|
target "integration-test-base" {
|
||||||
inherits = ["_common"]
|
inherits = ["_common"]
|
||||||
args = {
|
args = {
|
||||||
|
GO_EXTRA_FLAGS = TEST_COVERAGE == "1" ? "-cover" : null
|
||||||
HTTP_PROXY = HTTP_PROXY
|
HTTP_PROXY = HTTP_PROXY
|
||||||
HTTPS_PROXY = HTTPS_PROXY
|
HTTPS_PROXY = HTTPS_PROXY
|
||||||
NO_PROXY = NO_PROXY
|
NO_PROXY = NO_PROXY
|
||||||
|
BUILDKIT_VERSION = TEST_BUILDKIT_TAG
|
||||||
}
|
}
|
||||||
target = "integration-test-base"
|
target = "integration-test-base"
|
||||||
output = ["type=cacheonly"]
|
output = ["type=cacheonly"]
|
||||||
@@ -182,3 +217,18 @@ target "integration-test" {
|
|||||||
inherits = ["integration-test-base"]
|
inherits = ["integration-test-base"]
|
||||||
target = "integration-test"
|
target = "integration-test"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "GOVULNCHECK_FORMAT" {
|
||||||
|
default = null
|
||||||
|
}
|
||||||
|
|
||||||
|
target "govulncheck" {
|
||||||
|
inherits = ["_common"]
|
||||||
|
dockerfile = "./hack/dockerfiles/govulncheck.Dockerfile"
|
||||||
|
target = "output"
|
||||||
|
args = {
|
||||||
|
FORMAT = GOVULNCHECK_FORMAT
|
||||||
|
}
|
||||||
|
no-cache-filter = ["run"]
|
||||||
|
output = ["${DESTDIR}"]
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,4 +1,6 @@
|
|||||||
# Bake file reference
|
---
|
||||||
|
title: Bake file reference
|
||||||
|
---
|
||||||
|
|
||||||
The Bake file is a file for defining workflows that you run using `docker buildx bake`.
|
The Bake file is a file for defining workflows that you run using `docker buildx bake`.
|
||||||
|
|
||||||
@@ -12,18 +14,118 @@ You can define your Bake file in the following file formats:
|
|||||||
|
|
||||||
By default, Bake uses the following lookup order to find the configuration file:
|
By default, Bake uses the following lookup order to find the configuration file:
|
||||||
|
|
||||||
1. `docker-bake.override.hcl`
|
1. `compose.yaml`
|
||||||
2. `docker-bake.hcl`
|
2. `compose.yml`
|
||||||
3. `docker-bake.override.json`
|
3. `docker-compose.yml`
|
||||||
4. `docker-bake.json`
|
4. `docker-compose.yaml`
|
||||||
5. `docker-compose.yaml`
|
5. `docker-bake.json`
|
||||||
6. `docker-compose.yml`
|
6. `docker-bake.override.json`
|
||||||
|
7. `docker-bake.hcl`
|
||||||
|
8. `docker-bake.override.hcl`
|
||||||
|
|
||||||
Bake searches for the file in the current working directory.
|
|
||||||
You can specify the file location explicitly using the `--file` flag:
|
You can specify the file location explicitly using the `--file` flag:
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx bake --file=../docker/bake.hcl --print
|
$ docker buildx bake --file ../docker/bake.hcl --print
|
||||||
|
```
|
||||||
|
|
||||||
|
If you don't specify a file explicitly, Bake searches for the file in the
|
||||||
|
current working directory. If more than one Bake file is found, all files are
|
||||||
|
merged into a single definition. Files are merged according to the lookup
|
||||||
|
order. That means that if your project contains both a `compose.yaml` file and
|
||||||
|
a `docker-bake.hcl` file, Bake loads the `compose.yaml` file first, and then
|
||||||
|
the `docker-bake.hcl` file.
|
||||||
|
|
||||||
|
If merged files contain duplicate attribute definitions, those definitions are
|
||||||
|
either merged or overridden by the last occurrence, depending on the attribute.
|
||||||
|
The following attributes are overridden by the last occurrence:
|
||||||
|
|
||||||
|
- `target.cache-to`
|
||||||
|
- `target.dockerfile-inline`
|
||||||
|
- `target.dockerfile`
|
||||||
|
- `target.outputs`
|
||||||
|
- `target.platforms`
|
||||||
|
- `target.pull`
|
||||||
|
- `target.tags`
|
||||||
|
- `target.target`
|
||||||
|
|
||||||
|
For example, if `compose.yaml` and `docker-bake.hcl` both define the `tags`
|
||||||
|
attribute, the `docker-bake.hcl` is used.
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ cat compose.yaml
|
||||||
|
services:
|
||||||
|
webapp:
|
||||||
|
build:
|
||||||
|
context: .
|
||||||
|
tags:
|
||||||
|
- bar
|
||||||
|
$ cat docker-bake.hcl
|
||||||
|
target "webapp" {
|
||||||
|
tags = ["foo"]
|
||||||
|
}
|
||||||
|
$ docker buildx bake --print webapp
|
||||||
|
{
|
||||||
|
"group": {
|
||||||
|
"default": {
|
||||||
|
"targets": [
|
||||||
|
"webapp"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"target": {
|
||||||
|
"webapp": {
|
||||||
|
"context": ".",
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"tags": [
|
||||||
|
"foo"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
All other attributes are merged. For example, if `compose.yaml` and
|
||||||
|
`docker-bake.hcl` both define unique entries for the `labels` attribute, all
|
||||||
|
entries are included. Duplicate entries for the same label are overridden.
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ cat compose.yaml
|
||||||
|
services:
|
||||||
|
webapp:
|
||||||
|
build:
|
||||||
|
context: .
|
||||||
|
labels:
|
||||||
|
com.example.foo: "foo"
|
||||||
|
com.example.name: "Alice"
|
||||||
|
$ cat docker-bake.hcl
|
||||||
|
target "webapp" {
|
||||||
|
labels = {
|
||||||
|
"com.example.bar" = "bar"
|
||||||
|
"com.example.name" = "Bob"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
$ docker buildx bake --print webapp
|
||||||
|
{
|
||||||
|
"group": {
|
||||||
|
"default": {
|
||||||
|
"targets": [
|
||||||
|
"webapp"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"target": {
|
||||||
|
"webapp": {
|
||||||
|
"context": ".",
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"labels": {
|
||||||
|
"com.example.foo": "foo",
|
||||||
|
"com.example.bar": "bar",
|
||||||
|
"com.example.name": "Bob"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
## Syntax
|
## Syntax
|
||||||
@@ -113,8 +215,9 @@ target "webapp" {
|
|||||||
The following table shows the complete list of attributes that you can assign to a target:
|
The following table shows the complete list of attributes that you can assign to a target:
|
||||||
|
|
||||||
| Name | Type | Description |
|
| Name | Type | Description |
|
||||||
| ----------------------------------------------- | ------- | -------------------------------------------------------------------- |
|
|-------------------------------------------------|---------|----------------------------------------------------------------------|
|
||||||
| [`args`](#targetargs) | Map | Build arguments |
|
| [`args`](#targetargs) | Map | Build arguments |
|
||||||
|
| [`annotations`](#targetannotations) | List | Exporter annotations |
|
||||||
| [`attest`](#targetattest) | List | Build attestations |
|
| [`attest`](#targetattest) | List | Build attestations |
|
||||||
| [`cache-from`](#targetcache-from) | List | External cache sources |
|
| [`cache-from`](#targetcache-from) | List | External cache sources |
|
||||||
| [`cache-to`](#targetcache-to) | List | External cache destinations |
|
| [`cache-to`](#targetcache-to) | List | External cache destinations |
|
||||||
@@ -132,9 +235,11 @@ The following table shows the complete list of attributes that you can assign to
|
|||||||
| [`platforms`](#targetplatforms) | List | Target platforms |
|
| [`platforms`](#targetplatforms) | List | Target platforms |
|
||||||
| [`pull`](#targetpull) | Boolean | Always pull images |
|
| [`pull`](#targetpull) | Boolean | Always pull images |
|
||||||
| [`secret`](#targetsecret) | List | Secrets to expose to the build |
|
| [`secret`](#targetsecret) | List | Secrets to expose to the build |
|
||||||
|
| [`shm-size`](#targetshm-size) | List | Size of `/dev/shm` |
|
||||||
| [`ssh`](#targetssh) | List | SSH agent sockets or keys to expose to the build |
|
| [`ssh`](#targetssh) | List | SSH agent sockets or keys to expose to the build |
|
||||||
| [`tags`](#targettags) | List | Image names and tags |
|
| [`tags`](#targettags) | List | Image names and tags |
|
||||||
| [`target`](#targettarget) | String | Target build stage |
|
| [`target`](#targettarget) | String | Target build stage |
|
||||||
|
| [`ulimits`](#targetulimits) | List | Ulimit options |
|
||||||
|
|
||||||
### `target.args`
|
### `target.args`
|
||||||
|
|
||||||
@@ -171,6 +276,41 @@ target "db" {
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### `target.annotations`
|
||||||
|
|
||||||
|
The `annotations` attribute lets you add annotations to images built with bake.
|
||||||
|
The key takes a list of annotations, in the format of `KEY=VALUE`.
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
target "default" {
|
||||||
|
output = ["type=image,name=foo"]
|
||||||
|
annotations = ["org.opencontainers.image.authors=dvdksn"]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
is the same as
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
target "default" {
|
||||||
|
output = ["type=image,name=foo,annotation.org.opencontainers.image.authors=dvdksn"]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
By default, the annotation is added to image manifests. You can configure the
|
||||||
|
level of the annotations by adding a prefix to the annotation, containing a
|
||||||
|
comma-separated list of all the levels that you want to annotate. The following
|
||||||
|
example adds annotations to both the image index and manifests.
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
target "default" {
|
||||||
|
output = ["type=image,name=foo"]
|
||||||
|
annotations = ["index,manifest:org.opencontainers.image.authors=dvdksn"]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Read about the supported levels in
|
||||||
|
[Specifying annotation levels](https://docs.docker.com/build/building/annotations/#specifying-annotation-levels).
|
||||||
|
|
||||||
### `target.attest`
|
### `target.attest`
|
||||||
|
|
||||||
The `attest` attribute lets you apply [build attestations][attestations] to the target.
|
The `attest` attribute lets you apply [build attestations][attestations] to the target.
|
||||||
@@ -303,8 +443,7 @@ COPY --from=src . .
|
|||||||
|
|
||||||
#### Use another target as base
|
#### Use another target as base
|
||||||
|
|
||||||
> **Note**
|
> [!NOTE]
|
||||||
>
|
|
||||||
> You should prefer to use regular multi-stage builds over this option. You can
|
> You should prefer to use regular multi-stage builds over this option. You can
|
||||||
> Use this feature when you have multiple Dockerfiles that can't be easily
|
> Use this feature when you have multiple Dockerfiles that can't be easily
|
||||||
> merged into one.
|
> merged into one.
|
||||||
@@ -366,6 +505,25 @@ $ docker buildx bake --print -f - <<< 'target "default" {}'
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### `target.entitlements`
|
||||||
|
|
||||||
|
Entitlements are permissions that the build process requires to run.
|
||||||
|
|
||||||
|
Currently supported entitlements are:
|
||||||
|
|
||||||
|
- `network.host`: Allows the build to use commands that access the host network. In Dockerfile, use [`RUN --network=host`](https://docs.docker.com/reference/dockerfile/#run---networkhost) to run a command with host network enabled.
|
||||||
|
|
||||||
|
- `security.insecure`: Allows the build to run commands in privileged containers that are not limited by the default security sandbox. Such container may potentially access and modify system resources. In Dockerfile, use [`RUN --security=insecure`](https://docs.docker.com/reference/dockerfile/#run---security) to run a command in a privileged container.
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
target "integration-tests" {
|
||||||
|
# this target requires privileged containers to run nested containers
|
||||||
|
entitlements = ["security.insecure"]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Entitlements are enabled with a two-step process. First, a target must declare the entitlements it requires. Secondly, when invoking the `bake` command, the user must grant the entitlements by passing the `--allow` flag or confirming the entitlements when prompted in an interactive terminal. This is to ensure that the user is aware of the possibly insecure permissions they are granting to the build process.
|
||||||
|
|
||||||
### `target.inherits`
|
### `target.inherits`
|
||||||
|
|
||||||
A target can inherit attributes from other targets.
|
A target can inherit attributes from other targets.
|
||||||
@@ -610,6 +768,27 @@ target "app" {
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### `target.network`
|
||||||
|
|
||||||
|
Specify the network mode for the whole build request. This will override the default network mode
|
||||||
|
for all the `RUN` instructions in the Dockerfile. Accepted values are `default`, `host`, and `none`.
|
||||||
|
|
||||||
|
Usually, a better approach to set the network mode for your build steps is to instead use `RUN --network=<value>`
|
||||||
|
in your Dockerfile. This way, you can set the network mode for individual build steps and everyone building
|
||||||
|
the Dockerfile gets consistent behavior without needing to pass additional flags to the build command.
|
||||||
|
|
||||||
|
If you set network mode to `host` in your Bake file, you must also grant `network.host` entitlement when
|
||||||
|
invoking the `bake` command. This is because `host` network mode requires elevated privileges and can be a security risk.
|
||||||
|
You can pass `--allow=network.host` to the `docker buildx bake` command to grant the entitlement, or you can
|
||||||
|
confirm the entitlement when prompted if you are using an interactive terminal.
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
target "app" {
|
||||||
|
# make sure this build does not access internet
|
||||||
|
network = "none"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
### `target.no-cache-filter`
|
### `target.no-cache-filter`
|
||||||
|
|
||||||
Don't use build cache for the specified stages.
|
Don't use build cache for the specified stages.
|
||||||
@@ -692,10 +871,32 @@ This lets you [mount the secret][run_mount_secret] in your Dockerfile.
|
|||||||
```dockerfile
|
```dockerfile
|
||||||
RUN --mount=type=secret,id=aws,target=/root/.aws/credentials \
|
RUN --mount=type=secret,id=aws,target=/root/.aws/credentials \
|
||||||
aws cloudfront create-invalidation ...
|
aws cloudfront create-invalidation ...
|
||||||
RUN --mount=type=secret,id=KUBECONFIG \
|
RUN --mount=type=secret,id=KUBECONFIG,env=KUBECONFIG \
|
||||||
KUBECONFIG=$(cat /run/secrets/KUBECONFIG) helm upgrade --install
|
helm upgrade --install
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### `target.shm-size`
|
||||||
|
|
||||||
|
Sets the size of the shared memory allocated for build containers when using
|
||||||
|
`RUN` instructions.
|
||||||
|
|
||||||
|
The format is `<number><unit>`. `number` must be greater than `0`. Unit is
|
||||||
|
optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g`
|
||||||
|
(gigabytes). If you omit the unit, the system uses bytes.
|
||||||
|
|
||||||
|
This is the same as the `--shm-size` flag for `docker build`.
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
target "default" {
|
||||||
|
shm-size = "128m"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
> [!NOTE]
|
||||||
|
> In most cases, it is recommended to let the builder automatically determine
|
||||||
|
> the appropriate configurations. Manual adjustments should only be considered
|
||||||
|
> when specific performance tuning is required for complex build scenarios.
|
||||||
|
|
||||||
### `target.ssh`
|
### `target.ssh`
|
||||||
|
|
||||||
Defines SSH agent sockets or keys to expose to the build.
|
Defines SSH agent sockets or keys to expose to the build.
|
||||||
@@ -742,6 +943,30 @@ target "default" {
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### `target.ulimits`
|
||||||
|
|
||||||
|
Ulimits overrides the default ulimits of build's containers when using `RUN`
|
||||||
|
instructions and are specified with a soft and hard limit as such:
|
||||||
|
`<type>=<soft limit>[:<hard limit>]`, for example:
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
target "app" {
|
||||||
|
ulimits = [
|
||||||
|
"nofile=1024:1024"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
> [!NOTE]
|
||||||
|
> If you do not provide a `hard limit`, the `soft limit` is used
|
||||||
|
> for both values. If no `ulimits` are set, they are inherited from
|
||||||
|
> the default `ulimits` set on the daemon.
|
||||||
|
|
||||||
|
> [!NOTE]
|
||||||
|
> In most cases, it is recommended to let the builder automatically determine
|
||||||
|
> the appropriate configurations. Manual adjustments should only be considered
|
||||||
|
> when specific performance tuning is required for complex build scenarios.
|
||||||
|
|
||||||
## Group
|
## Group
|
||||||
|
|
||||||
Groups allow you to invoke multiple builds (targets) at once.
|
Groups allow you to invoke multiple builds (targets) at once.
|
||||||
@@ -925,28 +1150,27 @@ target "webapp-dev" {
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
> **Note**
|
> [!NOTE]
|
||||||
>
|
|
||||||
> See [User defined HCL functions][hcl-funcs] page for more details.
|
> See [User defined HCL functions][hcl-funcs] page for more details.
|
||||||
|
|
||||||
<!-- external links -->
|
<!-- external links -->
|
||||||
|
|
||||||
[attestations]: https://docs.docker.com/build/attestations/
|
[attestations]: https://docs.docker.com/build/attestations/
|
||||||
[bake_stdlib]: https://github.com/docker/buildx/blob/master/bake/hclparser/stdlib.go
|
[bake_stdlib]: https://github.com/docker/buildx/blob/master/bake/hclparser/stdlib.go
|
||||||
[build-arg]: https://docs.docker.com/engine/reference/commandline/build/#build-arg
|
[build-arg]: https://docs.docker.com/reference/cli/docker/image/build/#build-arg
|
||||||
[build-context]: https://docs.docker.com/engine/reference/commandline/buildx_build/#build-context
|
[build-context]: https://docs.docker.com/reference/cli/docker/buildx/build/#build-context
|
||||||
[cache-backends]: https://docs.docker.com/build/cache/backends/
|
[cache-backends]: https://docs.docker.com/build/cache/backends/
|
||||||
[cache-from]: https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-from
|
[cache-from]: https://docs.docker.com/reference/cli/docker/buildx/build/#cache-from
|
||||||
[cache-to]: https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-to
|
[cache-to]: https://docs.docker.com/reference/cli/docker/buildx/build/#cache-to
|
||||||
[context]: https://docs.docker.com/engine/reference/commandline/buildx_build/#build-context
|
[context]: https://docs.docker.com/reference/cli/docker/buildx/build/#build-context
|
||||||
[file]: https://docs.docker.com/engine/reference/commandline/build/#file
|
[file]: https://docs.docker.com/reference/cli/docker/image/build/#file
|
||||||
[go-cty]: https://github.com/zclconf/go-cty/tree/main/cty/function/stdlib
|
[go-cty]: https://github.com/zclconf/go-cty/tree/main/cty/function/stdlib
|
||||||
[hcl-funcs]: https://docs.docker.com/build/bake/hcl-funcs/
|
[hcl-funcs]: https://docs.docker.com/build/bake/hcl-funcs/
|
||||||
[output]: https://docs.docker.com/engine/reference/commandline/buildx_build/#output
|
[output]: https://docs.docker.com/reference/cli/docker/buildx/build/#output
|
||||||
[platform]: https://docs.docker.com/engine/reference/commandline/buildx_build/#platform
|
[platform]: https://docs.docker.com/reference/cli/docker/buildx/build/#platform
|
||||||
[run_mount_secret]: https://docs.docker.com/engine/reference/builder/#run---mounttypesecret
|
[run_mount_secret]: https://docs.docker.com/reference/dockerfile/#run---mounttypesecret
|
||||||
[secret]: https://docs.docker.com/engine/reference/commandline/buildx_build/#secret
|
[secret]: https://docs.docker.com/reference/cli/docker/buildx/build/#secret
|
||||||
[ssh]: https://docs.docker.com/engine/reference/commandline/buildx_build/#ssh
|
[ssh]: https://docs.docker.com/reference/cli/docker/buildx/build/#ssh
|
||||||
[tag]: https://docs.docker.com/engine/reference/commandline/build/#tag
|
[tag]: https://docs.docker.com/reference/cli/docker/image/build/#tag
|
||||||
[target]: https://docs.docker.com/engine/reference/commandline/build/#target
|
[target]: https://docs.docker.com/reference/cli/docker/image/build/#target
|
||||||
[userfunc]: https://github.com/hashicorp/hcl/tree/main/ext/userfunc
|
[userfunc]: https://github.com/hashicorp/hcl/tree/main/ext/userfunc
|
||||||
|
|||||||
@@ -4,8 +4,7 @@ To assist with creating and debugging complex builds, Buildx provides a
|
|||||||
debugger to help you step through the build process and easily inspect the
|
debugger to help you step through the build process and easily inspect the
|
||||||
state of the build environment at any point.
|
state of the build environment at any point.
|
||||||
|
|
||||||
> **Note**
|
> [!NOTE]
|
||||||
>
|
|
||||||
> The debug monitor is a new experimental feature in recent versions of Buildx.
|
> The debug monitor is a new experimental feature in recent versions of Buildx.
|
||||||
> There are rough edges, known bugs, and missing features. Please try it out
|
> There are rough edges, known bugs, and missing features. Please try it out
|
||||||
> and let us know what you think!
|
> and let us know what you think!
|
||||||
@@ -19,11 +18,13 @@ your environment.
|
|||||||
$ export BUILDX_EXPERIMENTAL=1
|
$ export BUILDX_EXPERIMENTAL=1
|
||||||
```
|
```
|
||||||
|
|
||||||
To start a debug session for a build, you can use the `--invoke` flag with the
|
To start a debug session for a build, you can use the `buildx debug` command with `--invoke` flag to specify a command to launch in the resulting image.
|
||||||
build command to specify a command to launch in the resulting image.
|
`buildx debug` command provides `buildx debug build` subcommand that provides the same features as the normal `buildx build` command but allows launching the debugger session after the build.
|
||||||
|
|
||||||
|
Arguments available after `buildx debug build` are the same as the normal `buildx build`.
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx build --invoke /bin/sh .
|
$ docker buildx debug --invoke /bin/sh build .
|
||||||
[+] Building 4.2s (19/19) FINISHED
|
[+] Building 4.2s (19/19) FINISHED
|
||||||
=> [internal] connecting to local controller 0.0s
|
=> [internal] connecting to local controller 0.0s
|
||||||
=> [internal] load build definition from Dockerfile 0.0s
|
=> [internal] load build definition from Dockerfile 0.0s
|
||||||
@@ -56,16 +57,16 @@ Supported keys are `args` (can be JSON array format), `entrypoint` (can be JSON
|
|||||||
Example:
|
Example:
|
||||||
|
|
||||||
```
|
```
|
||||||
$ docker buildx build --invoke 'entrypoint=["sh"],"args=[""-c"", ""env | grep -e FOO -e AAA""]","env=[""FOO=bar"", ""AAA=bbb""]"' .
|
$ docker buildx debug --invoke 'entrypoint=["sh"],"args=[""-c"", ""env | grep -e FOO -e AAA""]","env=[""FOO=bar"", ""AAA=bbb""]"' build .
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `on-error`
|
#### `on` flag
|
||||||
|
|
||||||
If you want to start a debug session when a build fails, you can use
|
If you want to start a debug session when a build fails, you can use
|
||||||
`--invoke=on-error` to start a debug session when the build fails.
|
`--on=error` to start a debug session when the build fails.
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx build --invoke on-error .
|
$ docker buildx debug --on=error build .
|
||||||
[+] Building 4.2s (19/19) FINISHED
|
[+] Building 4.2s (19/19) FINISHED
|
||||||
=> [internal] connecting to local controller 0.0s
|
=> [internal] connecting to local controller 0.0s
|
||||||
=> [internal] load build definition from Dockerfile 0.0s
|
=> [internal] load build definition from Dockerfile 0.0s
|
||||||
@@ -85,13 +86,13 @@ Interactive container was restarted with process "edmzor60nrag7rh1mbi4o9lm8". Pr
|
|||||||
|
|
||||||
This allows you to explore the state of the image when the build failed.
|
This allows you to explore the state of the image when the build failed.
|
||||||
|
|
||||||
#### `debug-shell`
|
#### Launch the debug session directly with `buildx debug` subcommand
|
||||||
|
|
||||||
If you want to drop into a debug session without first starting the build, you
|
If you want to drop into a debug session without first starting the build, you
|
||||||
can use `--invoke=debug-shell` to start a debug session.
|
can use `buildx debug` command to start a debug session.
|
||||||
|
|
||||||
```
|
```
|
||||||
$ docker buildx build --invoke debug-shell .
|
$ docker buildx debug
|
||||||
[+] Building 4.2s (19/19) FINISHED
|
[+] Building 4.2s (19/19) FINISHED
|
||||||
=> [internal] connecting to local controller 0.0s
|
=> [internal] connecting to local controller 0.0s
|
||||||
(buildx)
|
(buildx)
|
||||||
@@ -116,12 +117,12 @@ Available commands are:
|
|||||||
disconnect disconnect a client from a buildx server. Specific session ID can be specified an arg
|
disconnect disconnect a client from a buildx server. Specific session ID can be specified an arg
|
||||||
exec execute a process in the interactive container
|
exec execute a process in the interactive container
|
||||||
exit exits monitor
|
exit exits monitor
|
||||||
help shows this message
|
help shows this message. Optionally pass a command name as an argument to print the detailed usage.
|
||||||
kill kill buildx server
|
kill kill buildx server
|
||||||
list list buildx sessions
|
list list buildx sessions
|
||||||
ps list processes invoked by "exec". Use "attach" to attach IO to that process
|
ps list processes invoked by "exec". Use "attach" to attach IO to that process
|
||||||
reload reloads the context and build it
|
reload reloads the context and build it
|
||||||
rollback re-runs the interactive container with initial rootfs contents
|
rollback re-runs the interactive container with the step's rootfs contents
|
||||||
```
|
```
|
||||||
|
|
||||||
## Build controllers
|
## Build controllers
|
||||||
@@ -135,15 +136,15 @@ To detach the build process from the CLI, you can use the `--detach=true` flag w
|
|||||||
the build command.
|
the build command.
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx build --detach=true --invoke /bin/sh .
|
$ docker buildx debug --invoke /bin/sh build --detach=true .
|
||||||
```
|
```
|
||||||
|
|
||||||
If you start a debugging session using the `--invoke` flag with a detached
|
If you start a debugging session using the `--invoke` flag with a detached
|
||||||
build, then you can attach to it using the `buildx debug-shell` subcommand to
|
build, then you can attach to it using the `buildx debug` command to
|
||||||
immediately enter the monitor mode.
|
immediately enter the monitor mode.
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx debug-shell
|
$ docker buildx debug
|
||||||
[+] Building 0.0s (1/1) FINISHED
|
[+] Building 0.0s (1/1) FINISHED
|
||||||
=> [internal] connecting to remote controller
|
=> [internal] connecting to remote controller
|
||||||
(buildx) list
|
(buildx) list
|
||||||
@@ -3,6 +3,7 @@ package main
|
|||||||
import (
|
import (
|
||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/docker/buildx/commands"
|
"github.com/docker/buildx/commands"
|
||||||
clidocstool "github.com/docker/cli-docs-tool"
|
clidocstool "github.com/docker/cli-docs-tool"
|
||||||
@@ -26,6 +27,28 @@ type options struct {
|
|||||||
formats []string
|
formats []string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// fixUpExperimentalCLI trims the " (EXPERIMENTAL)" suffix from the CLI output,
|
||||||
|
// as docs.docker.com already displays "experimental (CLI)",
|
||||||
|
//
|
||||||
|
// https://github.com/docker/buildx/pull/2188#issuecomment-1889487022
|
||||||
|
func fixUpExperimentalCLI(cmd *cobra.Command) {
|
||||||
|
const (
|
||||||
|
annotationExperimentalCLI = "experimentalCLI"
|
||||||
|
suffixExperimental = " (EXPERIMENTAL)"
|
||||||
|
)
|
||||||
|
if _, ok := cmd.Annotations[annotationExperimentalCLI]; ok {
|
||||||
|
cmd.Short = strings.TrimSuffix(cmd.Short, suffixExperimental)
|
||||||
|
}
|
||||||
|
cmd.Flags().VisitAll(func(f *pflag.Flag) {
|
||||||
|
if _, ok := f.Annotations[annotationExperimentalCLI]; ok {
|
||||||
|
f.Usage = strings.TrimSuffix(f.Usage, suffixExperimental)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
for _, c := range cmd.Commands() {
|
||||||
|
fixUpExperimentalCLI(c)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func gen(opts *options) error {
|
func gen(opts *options) error {
|
||||||
log.SetFlags(0)
|
log.SetFlags(0)
|
||||||
|
|
||||||
@@ -57,6 +80,8 @@ func gen(opts *options) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
case "yaml":
|
case "yaml":
|
||||||
|
// fix up is needed only for yaml (used for generating docs.docker.com contents)
|
||||||
|
fixUpExperimentalCLI(cmd)
|
||||||
if err = c.GenYamlTree(cmd); err != nil {
|
if err = c.GenYamlTree(cmd); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,3 +0,0 @@
|
|||||||
# CI/CD
|
|
||||||
|
|
||||||
This page has moved to [Docker Docs website](https://docs.docker.com/build/ci/)
|
|
||||||
@@ -1,3 +0,0 @@
|
|||||||
# CNI networking
|
|
||||||
|
|
||||||
This page has moved to [Docker Docs website](https://docs.docker.com/build/buildkit/configure/#cni-networking)
|
|
||||||
@@ -1,3 +0,0 @@
|
|||||||
# Color output controls
|
|
||||||
|
|
||||||
This page has moved to [Docker Docs website](https://docs.docker.com/build/building/env-vars/#buildkit_colors)
|
|
||||||
@@ -1,3 +0,0 @@
|
|||||||
# Using a custom network
|
|
||||||
|
|
||||||
This page has moved to [Docker Docs website](https://docs.docker.com/build/drivers/docker-container/#custom-network)
|
|
||||||
@@ -1,3 +0,0 @@
|
|||||||
# Using a custom registry configuration
|
|
||||||
|
|
||||||
This page has moved to [Docker Docs website](https://docs.docker.com/build/buildkit/configure/#setting-registry-certificates)
|
|
||||||
@@ -1,3 +0,0 @@
|
|||||||
# OpenTelemetry support
|
|
||||||
|
|
||||||
This page has moved to [Docker Docs website](https://docs.docker.com/build/building/opentelemetry/)
|
|
||||||
@@ -1,3 +0,0 @@
|
|||||||
# Registry mirror
|
|
||||||
|
|
||||||
This page has moved to [Docker Docs website](https://docs.docker.com/build/buildkit/configure/#registry-mirror)
|
|
||||||
@@ -1,3 +0,0 @@
|
|||||||
# Resource limiting
|
|
||||||
|
|
||||||
This page has moved to [Docker Docs website](https://docs.docker.com/build/buildkit/configure/#resource-limiting)
|
|
||||||
@@ -1,3 +0,0 @@
|
|||||||
# Defining additional build contexts and linking targets
|
|
||||||
|
|
||||||
This page has moved to [Docker Docs website](https://docs.docker.com/build/bake/build-contexts)
|
|
||||||
@@ -1,3 +0,0 @@
|
|||||||
# Building from Compose file
|
|
||||||
|
|
||||||
This page has moved to [Docker Docs website](https://docs.docker.com/build/bake/compose-file)
|
|
||||||
@@ -1,3 +0,0 @@
|
|||||||
# Configuring builds
|
|
||||||
|
|
||||||
This page has moved to [Docker Docs website](https://docs.docker.com/build/bake/configuring-build)
|
|
||||||
@@ -1,3 +0,0 @@
|
|||||||
# Bake file definition
|
|
||||||
|
|
||||||
This page has moved to [docs/bake-reference.md](../../bake-reference.md)
|
|
||||||
@@ -1,3 +0,0 @@
|
|||||||
# User defined HCL functions
|
|
||||||
|
|
||||||
This page has moved to [Docker Docs website](https://docs.docker.com/build/bake/hcl-funcs)
|
|
||||||
@@ -1,3 +0,0 @@
|
|||||||
# High-level build options with Bake
|
|
||||||
|
|
||||||
This page has moved to [Docker Docs website](https://docs.docker.com/build/bake)
|
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user