Mirror of https://gitea.com/Lydanne/buildx.git (synced 2025-08-21 03:05:39 +08:00)

Compare commits: v0.2.1 ... v0.6.0-rc1 (364 commits)
Commits 65a6955db8 through 77c23dd85f (author, date, and message columns not preserved in the mirror).
@@ -1,2 +1,3 @@
bin/
cross-out/
release-out/
.github/CODE_OF_CONDUCT.md (vendored, new file, 4 lines)

@@ -0,0 +1,4 @@
# Code of conduct

- [Moby community guidelines](https://github.com/moby/moby/blob/master/CONTRIBUTING.md#moby-community-guidelines)
- [Docker Code of Conduct](https://github.com/docker/code-of-conduct)
.github/CONTRIBUTING.md (vendored, new file, 292 lines)

@@ -0,0 +1,292 @@
# Contribute to the Buildx project

This page contains information about reporting issues as well as some tips and
guidelines useful to experienced open source contributors.

## Reporting security issues

The project maintainers take security seriously. If you discover a security
issue, please bring it to their attention right away!

**Please _DO NOT_ file a public issue**, instead send your report privately to
[security@docker.com](mailto:security@docker.com).

Security reports are greatly appreciated and we will publicly thank you for it.
We also like to send gifts—if you're into schwag, make sure to let
us know. We currently do not offer a paid security bounty program, but are not
ruling it out in the future.


## Reporting other issues

A great way to contribute to the project is to send a detailed report when you
encounter an issue. We always appreciate a well-written, thorough bug report,
and will thank you for it!

Check that [our issue database](https://github.com/docker/buildx/issues)
doesn't already include that problem or suggestion before submitting an issue.
If you find a match, you can use the "subscribe" button to get notified on
updates. Do *not* leave random "+1" or "I have this too" comments, as they
only clutter the discussion, and don't help resolving it. However, if you
have ways to reproduce the issue or have additional information that may help
resolving the issue, please leave a comment.

Include the steps required to reproduce the problem if possible and applicable.
This information will help us review and fix your issue faster. When sending
lengthy log-files, consider posting them as an attachment, instead of posting
inline.

**Do not forget to remove sensitive data from your logfiles before submitting**
(you can replace those parts with "REDACTED").

### Pull requests are always welcome

Not sure if that typo is worth a pull request? Found a bug and know how to fix
it? Do it! We will appreciate it.

If your pull request is not accepted on the first try, don't be discouraged! If
there's a problem with the implementation, hopefully you received feedback on
what to improve.

We're trying very hard to keep Buildx lean and focused. We don't want it to
do everything for everybody. This means that we might decide against
incorporating a new feature. However, there might be a way to implement that
feature *on top of* Buildx.

### Design and cleanup proposals

You can propose new designs for existing features. You can also design
entirely new features. We really appreciate contributors who want to refactor or
otherwise cleanup our project.

### Sign your work

The sign-off is a simple line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are pretty simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):

```
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
1 Letterman Drive
Suite D4700
San Francisco, CA, 94129

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.

Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
    work with modifications, whether created in whole or in part
    by me, under the same open source license (unless I am
    permitted to submit under a different license), as indicated
    in the file; or

(c) The contribution was provided directly to me by some other
    person who certified (a), (b) or (c) and I have not modified
    it.

(d) I understand and agree that this project and the contribution
    are public and that a record of the contribution (including all
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.
```

Then you just add a line to every git commit message:

    Signed-off-by: Joe Smith <joe.smith@email.com>

**Use your real name** (sorry, no pseudonyms or anonymous contributions.)

If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.
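
For example, a minimal sketch of that sign-off flow (the name, email, and
commit message below are placeholders):

```
$ git config user.name "Joe Smith"
$ git config user.email "joe.smith@email.com"

# -s appends "Signed-off-by: Joe Smith <joe.smith@email.com>" to the message.
$ git commit -s -m "Fix builder instance selection"

# Forgot the sign-off on the last commit? Amend it in place.
$ git commit --amend -s --no-edit
```
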
### Run the unit- and integration-tests

To enter a demo container environment and experiment, you may run:

```
$ make shell
```

To validate PRs before submitting them you should run:

```
$ make validate-all
```

To generate new vendored files with go modules run:

```
$ make vendor
```


### Conventions

- Fork the repository and make changes on your fork in a feature branch
- Submit tests for your changes. See [run the unit- and integration-tests](#run-the-unit--and-integration-tests)
  for details.
- [Sign your work](#sign-your-work)

Write clean code. Universally formatted code promotes ease of writing, reading,
and maintenance. Always run `gofmt -s -w file.go` on each changed file before
committing your changes. Most editors have plug-ins that do this automatically.

Pull request descriptions should be as clear as possible and include a
reference to all the issues that they address. Be sure that the [commit
messages](#commit-messages) also contain the relevant information.

### Successful Changes

Before contributing large or high impact changes, make the effort to coordinate
with the maintainers of the project before submitting a pull request. This
prevents you from doing extra work that may or may not be merged.

Large PRs that are just submitted without any prior communication are unlikely
to be successful.

While pull requests are the methodology for submitting changes to code, changes
are much more likely to be accepted if they are accompanied by additional
engineering work. While we don't define this explicitly, most of these goals
are accomplished through communication of the design goals and subsequent
solutions. Often times, it helps to first state the problem before presenting
solutions.

Typically, the best methods of accomplishing this are to submit an issue,
stating the problem. This issue can include a problem statement and a
checklist with requirements. If solutions are proposed, alternatives should be
listed and eliminated. Even if the criteria for elimination of a solution is
frivolous, say so.

Larger changes typically work best with design documents. These are focused on
providing context to the design at the time the feature was conceived and can
inform future documentation contributions.

### Commit Messages

Commit messages must start with a capitalized and short summary (max. 50 chars)
written in the imperative, followed by an optional, more detailed explanatory
text which is separated from the summary by an empty line.

Commit messages should follow best practices, including explaining the context
of the problem and how it was solved, including in caveats or follow up changes
required. They should tell the story of the change and provide readers
understanding of what led to it.

If you're lost about what this even means, please see [How to Write a Git
Commit Message](http://chris.beams.io/posts/git-commit/) for a start.

In practice, the best approach to maintaining a nice commit message is to
leverage a `git add -p` and `git commit --amend` to formulate a solid
changeset. This allows one to piece together a change, as information becomes
available.

If you squash a series of commits, don't just submit that. Re-write the commit
message, as if the series of commits was a single stroke of brilliance.

That said, there is no requirement to have a single commit for a PR, as long as
each commit tells the story. For example, if there is a feature that requires a
package, it might make sense to have the package in a separate commit then have
a subsequent commit that uses it.

Remember, you're telling part of the story with the commit message. Don't make
your chapter weird.

### Review

Code review comments may be added to your pull request. Discuss, then make the
suggested modifications and push additional commits to your feature branch. Post
a comment after pushing. New commits show up in the pull request automatically,
but the reviewers are notified only when you comment.

Pull requests must be cleanly rebased on top of master without multiple branches
mixed into the PR.

> **Git tip**: If your PR no longer merges cleanly, use `rebase master` in your
> feature branch to update your pull request rather than `merge master`.

Before you make a pull request, squash your commits into logical units of work
using `git rebase -i` and `git push -f`. A logical unit of work is a consistent
set of patches that should be reviewed together: for example, upgrading the
version of a vendored dependency and taking advantage of its now available new
feature constitute two separate units of work. Implementing a new function and
calling it in another file constitute a single logical unit of work. The very
high majority of submissions should have a single commit, so if in doubt: squash
down to one.

- After every commit, [make sure the test suite passes](#run-the-unit--and-integration-tests).
  Include documentation changes in the same pull request so that a revert would
  remove all traces of the feature or fix.
- Include an issue reference like `closes #XXXX` or `fixes #XXXX` in the PR
  description that close an issue. Including references automatically closes
  the issue on a merge.
- Do not add yourself to the `AUTHORS` file, as it is regenerated regularly
  from the Git history.
- See the [Coding Style](#coding-style) for further guidelines.

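
As a concrete sketch of the rebase-and-squash flow described above (the branch
name is a placeholder):

```
# Update the feature branch onto the latest master instead of merging.
$ git fetch origin
$ git rebase origin/master

# Interactively squash work-in-progress commits into logical units.
$ git rebase -i origin/master

# History was rewritten, so the branch needs a force push.
$ git push -f origin my-feature-branch
```
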
### Merge approval

Project maintainers use LGTM (Looks Good To Me) in comments on the code review to
indicate acceptance, or use the Github review approval feature.


## Coding Style

Unless explicitly stated, we follow all coding guidelines from the Go
community. While some of these standards may seem arbitrary, they somehow seem
to result in a solid, consistent codebase.

It is possible that the code base does not currently comply with these
guidelines. We are not looking for a massive PR that fixes this, since that
goes against the spirit of the guidelines. All new contributions should make a
best effort to clean up and make the code base better than they left it.
Obviously, apply your best judgement. Remember, the goal here is to make the
code base easier for humans to navigate and understand. Always keep that in
mind when nudging others to comply.

The rules:

1. All code should be formatted with `gofmt -s`.
2. All code should pass the default levels of
   [`golint`](https://github.com/golang/lint).
3. All code should follow the guidelines covered in [Effective
   Go](http://golang.org/doc/effective_go.html) and [Go Code Review
   Comments](https://github.com/golang/go/wiki/CodeReviewComments).
4. Comment the code. Tell us the why, the history and the context.
5. Document _all_ declarations and methods, even private ones. Declare
   expectations, caveats and anything else that may be important. If a type
   gets exported, having the comments already there will ensure it's ready.
6. Variable name length should be proportional to its context and no longer.
   `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`.
   In practice, short methods will have short variable names and globals will
   have longer names.
7. No underscores in package names. If you need a compound name, step back,
   and re-examine why you need a compound name. If you still think you need a
   compound name, lose the underscore.
8. No utils or helpers packages. If a function is not general enough to
   warrant its own package, it has not been written generally enough to be a
   part of a util package. Just leave it unexported and well-documented.
9. All tests should run with `go test` and outside tooling should not be
   required. No, we don't need another unit testing framework. Assertion
   packages are acceptable if they provide _real_ incremental value.
10. Even though we call these "rules" above, they are actually just
    guidelines. Since you've read all the rules, you now know that.

If you are having trouble getting into the mood of idiomatic Go, we recommend
reading through [Effective Go](https://golang.org/doc/effective_go.html). The
[Go Blog](https://blog.golang.org) is also a great resource.
.github/workflows/build.yml (vendored, new file, 213 lines)

@@ -0,0 +1,213 @@
name: build

on:
  workflow_dispatch:
  push:
    branches:
      - 'master'
    tags:
      - 'v*'
  pull_request:
    branches:
      - 'master'

env:
  REPO_SLUG: "docker/buildx-bin"
  REPO_SLUG_ORIGIN: "moby/buildkit:master"
  CACHEKEY_BINARIES: "binaries"
  PLATFORMS: "linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64,linux/s390x,linux/ppc64le,linux/riscv64"

jobs:
  base:
    runs-on: ubuntu-latest
    steps:
      -
        name: Checkout
        uses: actions/checkout@v2
      -
        name: Cache ${{ env.CACHEKEY_BINARIES }}
        uses: actions/cache@v2
        with:
          path: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
          key: ${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        with:
          driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
      -
        name: Build ${{ env.CACHEKEY_BINARIES }}
        run: |
          ./hack/build_ci_first_pass binaries
        env:
          CACHEDIR_FROM: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
          CACHEDIR_TO: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}-new
      -
        # FIXME: Temp fix for https://github.com/moby/buildkit/issues/1850
        name: Move cache
        run: |
          rm -rf /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
          mv /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}-new /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}

  test:
    runs-on: ubuntu-latest
    needs: [base]
    steps:
      -
        name: Checkout
        uses: actions/checkout@v2
      -
        name: Cache ${{ env.CACHEKEY_BINARIES }}
        uses: actions/cache@v2
        with:
          path: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
          key: ${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        with:
          driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
      -
        name: Test
        run: |
          make test
        env:
          TEST_COVERAGE: 1
          TESTFLAGS: -v --parallel=6 --timeout=20m
          CACHEDIR_FROM: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
      -
        name: Send to Codecov
        uses: codecov/codecov-action@v1
        with:
          file: ./coverage/coverage.txt

  cross:
    runs-on: ubuntu-latest
    needs: [base]
    steps:
      -
        name: Checkout
        uses: actions/checkout@v2
      -
        name: Cache ${{ env.CACHEKEY_BINARIES }}
        uses: actions/cache@v2
        with:
          path: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
          key: ${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        with:
          driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
      -
        name: Cross
        run: |
          make cross
        env:
          TARGETPLATFORM: ${{ env.PLATFORMS }},darwin/amd64,darwin/arm64,windows/amd64,windows/arm64
          CACHEDIR_FROM: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}

  binaries:
    runs-on: ubuntu-latest
    needs: [test, cross]
    env:
      RELEASE_OUT: ./release-out
    steps:
      -
        name: Checkout
        uses: actions/checkout@v2
      -
        name: Prepare
        id: prep
        run: |
          TAG=pr
          if [[ $GITHUB_REF == refs/tags/v* ]]; then
            TAG=${GITHUB_REF#refs/tags/}
          elif [[ $GITHUB_REF == refs/heads/* ]]; then
            TAG=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g')
          fi
          echo ::set-output name=tag::${TAG}
      -
        name: Cache ${{ env.CACHEKEY_BINARIES }}
        uses: actions/cache@v2
        with:
          path: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
          key: ${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        with:
          driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
      -
        name: Build ${{ steps.prep.outputs.tag }}
        run: |
          ./hack/release ${{ env.RELEASE_OUT }}
        env:
          PLATFORMS: ${{ env.PLATFORMS }},darwin/amd64,darwin/arm64,windows/amd64,windows/arm64
          CACHEDIR_FROM: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
      -
        name: Upload artifacts
        uses: actions/upload-artifact@v2
        with:
          name: buildx
          path: ${{ env.RELEASE_OUT }}/*
          if-no-files-found: error
      -
        name: Docker meta
        id: meta
        uses: docker/metadata-action@v3
        with:
          images: |
            ${{ env.REPO_SLUG }}
          tags: |
            type=ref,event=branch
            type=ref,event=pr
            type=semver,pattern={{version}}
      -
        name: Login to DockerHub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      -
        name: Build and push image
        uses: docker/build-push-action@v2
        with:
          context: .
          target: binaries
          push: ${{ github.event_name != 'pull_request' }}
          cache-from: type=local,src=/tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
          platforms: ${{ env.PLATFORMS }},darwin/amd64,darwin/arm64,windows/amd64,windows/arm64
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
      -
        name: GitHub Release
        if: startsWith(github.ref, 'refs/tags/v')
        uses: softprops/action-gh-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          draft: true
          files: ${{ env.RELEASE_OUT }}/*
          name: ${{ steps.prep.outputs.tag }}
.github/workflows/godev.yml (vendored, new file, 25 lines)

@@ -0,0 +1,25 @@
# Workflow used to make a request to proxy.golang.org to refresh cache on https://pkg.go.dev/github.com/docker/buildx
# when a release of buildx is produced
name: godev

on:
  push:
    tags:
      - 'v*'

jobs:
  update:
    runs-on: ubuntu-latest
    steps:
      -
        name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.13
      -
        name: Call pkg.go.dev
        run: |
          go get github.com/${GITHUB_REPOSITORY}@${GITHUB_REF#refs/tags/}
        env:
          GO111MODULE: on
          GOPROXY: https://proxy.golang.org
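
Expanded by hand, the templated `go get` above amounts to something like the
following (the tag value is illustrative):

```
$ GO111MODULE=on GOPROXY=https://proxy.golang.org \
    go get github.com/docker/buildx@v0.6.0-rc1
```
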
.github/workflows/validate.yml (vendored, new file, 39 lines)

@@ -0,0 +1,39 @@
name: validate

on:
  workflow_dispatch:
  push:
    branches:
      - 'master'
    tags:
      - 'v*'
  pull_request:
    branches:
      - 'master'

env:
  REPO_SLUG_ORIGIN: "moby/buildkit:master"

jobs:
  validate:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        target:
          - lint
          - validate-vendor
          - validate-docs
    steps:
      -
        name: Checkout
        uses: actions/checkout@v2
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        with:
          driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
      -
        name: Run
        run: |
          make ${{ matrix.target }}
.gitignore (vendored, 4 lines changed)

@@ -1,2 +1,4 @@
bin
cross-out
coverage
cross-out
release-out
.golangci.yml (new file, 30 lines)

@@ -0,0 +1,30 @@
run:
  timeout: 10m
  skip-files:
    - ".*\\.pb\\.go$"

  modules-download-mode: vendor

  build-tags:

linters:
  enable:
    - gofmt
    - govet
    - deadcode
    - goimports
    - ineffassign
    - misspell
    - unused
    - varcheck
    - golint
    - staticcheck
    - typecheck
    - structcheck
  disable-all: true

issues:
  exclude-rules:
    - linters:
        - golint
      text: "stutters"
.mailmap (new file, 6 lines)

@@ -0,0 +1,6 @@
# This file lists all individuals having contributed content to the repository.
# For how it is generated, see `hack/generate-authors`.

Tibor Vass <tibor@docker.com>
Tibor Vass <tibor@docker.com> <tiborvass@users.noreply.github.com>
Tõnis Tiigi <tonistiigi@gmail.com>
.travis.yml (deleted, 35 lines)

@@ -1,35 +0,0 @@
dist: trusty
sudo: required

install:
  - docker run --name buildkit --rm -d --privileged -p 1234:1234 $REPO_SLUG_ORIGIN --addr tcp://0.0.0.0:1234
  - sudo docker cp buildkit:/usr/bin/buildctl /usr/bin/
  - export BUILDKIT_HOST=tcp://0.0.0.0:1234

env:
  global:
    - PLATFORMS="linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64,linux/s390x,linux/ppc64le"
    - CROSS_PLATFORMS="${PLATFORMS},darwin/amd64,windows/amd64"
    - PREFER_BUILDCTL="1"

script:
  - make binaries validate-all && TARGETPLATFORM="${CROSS_PLATFORMS}" ./hack/cross


deploy:
  - provider: script
    script: PLATFORMS="${CROSS_PLATFORMS}" ./hack/release $TRAVIS_TAG release-out
    on:
      repo: docker/buildx
      tags: true
      condition: $TRAVIS_TAG =~ ^v[0-9]
  - provider: releases
    api_key:
      secure: "VKVL+tyS3BfqjM4VMGHoHJbcKY4mqq4AGrclVEvBnt0gm1LkGeKxSheCZgF1EC4oSV8rCy6dkoRWL0PLkl895MIl20Z4v53o1NOQ4Fn0A+eptnrld8jYUkL5PcD+kdEqv2GkBn7vO6E/fwYY/wH9FYlE+fXUa0c/YQGqNGS+XVDtgkftqBV+F2EzaIwk+D+QClFBRmKvIbXrUQASi1K6K2eT3gvzR4zh679TSdI2nbnTKtE06xG1PBFVmb1Ux3/Jz4yHFvf2d3M1mOyqIBsozKoyxisiFQxnm3FjhPrdlZJ9oy/nsQM3ahQKJ3DF8hiLI1LxcxRa6wo//t3uu2eJSYl/c5nu0T7gVw4sChQNy52fUhEGoDTDwYoAxsLSDXcpj1jevRsKvxt/dh2e2De1a9HYj5oM+z2O+pcyiY98cKDbhe2miUqUdiYMBy24xUunB46zVcJF3pIqCYtw5ts8ES6Ixn3u+4OGV/hMDrVdiG2bOZtNVkdbKMEkOEBGa3parPJ69jh6og639kdAD3DFxyZn3YKYuJlcNShn3tj6iPokBYhlLwwf8vuEV7gK7G0rDS9yxuF03jgkwpBBF2wy+u1AbJv241T7v2ZB8H8VlYyHA0E5pnoWbw+lIOTy4IAc8gIesMvDuFFi4r1okhiAt/24U0p4aAohjh1nPuU3spY="
    file: release-out/**/*
    skip_cleanup: true
    file_glob: true
    on:
      repo: docker/buildx
      tags: true
      condition: $TRAVIS_TAG =~ ^v[0-9]
.yamllint.yml (new file, 13 lines)

@@ -0,0 +1,13 @@
ignore: |
  /vendor

extends: default

yaml-files:
  - '*.yaml'
  - '*.yml'

rules:
  truthy: disable
  line-length: disable
  document-start: disable
AUTHORS (new file, 7 lines)

@@ -0,0 +1,7 @@
# This file lists all individuals having contributed content to the repository.
# For how it is generated, see `scripts/generate-authors.sh`.

Bin Du <bindu@microsoft.com>
Brian Goff <cpuguy83@gmail.com>
Tibor Vass <tibor@docker.com>
Tõnis Tiigi <tonistiigi@gmail.com>
Dockerfile (26 lines changed)

@@ -1,15 +1,25 @@
# syntax=docker/dockerfile:1.1-experimental
# syntax=docker/dockerfile:1.2

ARG DOCKERD_VERSION=19.03-rc
ARG DOCKERD_VERSION=19.03
ARG CLI_VERSION=19.03

FROM docker:$DOCKERD_VERSION AS dockerd-release

# xgo is a helper for golang cross-compilation
FROM --platform=$BUILDPLATFORM tonistiigi/xx:golang@sha256:6f7d999551dd471b58f70716754290495690efa8421e0a1fcf18eb11d0c0a537 AS xgo
# xx is a helper for cross-compilation
FROM --platform=$BUILDPLATFORM tonistiigi/xx@sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04 AS xx

FROM --platform=$BUILDPLATFORM golang:1.12-alpine AS gobase
COPY --from=xgo / /
FROM --platform=$BUILDPLATFORM golang:1.16-alpine AS golatest

FROM golatest AS go-linux
FROM golatest AS go-darwin
FROM golatest AS go-windows-amd64
FROM golatest AS go-windows-386
FROM golatest AS go-windows-arm
FROM --platform=$BUILDPLATFORM golang:1.17beta1-alpine AS go-windows-arm64
FROM go-windows-${TARGETARCH} AS go-windows

FROM go-${TARGETOS} AS gobase
COPY --from=xx / /
RUN apk add --no-cache file git
ENV GOFLAGS=-mod=vendor
WORKDIR /src

@@ -26,8 +36,8 @@ ARG TARGETPLATFORM
RUN --mount=target=. --mount=target=/root/.cache,type=cache \
  --mount=target=/go/pkg/mod,type=cache \
  --mount=source=/tmp/.ldflags,target=/tmp/.ldflags,from=buildx-version \
  set -x; go build -ldflags "$(cat /tmp/.ldflags)" -o /usr/bin/buildx ./cmd/buildx && \
  file /usr/bin/buildx && file /usr/bin/buildx | egrep "statically linked|Mach-O|Windows"
  set -x; xx-go build -ldflags "$(cat /tmp/.ldflags)" -o /usr/bin/buildx ./cmd/buildx && \
  xx-verify --static /usr/bin/buildx

FROM buildx-build AS integration-tests
COPY . .
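
A sketch of how a multi-stage file like this is typically exercised; the
`binaries` target is the one the build-push step in the CI workflow above
uses, and the output path is illustrative:

```
# Cross-compile buildx for another platform and export the result
# to a local directory instead of loading it into the Docker engine.
$ docker buildx build --target binaries \
    --platform linux/arm64 \
    --output type=local,dest=./bin .
```
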
LICENSE (new file, 202 lines)

@@ -0,0 +1,202 @@
(The full, unmodified text of the Apache License, Version 2.0, January 2004,
http://www.apache.org/licenses/, including the appendix describing how to
apply the license to a work.)
MAINTAINERS (new file, 204 lines)

@@ -0,0 +1,204 @@
# Buildx maintainers file
#
# This file describes the maintainer groups within the project.
# More detail on Moby project governance is available in the
# https://github.com/moby/moby/blob/master/project/GOVERNANCE.md file.
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant
# parser.
#

[Rules]

    [Rules.maintainers]

    title = "What is a maintainer?"

    text = """
There are different types of maintainers, with different
responsibilities, but all maintainers have 3 things in common:

1) They share responsibility in the project's success.
2) They have made a long-term, recurring time investment to improve
   the project.
3) They spend that time doing whatever needs to be done, not
   necessarily what is the most interesting or fun.

Maintainers are often under-appreciated, because their work is harder
to appreciate. It's easy to appreciate a really cool and technically
advanced feature. It's harder to appreciate the absence of bugs, the
slow but steady improvement in stability, or the reliability of a
release process. But those things distinguish a good project from a
great one.
"""

    [Rules.adding-maintainers]

    title = "How are maintainers added?"

    text = """
Maintainers are first and foremost contributors that have shown they
are committed to the long term success of a project. Contributors
wanting to become maintainers are expected to be deeply involved in
contributing code, pull request review, and triage of issues in the
project for more than three months.

Just contributing does not make you a maintainer, it is about building
trust with the current maintainers of the project and being a person
that they can depend on and trust to make decisions in the best
interest of the project.

Periodically, the existing maintainers curate a list of contributors
that have shown regular activity on the project over the prior
months. From this list, maintainer candidates are selected.

After a candidate has been announced, the existing maintainers are
given five business days to discuss the candidate, raise objections
and cast their vote. Candidates must be approved by at least 66% of
the current maintainers by adding their vote on the slack
channel. Only maintainers of the repository that the candidate is
proposed for are allowed to vote.

If a candidate is approved, a maintainer will contact the candidate to
invite the candidate to open a pull request that adds the contributor
to the MAINTAINERS file. The candidate becomes a maintainer once the
pull request is merged.
"""

    [Rules.stepping-down-policy]

    title = "Stepping down policy"

    text = """
Life priorities, interests, and passions can change. If you're a
maintainer but feel you must remove yourself from the list, inform
other maintainers that you intend to step down, and if possible, help
find someone to pick up your work. At the very least, ensure your
work can be continued where you left off.

After you've informed other maintainers, create a pull request to
remove yourself from the MAINTAINERS file.
"""

    [Rules.inactive-maintainers]

    title = "Removal of inactive maintainers"

    text = """
Similar to the procedure for adding new maintainers, existing
maintainers can be removed from the list if they do not show
significant activity on the project. Periodically, the maintainers
review the list of maintainers and their activity over the last three
months.

If a maintainer has shown insufficient activity over this period, a
neutral person will contact the maintainer to ask if they want to
continue being a maintainer. If the maintainer decides to step down as
a maintainer, they open a pull request to be removed from the
MAINTAINERS file.

If the maintainer wants to remain a maintainer, but is unable to
perform the required duties they can be removed with a vote of at
least 66% of the current maintainers. The voting period is five
business days. Issues related to a maintainer's performance should be
discussed with them among the other maintainers so that they are not
surprised by a pull request removing them.
"""

    [Rules.DCO]

    title = "Helping contributors with the DCO"

    text = """
The [DCO or `Sign your work`](
https://github.com/moby/buildkit/blob/master/CONTRIBUTING.md#sign-your-work)
requirement is not intended as a roadblock or speed bump.

Some BuildKit contributors are not as familiar with `git`, or have
used a web based editor, and thus asking them to `git commit --amend
-s` is not the best way forward.

In this case, maintainers can update the commits based on clause (c)
of the DCO. The most trivial way for a contributor to allow the
maintainer to do this, is to add a DCO signature in a pull request's
comment, or a maintainer can simply note that the change is
sufficiently trivial that it does not substantially change the
existing contribution - i.e., a spelling change.

When you add someone's DCO, please also add your own to keep a log.
"""

    [Rules."no direct push"]

    title = "I'm a maintainer. Should I make pull requests too?"

    text = """
Yes. Nobody should ever push to master directly. All changes should be
made through a pull request.
"""

    [Rules.meta]

    title = "How is this process changed?"

    text = "Just like everything else: by making a pull request :)"


[Org]

    [Org.Maintainers]

    people = [
        "akihirosuda",
        "crazy-max",
        "tiborvass",
        "tonistiigi",
    ]

    [Org.Curators]

    # The curators help ensure that incoming issues and pull requests are properly triaged and
    # that our various contribution and reviewing processes are respected. With their knowledge of
    # the repository activity, they can also guide contributors to relevant material or
    # discussions.
    #
    # They are neither code nor docs reviewers, so they are never expected to merge. They can
    # however:
    # - close an issue or pull request when it's an exact duplicate
    # - close an issue or pull request when it's inappropriate or off-topic

    people = [
        "thajeztah",
    ]

[people]

    # A reference list of all people associated with the project.
    # All other sections should refer to people by their canonical key
    # in the people section.

    [people.akihirosuda]
    Name = "Akihiro Suda"
    Email = "akihiro.suda.cz@hco.ntt.co.jp"
    GitHub = "AkihiroSuda"

    [people.crazy-max]
    Name = "Kevin Alvarez"
    Email = "contact@crazymax.dev"
    GitHub = "crazy-max"

    [people.thajeztah]
    Name = "Sebastiaan van Stijn"
    Email = "github@gone.nl"
    GitHub = "thaJeztah"

    [people.tiborvass]
    Name = "Tibor Vass"
    Email = "tibor@docker.com"
    GitHub = "tiborvass"

    [people.tonistiigi]
    Name = "Tõnis Tiigi"
    Email = "tonis@docker.com"
    GitHub = "tonistiigi"
Makefile (18 lines changed)

@@ -7,9 +7,12 @@ binaries:
binaries-cross:
	EXPORT_LOCAL=cross-out ./hack/cross

cross:
	./hack/cross

install: binaries
	mkdir -p ~/.docker/cli-plugins
	cp bin/buildx ~/.docker/cli-plugins/docker-buildx
	install bin/buildx ~/.docker/cli-plugins/docker-buildx

lint:
	./hack/lint

@@ -20,9 +23,18 @@ test:
validate-vendor:
	./hack/validate-vendor

validate-all: lint test validate-vendor
validate-docs:
	./hack/validate-docs

validate-all: lint test validate-vendor validate-docs

vendor:
	./hack/update-vendor

.PHONY: vendor lint shell binaries install binaries-cross validate-all
docs:
	./hack/update-docs

generate-authors:
	./hack/generate-authors

.PHONY: vendor lint shell binaries install binaries-cross validate-all generate-authors validate-docs docs
README.md (605 lines changed)

@@ -1,96 +1,103 @@
# buildx
### Docker CLI plugin for extended build capabilities with BuildKit

_buildx is Tech Preview_
[](https://pkg.go.dev/github.com/docker/buildx)
[](https://github.com/docker/buildx/actions?query=workflow%3Abuild)
[](https://goreportcard.com/report/github.com/docker/buildx)
[](https://codecov.io/gh/docker/buildx)

### TL;DR

`buildx` is a Docker CLI plugin for extended build capabilities with [BuildKit](https://github.com/moby/buildkit).

Key features:

- Familiar UI from `docker build`
- Full BuildKit capabilities with container driver
- Multiple builder instance support
- Multi-node builds for cross-platform images
- Compose build support
- WIP: High-level build constructs (`bake`)
- TODO: In-container driver support
- High-level build constructs (`bake`)
- In-container driver support (both Docker and Kubernetes)

# Table of Contents

- [Installing](#installing)
  - [Docker](#docker)
  - [Binary release](#binary-release)
  - [From `Dockerfile`](#from-dockerfile)
- [Building](#building)
  + [with Docker 18.09+](#with-docker-1809)
  + [with buildx or Docker 19.03](#with-buildx-or-docker-1903)
  - [with Docker 18.09+](#with-docker-1809)
  - [with buildx or Docker 19.03](#with-buildx-or-docker-1903)
- [Getting started](#getting-started)
  * [Building with buildx](#building-with-buildx)
  * [Working with builder instances](#working-with-builder-instances)
  * [Building multi-platform images](#building-multi-platform-images)
  * [High-level build options](#high-level-build-options)
- [Documentation](#documentation)
  + [`buildx build [OPTIONS] PATH | URL | -`](#buildx-build-options-path--url---)
  + [`buildx create [OPTIONS] [CONTEXT|ENDPOINT]`](#buildx-create-options-contextendpoint)
  + [`buildx use NAME`](#buildx-use-name)
  + [`buildx inspect [NAME]`](#buildx-inspect-name)
  + [`buildx ls`](#buildx-ls)
  + [`buildx stop [NAME]`](#buildx-stop-name)
  + [`buildx rm [NAME]`](#buildx-rm-name)
  + [`buildx bake [OPTIONS] [TARGET...]`](#buildx-bake-options-target)
  + [`buildx imagetools create [OPTIONS] [SOURCE] [SOURCE...]`](#buildx-imagetools-create-options-source-source)
  + [`buildx imagetools inspect NAME`](#buildx-imagetools-inspect-name)
  - [Building with buildx](#building-with-buildx)
  - [Working with builder instances](#working-with-builder-instances)
  - [Building multi-platform images](#building-multi-platform-images)
  - [High-level build options](#high-level-build-options)
- [Documentation](docs/reference)
  - [`buildx build [OPTIONS] PATH | URL | -`](docs/reference/buildx_build.md)
  - [`buildx create [OPTIONS] [CONTEXT|ENDPOINT]`](docs/reference/buildx_create.md)
  - [`buildx use NAME`](docs/reference/buildx_use.md)
  - [`buildx inspect [NAME]`](docs/reference/buildx_inspect.md)
  - [`buildx ls`](docs/reference/buildx_ls.md)
  - [`buildx stop [NAME]`](docs/reference/buildx_stop.md)
  - [`buildx rm [NAME]`](docs/reference/buildx_rm.md)
  - [`buildx bake [OPTIONS] [TARGET...]`](docs/reference/buildx_bake.md)
  - [`buildx imagetools create [OPTIONS] [SOURCE] [SOURCE...]`](docs/reference/buildx_imagetools_create.md)
  - [`buildx imagetools inspect NAME`](docs/reference/buildx_imagetools_inspect.md)
- [Setting buildx as default builder in Docker 19.03+](#setting-buildx-as-default-builder-in-docker-1903)
- [Contributing](#contributing)


# Installing

Using `buildx` as a docker CLI plugin requires using Docker 19.03.0 beta. A limited set of functionality works with older versions of Docker when invoking the binary directly.
Using `buildx` as a docker CLI plugin requires using Docker 19.03 or newer. A limited set of functionality works with older versions of Docker when invoking the binary directly.

### Docker Desktop (Edge)
### Docker

`buildx` is included with Docker Desktop Edge builds since 19.03.0-beta3.

For more information see https://docs.docker.com/docker-for-mac/edge-release-notes/

### Docker CE nightly builds

`buildx` comes bundled with the Docker CE nightly builds.
- Mac: https://download.docker.com/mac/static/nightly/
- Linux:
```
$ # uncomment next line to uninstall previous Docker CE installation if present
$ # apt purge docker-ce docker-ce-cli
$ curl -fsSL https://get.docker.com/ -o docker-install.sh
$ CHANNEL=nightly sh docker-install.sh
```
`buildx` comes bundled with Docker Desktop and in latest Docker CE packages, but may not be included in all Linux distros (in which case follow the binary release instructions).

### Binary release

Download the latest binary release from https://github.com/docker/buildx/releases/latest and copy it to `~/.docker/cli-plugins` folder with name `docker-buildx`.

Change the permissions to make it executable:
```sh
chmod a+x ~/.docker/cli-plugins/docker-buildx
```

### From `Dockerfile`

Here is how to use buildx inside a Dockerfile through the [`docker/buildx-bin`](https://hub.docker.com/r/docker/buildx-bin) image:

```Dockerfile
FROM docker
COPY --from=docker/buildx-bin:latest /buildx /usr/libexec/docker/cli-plugins/docker-buildx
RUN docker buildx version
```

After installing, you can run `docker buildx` to see the new commands.

# Building

### with buildx or Docker 19.03+
```
$ export DOCKER_BUILDKIT=1
$ docker build --platform=local -o . git://github.com/docker/buildx
$ mkdir -p ~/.docker/cli-plugins
$ mv buildx ~/.docker/cli-plugins/docker-buildx
```

### with Docker 18.09+
```
$ git clone git://github.com/docker/buildx && cd buildx
$ make install
```

### with buildx or Docker 19.03
```
$ export DOCKER_BUILDKIT=1
$ # choose a platform that matches your architecture
$ docker build --platform=[darwin,windows,linux,linux/arm64] -o . git://github.com/docker/buildx
$ mv buildx ~/.docker/cli-plugins/docker-buildx
```

# Getting started

## Building with buildx

Buildx is a Docker CLI plugin that extends the `docker build` command with the full support of the features provided by [Moby BuildKit](https://github.com/moby/buildkit) builder toolkit. It provides the same user experience as `docker build` with many new features like creating scoped builder instances and building against multiple nodes concurrently.

After installation, buildx can be accessed through the `docker buildx` command. `docker buildx build` is the command for starting a new build.
After installation, buildx can be accessed through the `docker buildx` command with Docker 19.03. `docker buildx build` is the command for starting a new build. With Docker versions older than 19.03 buildx binary can be called directly to access the `docker buildx` subcommands.

```
$ docker buildx build .
```
@@ -103,7 +110,7 @@ Buildx will always build using the BuildKit engine and does not require `DOCKER_

Buildx build command supports the features available for `docker build` including the new features in Docker 19.03 such as outputs configuration, inline build caching or specifying target platform. In addition, buildx supports new features not yet available for regular `docker build` like building manifest lists, distributed caching, exporting build results to OCI image tarballs etc.

Buildx is supposed to be flexible and can be run in different configurations that are exposed through a driver concept. Currently, we support a "docker" driver that uses the BuildKit library bundled into the docker daemon binary, and a "docker-container" driver that automatically launches BuildKit inside a Docker container. We plan to add more drivers in the future, for example, one that would allow running buildx inside an (unprivileged) container.
Buildx is supposed to be flexible and can be run in different configurations that are exposed through a driver concept. Currently, we support a "docker" driver that uses the BuildKit library bundled into the Docker daemon binary, and a "docker-container" driver that automatically launches BuildKit inside a Docker container. We plan to add more drivers in the future, for example, one that would allow running buildx inside an (unprivileged) container.

The user experience of using buildx is very similar across drivers, but there are some features that are not currently supported by the "docker" driver, because the BuildKit library bundled into docker daemon currently uses a different storage component. In contrast, all images built with "docker" driver are automatically added to the "docker images" view by default, whereas when using other drivers the method for outputting an image needs to be selected with `--output`.

@@ -116,7 +123,7 @@ Buildx allows you to create new instances of isolated builders. This can be used

New instances can be created with `docker buildx create` command. This will create a new builder instance with a single node based on your current configuration. To use a remote node you can specify the `DOCKER_HOST` or remote context name while creating the new builder. After creating a new instance you can manage its lifecycle with the `inspect`, `stop` and `rm` commands and list all available builders with `ls`. After creating a new builder you can also append new nodes to it.

To switch between different builders use `docker buildx use <name>`. After running this command the build commands would automatically keep using this builder.
To switch between different builders, use `docker buildx use <name>`. After running this command the build commands would automatically keep using this builder.

Docker 19.03 also features a new `docker context` command that can be used for giving names for remote Docker API endpoints. Buildx integrates with `docker context` so that all of your contexts automatically get a default builder instance. While creating a new builder instance or when adding a node to it you can also set the context name as the target.

@@ -128,7 +135,11 @@ When invoking a build, the `--platform` flag can be used to specify the target p

Multi-platform images can be built mainly by three different strategies that are all supported by buildx and Dockerfiles. You can use the QEMU emulation support in the kernel, build on multiple native nodes using the same builder instance or use a stage in Dockerfile to cross-compile to different architectures.

QEMU is the easiest way to get started if your node already supports it (e.g. if you are using Docker Desktop). It requires no changes to your Dockerfile and BuildKit will automatically detect the secondary architectures that are available. When BuildKit needs to run a binary for a different architecture it will automatically load it through a binary registered in the binfmt_misc handler.
QEMU is the easiest way to get started if your node already supports it (e.g. if you are using Docker Desktop). It requires no changes to your Dockerfile and BuildKit will automatically detect the secondary architectures that are available. When BuildKit needs to run a binary for a different architecture it will automatically load it through a binary registered in the binfmt_misc handler. For QEMU binaries registered with binfmt_misc on the host OS to work transparently inside containers they must be registered with the fix_binary flag. This requires a kernel >= 4.8 and binfmt-support >= 2.1.7. You can check for proper registration by checking if `F` is among the flags in `/proc/sys/fs/binfmt_misc/qemu-*`. While Docker Desktop comes preconfigured with binfmt_misc support for additional platforms, for other installations it likely needs to be installed using the [`tonistiigi/binfmt`](https://github.com/tonistiigi/binfmt) image.

```
$ docker run --privileged --rm tonistiigi/binfmt --install all
```
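
To verify the registration on the host, you can read the corresponding `binfmt_misc` entry and check that `F` is among the flags; a sketch, assuming the `qemu-aarch64` emulator is installed (entry names and flag sets vary by host):

```
$ grep flags /proc/sys/fs/binfmt_misc/qemu-aarch64
flags: OCF
```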

Using multiple native nodes provides better support for more complicated cases not handled by QEMU and generally has better performance. Additional nodes can be added to the builder instance with the `--append` flag.

@@ -143,7 +154,7 @@ $ docker buildx build --platform linux/amd64,linux/arm64 .

Finally, depending on your project, the language that you use may have good support for cross-compilation. In that case, multi-stage builds in Dockerfiles can be effectively used to build binaries for the platform specified with `--platform` using the native architecture of the build node. Build arguments like `BUILDPLATFORM` and `TARGETPLATFORM` are available automatically inside your Dockerfile and can be leveraged by the processes running as part of your build.

```
FROM --platform $BUILDPLATFORM golang:alpine AS build
FROM --platform=$BUILDPLATFORM golang:alpine AS build
ARG TARGETPLATFORM
ARG BUILDPLATFORM
RUN echo "I am running on $BUILDPLATFORM, building for $TARGETPLATFORM" > /log
```
@@ -162,474 +173,7 @@ Currently, the bake command supports building images from compose files, similar

There is also support for custom build rules from HCL/JSON files allowing better code reuse and different target groups. The design of bake is in very early stages and we are looking for feedback from users.


# Documentation

### `buildx build [OPTIONS] PATH | URL | -`

The `buildx build` command starts a build using BuildKit. This command is similar to the UI of the `docker build` command and takes the same flags and arguments.

Options:

| Flag | Description |
| --- | --- |
| --add-host [] | Add a custom host-to-IP mapping (host:ip) |
| --build-arg [] | Set build-time variables |
| --cache-from [] | External cache sources (eg. user/app:cache, type=local,src=path/to/dir) |
| --cache-to [] | Cache export destinations (eg. user/app:cache, type=local,dest=path/to/dir) |
| --file string | Name of the Dockerfile (Default is 'PATH/Dockerfile') |
| --iidfile string | Write the image ID to the file |
| --label [] | Set metadata for an image |
| --load | Shorthand for --output=type=docker |
| --network string | Set the networking mode for the RUN instructions during build (default "default") |
| --no-cache | Do not use cache when building the image |
| --output [] | Output destination (format: type=local,dest=path) |
| --platform [] | Set target platform for build |
| --progress string | Set type of progress output (auto, plain, tty). Use plain to show container output (default "auto") |
| --pull | Always attempt to pull a newer version of the image |
| --push | Shorthand for --output=type=registry |
| --secret [] | Secret file to expose to the build: id=mysecret,src=/local/secret |
| --ssh [] | SSH agent socket or keys to expose to the build (format: default\|<id>[=<socket>\|<key>[,<key>]]) |
| --tag [] | Name and optionally a tag in the 'name:tag' format |
| --target string | Set the target build stage to build. |

For documentation on most of these flags refer to the `docker build` documentation at https://docs.docker.com/engine/reference/commandline/build/ . Here we document a subset of the new flags.

#### `--platform=value[,value]`

Set the target platform for the build. All `FROM` commands inside the Dockerfile without their own `--platform` flag will pull base images for this platform and this value will also be the platform of the resulting image. The default value will be the current platform of the buildkit daemon.

When using `docker-container` driver with `buildx`, this flag can accept multiple values as an input separated by a comma. With multiple values the result will be built for all of the specified platforms and joined together into a single manifest list.

If the `Dockerfile` needs to invoke the `RUN` command, the builder needs runtime support for the specified platform. In a clean setup, you can only execute `RUN` commands for your system architecture. If your kernel supports binfmt_misc https://en.wikipedia.org/wiki/Binfmt_misc launchers for secondary architectures buildx will pick them up automatically. Docker Desktop releases come with binfmt_misc automatically configured for `arm64` and `arm` architectures. You can see what runtime platforms your current builder instance supports by running `docker buildx inspect --bootstrap`.

Inside a `Dockerfile`, you can access the current platform value through the `TARGETPLATFORM` build argument. Please refer to the `docker build` documentation for the full description of automatic platform argument variants https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope .

The formatting for the platform specifier is defined in https://github.com/containerd/containerd/blob/v1.2.6/platforms/platforms.go#L63 .

Examples:
```
docker buildx build --platform=linux/arm64 .
docker buildx build --platform=linux/amd64,linux/arm64,linux/arm/v7 .
docker buildx build --platform=darwin .
```

#### `-o, --output=[PATH,-,type=TYPE[,KEY=VALUE]]`

Sets the export action for the build result. In `docker build` all builds finish by creating a container image and exporting it to `docker images`. `buildx` makes this step configurable allowing results to be exported directly to the client, OCI image tarballs, registry etc.

Supported export types are:

##### `local`

The `local` export type writes all result files to a directory on the client. The new files will be owned by the current user. On multi-platform builds, all results will be put in subdirectories by their platform.

Attribute key:

- `dest` - destination directory where files will be written

##### `tar`

The `tar` export type writes all result files as a single tarball on the client. On multi-platform builds all results will be put in subdirectories by their platform.

Attribute key:

- `dest` - destination path where tarball will be written. "-" writes to stdout.

##### `oci`

The `oci` export type writes the result image or manifest list as an OCI image layout tarball https://github.com/opencontainers/image-spec/blob/master/image-layout.md on the client.

Attribute key:

- `dest` - destination path where tarball will be written. "-" writes to stdout.

##### `docker`

The `docker` export type writes the single-platform result image as a Docker image specification tarball https://github.com/moby/moby/blob/master/image/spec/v1.2.md on the client. Tarballs created by this exporter are also OCI compatible.

Currently, multi-platform images cannot be exported with the `docker` export type. The most common use case for multi-platform images is to directly push to a registry (see [`registry`](#registry)).

Attribute keys:

- `dest` - destination path where tarball will be written. If not specified, the tar will be loaded automatically to the current docker instance.
- `context` - name for the docker context where to import the result

##### `image`

The `image` exporter writes the build result as an image or a manifest list. When using the `docker` driver the image will appear in `docker images`. Optionally, the image can be automatically pushed to a registry by specifying attributes.

Attribute keys:

- `name` - name (references) for the new image.
- `push` - boolean to automatically push the image.

##### `registry`

The `registry` exporter is a shortcut for `type=image,push=true`.


Buildx with the `docker` driver currently only supports the `local`, `tar` and `image` exporters. The `docker-container` driver supports all the exporters.

If just the path is specified as a value, `buildx` will use the local exporter with this path as the destination. If the value is "-", `buildx` will use the `tar` exporter and write to `stdout`.

Examples:

```
docker buildx build -o . .
docker buildx build -o outdir .
docker buildx build -o - - > out.tar
docker buildx build -o type=docker .
docker buildx build -o type=docker,dest=- . > myimage.tar
docker buildx build -t tonistiigi/foo -o type=registry
```

#### `--push`

Shorthand for [`--output=type=registry`](#registry). Will automatically push the build result to the registry.

#### `--load`

Shorthand for [`--output=type=docker`](#docker). Will automatically load the single-platform build result to `docker images`.
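
For instance, the two shorthands replace their longer `--output` equivalents (a sketch; the image names are illustrative):

```
$ docker buildx build --load -t myapp .
$ docker buildx build --push -t registry.example.com/myapp .
```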

#### `--cache-from=[NAME|type=TYPE[,KEY=VALUE]]`

Use an external cache source for a build. Supported types are `registry` and `local`. The `registry` source can import cache from a cache manifest or (special) image configuration on the registry. The `local` source can import cache from local files previously exported with `--cache-to`.

If no type is specified, the `registry` source is used with the specified reference.

The `docker` driver currently only supports importing build cache from the registry.

Examples:
```
docker buildx build --cache-from=user/app:cache .
docker buildx build --cache-from=user/app .
docker buildx build --cache-from=type=registry,ref=user/app .
docker buildx build --cache-from=type=local,src=path/to/cache .
```

#### `--cache-to=[NAME|type=TYPE[,KEY=VALUE]]`

Export build cache to an external cache destination. Supported types are `registry`, `local` and `inline`. Registry exports build cache to a cache manifest in the registry, local exports cache to a local directory on the client, and inline writes the cache metadata into the image configuration.

The `docker` driver currently only supports exporting inline cache metadata to the image configuration. Alternatively, `--build-arg BUILDKIT_INLINE_CACHE=1` can be used to trigger the inline cache exporter.

Attribute key:

- `mode` - Specifies how many layers are exported with the cache. `min` only exports layers already in the final build stage, `max` exports layers for all stages. Metadata is always exported for the whole build.

Examples:
```
docker buildx build --cache-to=user/app:cache .
docker buildx build --cache-to=type=inline .
docker buildx build --cache-to=type=registry,ref=user/app .
docker buildx build --cache-to=type=local,dest=path/to/cache .
```
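
As a sketch of combining attributes, the `mode` key can be appended to a registry cache export (the reference name is illustrative):

```
docker buildx build --cache-to=type=registry,ref=user/app:cache,mode=max .
```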


### `buildx create [OPTIONS] [CONTEXT|ENDPOINT]`

Create makes a new builder instance pointing to a docker context or endpoint, where context is the name of a context from `docker context ls` and endpoint is the address for a docker socket (eg. a `DOCKER_HOST` value).

By default, the current docker configuration is used for determining the context/endpoint value.

Builder instances are isolated environments where builds can be invoked. All docker contexts also get the default builder instance.

Options:

| Flag | Description |
| --- | --- |
| --append | Append a node to builder instead of changing it |
| --driver string | Driver to use (eg. docker-container) |
| --leave | Remove a node from builder instead of changing it |
| --name string | Builder instance name |
| --node string | Create/modify node with given name |
| --platform stringArray | Fixed platforms for current node |
| --use | Set the current builder instance |

#### `--driver DRIVER`

Sets the builder driver to be used. There are two available drivers, each with its own specificities; a usage sketch follows the list.

- `docker` - Uses the builder that is built into the docker daemon. With this driver, the [`--load`](#--load) flag is implied by default on `buildx build`. However, building multi-platform images or exporting cache is not currently supported.

- `docker-container` - Uses a BuildKit container that will be spawned via docker. With this driver, both building multi-platform images and exporting cache are supported. However, images built will not automatically appear in `docker images` (see [`build --load`](#--load)).
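
A minimal sketch of creating a container-based builder and switching to it in one step (the builder name is illustrative):

```
$ docker buildx create --driver docker-container --name mybuilder --use
mybuilder
$ docker buildx inspect --bootstrap
```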

#### `--append`

Changes the action of the command to append a new node to an existing builder specified by `--name`. Buildx will choose an appropriate node for a build based on the platforms it supports.

Example:
```
$ docker buildx create mycontext1
eager_beaver
$ docker buildx create --name eager_beaver --append mycontext2
eager_beaver
```

#### `--leave`

Changes the action of the command to remove a node from a builder. The builder needs to be specified with `--name`, and the node to remove is set with `--node`.

Example:
```
docker buildx create --name mybuilder --node mybuilder0 --leave
```

#### `--name NAME`

Specifies the name of the builder to be created or modified. If none is specified, one will be automatically generated.

#### `--node NODE`

Specifies the name of the node to be created or modified. If none is specified, it is the name of the builder it belongs to, with an index number suffix.

#### `--platform PLATFORMS`

Sets the platforms supported by the node. It expects a comma-separated list of platforms of the form OS/architecture/variant. The node will also automatically detect the platforms it supports, but manual values take priority over the detected ones and can be used when multiple nodes support building for the same platform.

Example:
```
docker buildx create --platform linux/amd64
docker buildx create --platform linux/arm64,linux/arm/v8
```

#### `--use`

Automatically switches the current builder to the newly created one. Equivalent to running `docker buildx use $(docker buildx create ...)`.

### `buildx use NAME`

Switches the current builder instance. Build commands invoked after this command will run on the specified builder. Alternatively, a context name can be used to switch to the default builder of that context.
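
For example, using the builder created in the `--append` example above:

```
docker buildx use eager_beaver
```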

### `buildx inspect [NAME]`

Shows information about the current or specified builder.

Example:
```
Name:   elated_tesla
Driver: docker-container

Nodes:
Name:      elated_tesla0
Endpoint:  unix:///var/run/docker.sock
Status:    running
Platforms: linux/amd64

Name:      elated_tesla1
Endpoint:  ssh://ubuntu@1.2.3.4
Status:    running
Platforms: linux/arm64, linux/arm/v7, linux/arm/v6
```

#### `--bootstrap`

Ensures that the builder is running before inspecting it. If the driver is `docker-container`, then `--bootstrap` starts the BuildKit container and waits until it is operational. Bootstrapping is done automatically during build, so it is usually not necessary to run it explicitly. The same BuildKit container is used during the lifetime of the associated builder node (as displayed in `buildx ls`).
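
A sketch of starting the current builder ahead of the first build:

```
$ docker buildx inspect --bootstrap
```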

### `buildx ls`

Lists all builder instances and the nodes for each instance.

Example:

```
docker buildx ls
NAME/NODE       DRIVER/ENDPOINT              STATUS   PLATFORMS
elated_tesla *  docker-container
  elated_tesla0 unix:///var/run/docker.sock  running  linux/amd64
  elated_tesla1 ssh://ubuntu@1.2.3.4         running  linux/arm64, linux/arm/v7, linux/arm/v6
default         docker
  default       default                      running  linux/amd64
```

Each builder has one or more nodes associated with it. The current builder's name is marked with a `*`.

### `buildx stop [NAME]`

Stops the specified or current builder. This will not prevent `buildx build` from restarting the builder. The implementation of stop depends on the driver.

### `buildx rm [NAME]`

Removes the specified or current builder. Attempting to remove the default builder is a no-op.

### `buildx bake [OPTIONS] [TARGET...]`

Bake is a high-level build command.

Each specified target will run in parallel as part of the build.

Options:

| Flag | Description |
| --- | --- |
| -f, --file stringArray | Build definition file |
| --no-cache | Do not use cache when building the image |
| --print | Print the options without building |
| --progress string | Set type of progress output (auto, plain, tty). Use plain to show container output (default "auto") |
| --pull | Always attempt to pull a newer version of the image |
| --set stringArray | Override target value (eg: target.key=value) |

#### `-f, --file FILE`

Specifies the bake definition file. The file can be a Docker Compose, JSON or HCL file. If multiple files are specified they are all read and the configurations are combined. By default, if no files are specified, the following are parsed:

- docker-compose.yml
- docker-compose.yaml
- docker-bake.json
- docker-bake.override.json
- docker-bake.hcl
- docker-bake.override.hcl
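
For instance, a definition file can be combined with its override (the `webapp-dev` target is defined in the file definition example below):

```
docker buildx bake -f docker-bake.hcl -f docker-bake.override.hcl webapp-dev
```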

#### `--no-cache`

Same as `build --no-cache`. Do not use cache when building the image.

#### `--print`

Prints the resulting options of the targets desired to be built, in JSON format, without starting a build.

```
$ docker buildx bake -f docker-bake.hcl --print db
{
  "target": {
    "db": {
      "context": "./",
      "dockerfile": "Dockerfile",
      "tags": [
        "docker.io/tiborvass/db"
      ]
    }
  }
}
```

#### `--progress`

Same as `build --progress`. Set type of progress output (auto, plain, tty). Use plain to show container output (default "auto").

#### `--pull`

Same as `build --pull`.

#### `--set target.key[.subkey]=value`

Override target configurations from the command line.

Example:
```
docker buildx bake --set target.args.mybuildarg=value
docker buildx bake --set target.platform=linux/arm64
```

#### File definition

In addition to compose files, bake supports a JSON and an equivalent HCL file format for defining build groups and targets.

A target reflects a single docker build invocation with the same options that you would specify for `docker build`. A group is a grouping of targets.

Multiple files can include the same target and the final build options will be determined by merging them together.

In the case of compose files, each service corresponds to a target.

A group can specify its list of targets with the `targets` option. A target can inherit build options by setting the `inherits` option to the list of targets or groups to inherit from.

Note: The design of the bake command is a work in progress; the user experience may change based on feedback.


Example HCL definition:

```
group "default" {
	targets = ["db", "webapp-dev"]
}

target "webapp-dev" {
	dockerfile = "Dockerfile.webapp"
	tags = ["docker.io/username/webapp"]
}

target "webapp-release" {
	inherits = ["webapp-dev"]
	platforms = ["linux/amd64", "linux/arm64"]
}

target "db" {
	dockerfile = "Dockerfile.db"
	tags = ["docker.io/username/db"]
}
```
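
A sketch of an equivalent JSON definition for two of the targets above; the top-level `group` and `target` maps mirror the HCL blocks (compare the `--print` output earlier):

```
{
  "group": {
    "default": {
      "targets": ["db", "webapp-dev"]
    }
  },
  "target": {
    "webapp-dev": {
      "dockerfile": "Dockerfile.webapp",
      "tags": ["docker.io/username/webapp"]
    },
    "db": {
      "dockerfile": "Dockerfile.db",
      "tags": ["docker.io/username/db"]
    }
  }
}
```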


### `buildx imagetools create [OPTIONS] [SOURCE] [SOURCE...]`

Imagetools contains commands for working with manifest lists in the registry. These commands are useful for inspecting multi-platform build results.

Create creates a new manifest list based on source manifests. The source manifests can be manifest lists or single-platform distribution manifests and must already exist in the registry where the new manifest is created. If only one source is specified, create performs a carbon copy.

Options:

| Flag | Description |
| --- | --- |
| --append | Append to existing manifest |
| --dry-run | Show final image instead of pushing |
| -f, --file stringArray | Read source descriptor from file |
| -t, --tag stringArray | Set reference for new image |

#### `--append`

Append appends the new sources to an existing manifest list in the destination.

#### `--dry-run`

Do not push the image, just show it.

#### `-f, --file FILE`

Reads sources from files. A source can be a manifest digest, a manifest reference, or a JSON OCI descriptor object.

#### `-t, --tag IMAGE`

Name of the image to be created.

Examples:

```
docker buildx imagetools create --dry-run alpine@sha256:5c40b3c27b9f13c873fefb2139765c56ce97fd50230f1f2d5c91e55dec171907 sha256:c4ba6347b0e4258ce6a6de2401619316f982b7bcc529f73d2a410d0097730204

docker buildx imagetools create -t tonistiigi/myapp -f image1 -f image2
```


### `buildx imagetools inspect NAME`

Shows details of an image in the registry.

Example:
```
$ docker buildx imagetools inspect alpine
Name:      docker.io/library/alpine:latest
MediaType: application/vnd.docker.distribution.manifest.list.v2+json
Digest:    sha256:28ef97b8686a0b5399129e9b763d5b7e5ff03576aa5580d6f4182a49c5fe1913

Manifests:
  Name:      docker.io/library/alpine:latest@sha256:5c40b3c27b9f13c873fefb2139765c56ce97fd50230f1f2d5c91e55dec171907
  MediaType: application/vnd.docker.distribution.manifest.v2+json
  Platform:  linux/amd64

  Name:      docker.io/library/alpine:latest@sha256:c4ba6347b0e4258ce6a6de2401619316f982b7bcc529f73d2a410d0097730204
  MediaType: application/vnd.docker.distribution.manifest.v2+json
  Platform:  linux/arm/v6

...
```

#### `--raw`

Raw prints the original JSON bytes instead of the formatted output.
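
For example (the output is the raw manifest list JSON):

```
$ docker buildx imagetools inspect --raw alpine
```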

[`buildx bake` Reference Docs](docs/reference/buildx_bake.md)

# Setting buildx as default builder in Docker 19.03+

@@ -640,20 +184,5 @@ To remove this alias, you can run `docker buildx uninstall`.

# Contributing

To enter a demo container environment and experiment, you may run:

```
$ make shell
```

To validate PRs before submitting them, you should run:

```
$ make validate-all
```

To generate new vendored files with go modules, run:

```
$ make vendor
```
Want to contribute to Buildx? Awesome! You can find information about contributing to this project in the [CONTRIBUTING.md](/.github/CONTRIBUTING.md)

bake/bake.go (618 lines changed)

@@ -2,158 +2,334 @@ package bake

import (
	"context"
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"regexp"
	"strconv"
	"strings"

	"github.com/docker/buildx/bake/hclparser"
	"github.com/docker/buildx/build"
	"github.com/docker/buildx/util/buildflags"
	"github.com/docker/buildx/util/platformutil"
	"github.com/docker/docker/pkg/urlutil"
	hcl "github.com/hashicorp/hcl/v2"
	"github.com/moby/buildkit/client/llb"
	"github.com/moby/buildkit/session/auth/authprovider"
	"github.com/pkg/errors"
)

func ReadTargets(ctx context.Context, files, targets, overrides []string) (map[string]Target, error) {
	var c Config
	for _, f := range files {
		cfg, err := ParseFile(f)
var httpPrefix = regexp.MustCompile(`^https?://`)
var gitURLPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`)

type File struct {
	Name string
	Data []byte
}

func defaultFilenames() []string {
	return []string{
		"docker-compose.yml",  // support app
		"docker-compose.yaml", // support app
		"docker-bake.json",
		"docker-bake.override.json",
		"docker-bake.hcl",
		"docker-bake.override.hcl",
	}
}

func ReadLocalFiles(names []string) ([]File, error) {
	isDefault := false
	if len(names) == 0 {
		isDefault = true
		names = defaultFilenames()
	}
	out := make([]File, 0, len(names))

	for _, n := range names {
		dt, err := ioutil.ReadFile(n)
		if err != nil {
			if isDefault && errors.Is(err, os.ErrNotExist) {
				continue
			}
			return nil, err
		}
		c = mergeConfig(c, *cfg)
		out = append(out, File{Name: n, Data: dt})
	}
	if err := c.setOverrides(overrides); err != nil {
	return out, nil
}

func ReadTargets(ctx context.Context, files []File, targets, overrides []string, defaults map[string]string) (map[string]*Target, error) {
	c, err := ParseFiles(files, defaults)
	if err != nil {
		return nil, err
	}
	m := map[string]Target{}

	o, err := c.newOverrides(overrides)
	if err != nil {
		return nil, err
	}
	m := map[string]*Target{}
	for _, n := range targets {
		for _, n := range c.ResolveGroup(n) {
			t, err := c.ResolveTarget(n)
			t, err := c.ResolveTarget(n, o)
			if err != nil {
				return nil, err
			}
			if t != nil {
				m[n] = *t
				m[n] = t
			}
		}
	}
	return m, nil
}

func ParseFile(fn string) (*Config, error) {
	dt, err := ioutil.ReadFile(fn)
	if err != nil {
		return nil, err
func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error) {
	defer func() {
		err = formatHCLError(err, files)
	}()

	var c Config
	var fs []*hcl.File
	for _, f := range files {
		cfg, isCompose, composeErr := ParseComposeFile(f.Data, f.Name)
		if isCompose {
			if composeErr != nil {
				return nil, composeErr
			}
			c = mergeConfig(c, *cfg)
			c = dedupeConfig(c)
		}
		if !isCompose {
			hf, isHCL, err := ParseHCLFile(f.Data, f.Name)
			if isHCL {
				if err != nil {
					return nil, err
				}
				fs = append(fs, hf)
			} else if composeErr != nil {
				return nil, fmt.Errorf("failed to parse %s: parsing yaml: %v, parsing hcl: %w", f.Name, composeErr, err)
			} else {
				return nil, err
			}
		}
	}

	if len(fs) > 0 {
		if err := hclparser.Parse(hcl.MergeFiles(fs), hclparser.Opt{
			LookupVar: os.LookupEnv,
			Vars:      defaults,
		}, &c); err.HasErrors() {
			return nil, err
		}
	}
	return &c, nil
}

func dedupeConfig(c Config) Config {
	c2 := c
	c2.Targets = make([]*Target, 0, len(c2.Targets))
	m := map[string]*Target{}
	for _, t := range c.Targets {
		if t2, ok := m[t.Name]; ok {
			t2.Merge(t)
		} else {
			m[t.Name] = t
			c2.Targets = append(c2.Targets, t)
		}
	}
	return c2
}

func ParseFile(dt []byte, fn string) (*Config, error) {
	return ParseFiles([]File{{Data: dt, Name: fn}}, nil)
}

func ParseComposeFile(dt []byte, fn string) (*Config, bool, error) {
	fnl := strings.ToLower(fn)
	if strings.HasSuffix(fnl, ".yml") || strings.HasSuffix(fnl, ".yaml") {
		return ParseCompose(dt)
		cfg, err := ParseCompose(dt)
		return cfg, true, err
	}

	if strings.HasSuffix(fnl, ".json") || strings.HasSuffix(fnl, ".hcl") {
		return ParseHCL(dt)
		return nil, false, nil
	}

	cfg, err := ParseCompose(dt)
	if err != nil {
		cfg, err2 := ParseHCL(dt)
		if err2 != nil {
			return nil, errors.Errorf("failed to parse %s: parsing yaml: %s, parsing hcl: %s", fn, err.Error(), err2.Error())
		}
		return cfg, nil
	}
	return cfg, nil
	return cfg, err == nil, err
}

type Config struct {
	Group map[string]Group
	Target map[string]Target
	Groups  []*Group  `json:"group" hcl:"group,block"`
	Targets []*Target `json:"target" hcl:"target,block"`
}

func mergeConfig(c1, c2 Config) Config {
	for k, g := range c2.Group {
		if c1.Group == nil {
			c1.Group = map[string]Group{}
		}
		c1.Group[k] = g
	if c1.Groups == nil {
		c1.Groups = []*Group{}
	}

	for k, t := range c2.Target {
		if c1.Target == nil {
			c1.Target = map[string]Target{}
	for _, g2 := range c2.Groups {
		var g1 *Group
		for _, g := range c1.Groups {
			if g2.Name == g.Name {
				g1 = g
				break
			}
		}
		if base, ok := c1.Target[k]; ok {
			t = merge(base, t)
		if g1 == nil {
			c1.Groups = append(c1.Groups, g2)
			continue
		}
		c1.Target[k] = t

	nextTarget:
		for _, t2 := range g2.Targets {
			for _, t1 := range g1.Targets {
				if t1 == t2 {
					continue nextTarget
				}
			}
			g1.Targets = append(g1.Targets, t2)
		}
		c1.Groups = append(c1.Groups, g1)
	}

	if c1.Targets == nil {
		c1.Targets = []*Target{}
	}

	for _, t2 := range c2.Targets {
		var t1 *Target
		for _, t := range c1.Targets {
			if t2.Name == t.Name {
				t1 = t
				break
			}
		}
		if t1 != nil {
			t1.Merge(t2)
			t2 = t1
		}
		c1.Targets = append(c1.Targets, t2)
	}

	return c1
}

func (c Config) setOverrides(v []string) error {
	for _, v := range v {
		parts := strings.SplitN(v, "=", 2)
		if len(parts) != 2 {
			return errors.Errorf("invalid override %s, expected target.name=value", v)
func (c Config) expandTargets(pattern string) ([]string, error) {
	for _, target := range c.Targets {
		if target.Name == pattern {
			return []string{pattern}, nil
		}
	}

	var names []string
	for _, target := range c.Targets {
		ok, err := path.Match(pattern, target.Name)
		if err != nil {
			return nil, errors.Wrapf(err, "could not match targets with '%s'", pattern)
		}
		if ok {
			names = append(names, target.Name)
		}
	}
	if len(names) == 0 {
		return nil, errors.Errorf("could not find any target matching '%s'", pattern)
	}
	return names, nil
}

func (c Config) newOverrides(v []string) (map[string]*Target, error) {
	m := map[string]*Target{}
	for _, v := range v {

		parts := strings.SplitN(v, "=", 2)
		keys := strings.SplitN(parts[0], ".", 3)
		if len(keys) < 2 {
			return errors.Errorf("invalid override key %s, expected target.name", parts[0])
			return nil, errors.Errorf("invalid override key %s, expected target.name", parts[0])
		}

		name := keys[0]

		t, ok := c.Target[name]
		if !ok {
			return errors.Errorf("unknown target %s", name)
		pattern := keys[0]
		if len(parts) != 2 && keys[1] != "args" {
			return nil, errors.Errorf("invalid override %s, expected target.name=value", v)
		}

		switch keys[1] {
		case "context":
			t.Context = &parts[1]
		case "dockerfile":
			t.Dockerfile = &parts[1]
		case "args":
			if len(keys) != 3 {
				return errors.Errorf("invalid key %s, args requires name", parts[0])
			}
			if t.Args == nil {
				t.Args = map[string]string{}
			}
			t.Args[keys[2]] = parts[1]
		case "labels":
			if len(keys) != 3 {
				return errors.Errorf("invalid key %s, labels requires name", parts[0])
			}
			if t.Labels == nil {
				t.Labels = map[string]string{}
			}
			t.Labels[keys[2]] = parts[1]
		case "tags":
			t.Tags = append(t.Tags, parts[1])
		case "cache-from":
			t.CacheFrom = append(t.CacheFrom, parts[1])
		case "cache-to":
			t.CacheTo = append(t.CacheTo, parts[1])
		case "target":
			s := parts[1]
			t.Target = &s
		case "secrets":
			t.Secrets = append(t.Secrets, parts[1])
		case "ssh":
			t.SSH = append(t.SSH, parts[1])
		case "platform":
			t.Platforms = append(t.Platforms, parts[1])
		case "output":
			t.Outputs = append(t.Outputs, parts[1])
		default:
			return errors.Errorf("unknown key: %s", keys[1])
		names, err := c.expandTargets(pattern)
		if err != nil {
			return nil, err
		}

		for _, name := range names {
			t, ok := m[name]
			if !ok {
				t = &Target{}
			}

			switch keys[1] {
			case "context":
				t.Context = &parts[1]
			case "dockerfile":
				t.Dockerfile = &parts[1]
			case "args":
				if len(keys) != 3 {
					return nil, errors.Errorf("invalid key %s, args requires name", parts[0])
				}
				if t.Args == nil {
					t.Args = map[string]string{}
				}
				if len(parts) < 2 {
					v, ok := os.LookupEnv(keys[2])
					if ok {
						t.Args[keys[2]] = v
					}
				} else {
					t.Args[keys[2]] = parts[1]
				}
			case "labels":
				if len(keys) != 3 {
					return nil, errors.Errorf("invalid key %s, labels requires name", parts[0])
				}
				if t.Labels == nil {
					t.Labels = map[string]string{}
				}
				t.Labels[keys[2]] = parts[1]
			case "tags":
				t.Tags = append(t.Tags, parts[1])
			case "cache-from":
				t.CacheFrom = append(t.CacheFrom, parts[1])
			case "cache-to":
				t.CacheTo = append(t.CacheTo, parts[1])
			case "target":
				s := parts[1]
				t.Target = &s
			case "secrets":
				t.Secrets = append(t.Secrets, parts[1])
			case "ssh":
				t.SSH = append(t.SSH, parts[1])
			case "platform":
				t.Platforms = append(t.Platforms, parts[1])
			case "output":
				t.Outputs = append(t.Outputs, parts[1])
			case "no-cache":
				noCache, err := strconv.ParseBool(parts[1])
				if err != nil {
					return nil, errors.Errorf("invalid value %s for boolean key no-cache", parts[1])
				}
				t.NoCache = &noCache
			case "pull":
				pull, err := strconv.ParseBool(parts[1])
				if err != nil {
					return nil, errors.Errorf("invalid value %s for boolean key pull", parts[1])
				}
				t.Pull = &pull
			default:
				return nil, errors.Errorf("unknown key: %s", keys[1])
			}
			m[name] = t
		}
		c.Target[name] = t
	}
	return nil
	return m, nil
}

func (c Config) ResolveGroup(name string) []string {
@@ -164,8 +340,14 @@ func (c Config) group(name string, visited map[string]struct{}) []string {
	if _, ok := visited[name]; ok {
		return nil
	}
	g, ok := c.Group[name]
	if !ok {
	var g *Group
	for _, group := range c.Groups {
		if group.Name == name {
			g = group
			break
		}
	}
	if g == nil {
		return []string{name}
	}
	visited[name] = struct{}{}
@@ -176,8 +358,8 @@ func (c Config) group(name string, visited map[string]struct{}) []string {
	return targets
}

func (c Config) ResolveTarget(name string) (*Target, error) {
	t, err := c.target(name, map[string]struct{}{})
func (c Config) ResolveTarget(name string, overrides map[string]*Target) (*Target, error) {
	t, err := c.target(name, map[string]struct{}{}, overrides)
	if err != nil {
		return nil, err
	}
@@ -192,50 +374,72 @@ func (c Config) ResolveTarget(name string) (*Target, error) {
	return t, nil
}

func (c Config) target(name string, visited map[string]struct{}) (*Target, error) {
func (c Config) target(name string, visited map[string]struct{}, overrides map[string]*Target) (*Target, error) {
	if _, ok := visited[name]; ok {
		return nil, nil
	}
	visited[name] = struct{}{}
	t, ok := c.Target[name]
	if !ok {
	var t *Target
	for _, target := range c.Targets {
		if target.Name == name {
			t = target
			break
		}
	}
	if t == nil {
		return nil, errors.Errorf("failed to find target %s", name)
	}
	var tt Target
	tt := &Target{}
	for _, name := range t.Inherits {
		t, err := c.target(name, visited)
		t, err := c.target(name, visited, overrides)
		if err != nil {
			return nil, err
		}
		if t != nil {
			tt = merge(tt, *t)
			tt.Merge(t)
		}
	}
	t.Inherits = nil
	tt = merge(merge(defaultTarget(), t), tt)
	m := defaultTarget()
	m.Merge(tt)
	m.Merge(t)
	tt = m
	if override, ok := overrides[name]; ok {
		tt.Merge(override)
	}
	tt.normalize()
	return &tt, nil
	return tt, nil
}

type Group struct {
	Targets []string
	Name    string   `json:"-" hcl:"name,label"`
	Targets []string `json:"targets" hcl:"targets"`
	// Target // TODO?
}

type Target struct {
	Inherits   []string          `json:"inherits,omitempty" hcl:"inherits,omitempty"`
	Context    *string           `json:"context,omitempty" hcl:"context,omitempty"`
	Dockerfile *string           `json:"dockerfile,omitempty" hcl:"dockerfile,omitempty"`
	Args       map[string]string `json:"args,omitempty" hcl:"args,omitempty"`
	Labels     map[string]string `json:"labels,omitempty" hcl:"labels,omitempty"`
	Tags       []string          `json:"tags,omitempty" hcl:"tags,omitempty"`
	CacheFrom  []string          `json:"cache-from,omitempty" hcl:"cache-from,omitempty"`
	CacheTo    []string          `json:"cache-to,omitempty" hcl:"cache-to,omitempty"`
	Target     *string           `json:"target,omitempty" hcl:"target,omitempty"`
	Secrets    []string          `json:"secret,omitempty" hcl:"secret,omitempty"`
	SSH        []string          `json:"ssh,omitempty" hcl:"ssh,omitempty"`
	Platforms  []string          `json:"platforms,omitempty" hcl:"platforms,omitempty"`
	Outputs    []string          `json:"output,omitempty" hcl:"output,omitempty"`
	Name string `json:"-" hcl:"name,label"`

	// Inherits is the only field that cannot be overridden with --set
	Inherits []string `json:"inherits,omitempty" hcl:"inherits,optional"`

	Context          *string           `json:"context,omitempty" hcl:"context,optional"`
	Dockerfile       *string           `json:"dockerfile,omitempty" hcl:"dockerfile,optional"`
	DockerfileInline *string           `json:"dockerfile-inline,omitempty" hcl:"dockerfile-inline,optional"`
	Args             map[string]string `json:"args,omitempty" hcl:"args,optional"`
	Labels           map[string]string `json:"labels,omitempty" hcl:"labels,optional"`
	Tags             []string          `json:"tags,omitempty" hcl:"tags,optional"`
	CacheFrom        []string          `json:"cache-from,omitempty" hcl:"cache-from,optional"`
	CacheTo          []string          `json:"cache-to,omitempty" hcl:"cache-to,optional"`
	Target           *string           `json:"target,omitempty" hcl:"target,optional"`
	Secrets          []string          `json:"secret,omitempty" hcl:"secret,optional"`
	SSH              []string          `json:"ssh,omitempty" hcl:"ssh,optional"`
	Platforms        []string          `json:"platforms,omitempty" hcl:"platforms,optional"`
	Outputs          []string          `json:"output,omitempty" hcl:"output,optional"`
	Pull             *bool             `json:"pull,omitempty" hcl:"pull,optional"`
	NoCache          *bool             `json:"no-cache,omitempty" hcl:"no-cache,optional"`

	// IMPORTANT: if you add more fields here, do not forget to update newOverrides and README.
}

func (t *Target) normalize() {
@@ -248,10 +452,65 @@ func (t *Target) normalize() {
	t.Outputs = removeDupes(t.Outputs)
}

func TargetsToBuildOpt(m map[string]Target) (map[string]build.Options, error) {
func (t *Target) Merge(t2 *Target) {
	if t2.Context != nil {
		t.Context = t2.Context
	}
	if t2.Dockerfile != nil {
		t.Dockerfile = t2.Dockerfile
	}
	if t2.DockerfileInline != nil {
		t.DockerfileInline = t2.DockerfileInline
	}
	for k, v := range t2.Args {
		if t.Args == nil {
			t.Args = map[string]string{}
		}
		t.Args[k] = v
	}
	for k, v := range t2.Labels {
		if t.Labels == nil {
			t.Labels = map[string]string{}
		}
		t.Labels[k] = v
	}
	if t2.Tags != nil { // no merge
		t.Tags = t2.Tags
	}
	if t2.Target != nil {
		t.Target = t2.Target
	}
	if t2.Secrets != nil { // merge
		t.Secrets = append(t.Secrets, t2.Secrets...)
	}
	if t2.SSH != nil { // merge
		t.SSH = append(t.SSH, t2.SSH...)
	}
	if t2.Platforms != nil { // no merge
		t.Platforms = t2.Platforms
	}
	if t2.CacheFrom != nil { // merge
		t.CacheFrom = append(t.CacheFrom, t2.CacheFrom...)
	}
	if t2.CacheTo != nil { // no merge
		t.CacheTo = t2.CacheTo
	}
	if t2.Outputs != nil { // no merge
		t.Outputs = t2.Outputs
	}
	if t2.Pull != nil {
		t.Pull = t2.Pull
	}
	if t2.NoCache != nil {
		t.NoCache = t2.NoCache
	}
	t.Inherits = append(t.Inherits, t2.Inherits...)
}
|
||||
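Note the per-field rules in the comments: scalar fields and the slices marked "no merge" are replaced wholesale, the slices marked "merge" are appended, and Args/Labels are merged key by key. A minimal sketch of the effect (same package; strPtr is a hypothetical local helper, not part of this diff):

func strPtr(s string) *string { return &s }

func sketchTargetMerge() {
    base := &Target{
        Dockerfile: strPtr("Dockerfile"),
        Tags:       []string{"app:base"},
        Secrets:    []string{"id=token,src=./token"},
        Args:       map[string]string{"A": "1"},
    }
    base.Merge(&Target{
        Tags:    []string{"app:v1"},               // "no merge": replaces base tags
        Secrets: []string{"id=npmrc,src=./npmrc"}, // "merge": appended after base secrets
        Args:    map[string]string{"B": "2"},      // merged key by key
    })
    // base.Tags    == []string{"app:v1"}
    // base.Secrets == []string{"id=token,src=./token", "id=npmrc,src=./npmrc"}
    // base.Args    == map[string]string{"A": "1", "B": "2"}
}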

func TargetsToBuildOpt(m map[string]*Target, inp *Input) (map[string]build.Options, error) {
	m2 := make(map[string]build.Options, len(m))
	for k, v := range m {
		bo, err := toBuildOpt(v)
		bo, err := toBuildOpt(v, inp)
		if err != nil {
			return nil, err
		}
@@ -260,7 +519,25 @@ func TargetsToBuildOpt(m map[string]Target) (map[string]build.Options, error) {
	return m2, nil
}

func toBuildOpt(t Target) (*build.Options, error) {
func updateContext(t *build.Inputs, inp *Input) {
	if inp == nil || inp.State == nil {
		return
	}
	if t.ContextPath == "." {
		t.ContextPath = inp.URL
		return
	}
	if strings.HasPrefix(t.ContextPath, "cwd://") {
		return
	}
	if IsRemoteURL(t.ContextPath) {
		return
	}
	st := llb.Scratch().File(llb.Copy(*inp.State, t.ContextPath, "/"), llb.WithCustomNamef("set context to %s", t.ContextPath))
	t.ContextState = &st
}
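updateContext only rewrites local context paths when the bake file itself came from a remote input: "." is replaced by the input URL, cwd:// and remote URLs are left alone, and any other relative path becomes an LLB copy from the remote state. A sketch of the resulting mapping, assuming inp = &Input{URL: "https://github.com/user/repo.git", State: st}; the example values are illustrative:

// "."                       -> ContextPath = "https://github.com/user/repo.git" (build in the remote root)
// "cwd://foo"               -> unchanged here; the cwd:// prefix is trimmed later in toBuildOpt
// "https://example.com/app" -> unchanged (already remote)
// "subdir"                  -> ContextState set to llb.Scratch().File(llb.Copy(*inp.State, "subdir", "/"))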

func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
	if v := t.Context; v != nil && *v == "-" {
		return nil, errors.Errorf("context from stdin not allowed in bake")
	}
@@ -272,6 +549,9 @@ func toBuildOpt(t Target) (*build.Options, error) {
	if t.Context != nil {
		contextPath = *t.Context
	}
	if !strings.HasPrefix(contextPath, "cwd://") && !IsRemoteURL(contextPath) {
		contextPath = path.Clean(contextPath)
	}
	dockerfilePath := "Dockerfile"
	if t.Dockerfile != nil {
		dockerfilePath = *t.Dockerfile
@@ -281,14 +561,36 @@ func toBuildOpt(t Target) (*build.Options, error) {
		dockerfilePath = path.Join(contextPath, dockerfilePath)
	}

	noCache := false
	if t.NoCache != nil {
		noCache = *t.NoCache
	}
	pull := false
	if t.Pull != nil {
		pull = *t.Pull
	}

	bi := build.Inputs{
		ContextPath:    contextPath,
		DockerfilePath: dockerfilePath,
	}
	if t.DockerfileInline != nil {
		bi.DockerfileInline = *t.DockerfileInline
	}
	updateContext(&bi, inp)
	if strings.HasPrefix(bi.ContextPath, "cwd://") {
		bi.ContextPath = path.Clean(strings.TrimPrefix(bi.ContextPath, "cwd://"))
	}

	t.Context = &bi.ContextPath

	bo := &build.Options{
		Inputs: build.Inputs{
			ContextPath:    contextPath,
			DockerfilePath: dockerfilePath,
		},
		Inputs:    bi,
		Tags:      t.Tags,
		BuildArgs: t.Args,
		Labels:    t.Labels,
		NoCache:   noCache,
		Pull:      pull,
	}

	platforms, err := platformutil.Parse(t.Platforms)
@@ -299,13 +601,17 @@ func toBuildOpt(t Target) (*build.Options, error) {

	bo.Session = append(bo.Session, authprovider.NewDockerAuthProvider(os.Stderr))

	secrets, err := build.ParseSecretSpecs(t.Secrets)
	secrets, err := buildflags.ParseSecretSpecs(t.Secrets)
	if err != nil {
		return nil, err
	}
	bo.Session = append(bo.Session, secrets)

	ssh, err := build.ParseSSHSpecs(t.SSH)
	sshSpecs := t.SSH
	if len(sshSpecs) == 0 && buildflags.IsGitSSH(contextPath) {
		sshSpecs = []string{"default"}
	}
	ssh, err := buildflags.ParseSSHSpecs(sshSpecs)
	if err != nil {
		return nil, err
	}
@@ -315,19 +621,19 @@ func toBuildOpt(t Target) (*build.Options, error) {
		bo.Target = *t.Target
	}

	cacheImports, err := build.ParseCacheEntry(t.CacheFrom)
	cacheImports, err := buildflags.ParseCacheEntry(t.CacheFrom)
	if err != nil {
		return nil, err
	}
	bo.CacheFrom = cacheImports

	cacheExports, err := build.ParseCacheEntry(t.CacheTo)
	cacheExports, err := buildflags.ParseCacheEntry(t.CacheTo)
	if err != nil {
		return nil, err
	}
	bo.CacheTo = cacheExports

	outputs, err := build.ParseOutputs(t.Outputs)
	outputs, err := buildflags.ParseOutputs(t.Outputs)
	if err != nil {
		return nil, err
	}
@@ -336,55 +642,8 @@ func toBuildOpt(t Target) (*build.Options, error) {
	return bo, nil
}
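End to end, bake resolves targets first and only then lowers each one to build options. A minimal sketch of the call sequence, assuming ctx and files ([]File) are in scope (it mirrors the TestHCLCwdPrefix test later in this diff):

m, err := ReadTargets(ctx, files, []string{"app"}, nil, nil)
if err != nil {
    return err
}
bos, err := TargetsToBuildOpt(m, &Input{})
if err != nil {
    return err
}
_ = bos // one build.Options per resolved target, keyed by target name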

func defaultTarget() Target {
	return Target{}
}

func merge(t1, t2 Target) Target {
	if t2.Context != nil {
		t1.Context = t2.Context
	}
	if t2.Dockerfile != nil {
		t1.Dockerfile = t2.Dockerfile
	}
	for k, v := range t2.Args {
		if t1.Args == nil {
			t1.Args = map[string]string{}
		}
		t1.Args[k] = v
	}
	for k, v := range t2.Labels {
		if t1.Labels == nil {
			t1.Labels = map[string]string{}
		}
		t1.Labels[k] = v
	}
	if t2.Tags != nil { // no merge
		t1.Tags = t2.Tags
	}
	if t2.Target != nil {
		t1.Target = t2.Target
	}
	if t2.Secrets != nil { // merge
		t1.Secrets = append(t1.Secrets, t2.Secrets...)
	}
	if t2.SSH != nil { // merge
		t1.SSH = append(t1.SSH, t2.SSH...)
	}
	if t2.Platforms != nil { // no merge
		t1.Platforms = t2.Platforms
	}
	if t2.CacheFrom != nil { // no merge
		t1.CacheFrom = append(t1.CacheFrom, t2.CacheFrom...)
	}
	if t2.CacheTo != nil { // no merge
		t1.CacheTo = t2.CacheTo
	}
	if t2.Outputs != nil { // no merge
		t1.Outputs = t2.Outputs
	}
	t1.Inherits = append(t1.Inherits, t2.Inherits...)
	return t1
func defaultTarget() *Target {
	return &Target{}
}

func removeDupes(s []string) []string {
@@ -394,6 +653,9 @@ func removeDupes(s []string) []string {
		if _, ok := seen[v]; ok {
			continue
		}
		if v == "" {
			continue
		}
		seen[v] = struct{}{}
		s[i] = v
		i++
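The hunk above shows only the loop body of removeDupes. For reference, a self-contained version consistent with it; the surrounding declarations are inferred from the hunk, not part of this diff:

func removeDupes(s []string) []string {
    i := 0
    seen := make(map[string]struct{}, len(s))
    for _, v := range s {
        if _, ok := seen[v]; ok {
            continue // drop repeated entries
        }
        if v == "" {
            continue // drop empty entries
        }
        seen[v] = struct{}{}
        s[i] = v // compact in place
        i++
    }
    return s[:i]
}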
@@ -2,9 +2,7 @@ package bake

import (
	"context"
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/require"
@@ -12,40 +10,182 @@ import (

func TestReadTargets(t *testing.T) {
	t.Parallel()
	tmpdir, err := ioutil.TempDir("", "bake")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)

	fp := filepath.Join(tmpdir, "config.hcl")
	err = ioutil.WriteFile(fp, []byte(`
target "dep" {
	fp := File{
		Name: "config.hcl",
		Data: []byte(`
target "webDEP" {
	args = {
		VAR_INHERITED = "webDEP"
		VAR_BOTH = "webDEP"
	}
	no-cache = true
}

target "webapp" {
	dockerfile = "Dockerfile.webapp"
	inherits = ["dep"]
}`), 0600)
	require.NoError(t, err)
	args = {
		VAR_BOTH = "webapp"
	}
	inherits = ["webDEP"]
}`),
	}

	ctx := context.TODO()

	m, err := ReadTargets(ctx, []string{fp}, []string{"webapp"}, nil)
	require.NoError(t, err)
	t.Run("NoOverrides", func(t *testing.T) {
		m, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, nil, nil)
		require.NoError(t, err)
		require.Equal(t, 1, len(m))

		require.Equal(t, "Dockerfile.webapp", *m["webapp"].Dockerfile)
		require.Equal(t, ".", *m["webapp"].Context)
		require.Equal(t, "Dockerfile.webapp", *m["webapp"].Dockerfile)
		require.Equal(t, ".", *m["webapp"].Context)
		require.Equal(t, "webDEP", m["webapp"].Args["VAR_INHERITED"])
		require.Equal(t, true, *m["webapp"].NoCache)
		require.Nil(t, m["webapp"].Pull)
	})

	t.Run("InvalidTargetOverrides", func(t *testing.T) {
		_, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"nosuchtarget.context=foo"}, nil)
		require.NotNil(t, err)
		require.Equal(t, err.Error(), "could not find any target matching 'nosuchtarget'")
	})

	t.Run("ArgsOverrides", func(t *testing.T) {
		t.Run("leaf", func(t *testing.T) {
			os.Setenv("VAR_FROMENV"+t.Name(), "fromEnv")
			defer os.Unsetenv("VAR_FROMENV" + t.Name())

			m, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{
				"webapp.args.VAR_UNSET",
				"webapp.args.VAR_EMPTY=",
				"webapp.args.VAR_SET=bananas",
				"webapp.args.VAR_FROMENV" + t.Name(),
				"webapp.args.VAR_INHERITED=override",
				// not overriding VAR_BOTH on purpose
			}, nil)
			require.NoError(t, err)

			require.Equal(t, "Dockerfile.webapp", *m["webapp"].Dockerfile)
			require.Equal(t, ".", *m["webapp"].Context)

			_, isSet := m["webapp"].Args["VAR_UNSET"]
			require.False(t, isSet, m["webapp"].Args["VAR_UNSET"])

			_, isSet = m["webapp"].Args["VAR_EMPTY"]
			require.True(t, isSet, m["webapp"].Args["VAR_EMPTY"])

			require.Equal(t, m["webapp"].Args["VAR_SET"], "bananas")

			require.Equal(t, m["webapp"].Args["VAR_FROMENV"+t.Name()], "fromEnv")

			require.Equal(t, m["webapp"].Args["VAR_BOTH"], "webapp")
			require.Equal(t, m["webapp"].Args["VAR_INHERITED"], "override")
		})

		// building leaf but overriding parent fields
		t.Run("parent", func(t *testing.T) {
			m, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{
				"webDEP.args.VAR_INHERITED=override",
				"webDEP.args.VAR_BOTH=override",
			}, nil)
			require.NoError(t, err)
			require.Equal(t, m["webapp"].Args["VAR_INHERITED"], "override")
			require.Equal(t, m["webapp"].Args["VAR_BOTH"], "webapp")
		})
	})

	t.Run("ContextOverride", func(t *testing.T) {
		_, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.context"}, nil)
		require.NotNil(t, err)

		m, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.context=foo"}, nil)
		require.NoError(t, err)

		require.Equal(t, "foo", *m["webapp"].Context)
	})

	t.Run("NoCacheOverride", func(t *testing.T) {
		m, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.no-cache=false"}, nil)
		require.NoError(t, err)
		require.Equal(t, false, *m["webapp"].NoCache)
	})

	t.Run("PullOverride", func(t *testing.T) {
		m, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.pull=false"}, nil)
		require.NoError(t, err)
		require.Equal(t, false, *m["webapp"].Pull)
	})

	t.Run("PatternOverride", func(t *testing.T) {
		// same check for two cases
		multiTargetCheck := func(t *testing.T, m map[string]*Target, err error) {
			require.NoError(t, err)
			require.Equal(t, 2, len(m))
			require.Equal(t, "foo", *m["webapp"].Dockerfile)
			require.Equal(t, "webDEP", m["webapp"].Args["VAR_INHERITED"])
			require.Equal(t, "foo", *m["webDEP"].Dockerfile)
			require.Equal(t, "webDEP", m["webDEP"].Args["VAR_INHERITED"])
		}

		cases := []struct {
			name      string
			targets   []string
			overrides []string
			check     func(*testing.T, map[string]*Target, error)
		}{
			{
				name:      "multi target single pattern",
				targets:   []string{"webapp", "webDEP"},
				overrides: []string{"web*.dockerfile=foo"},
				check:     multiTargetCheck,
			},
			{
				name:      "multi target multi pattern",
				targets:   []string{"webapp", "webDEP"},
				overrides: []string{"web*.dockerfile=foo", "*.args.VAR_BOTH=bar"},
				check:     multiTargetCheck,
			},
			{
				name:      "single target",
				targets:   []string{"webapp"},
				overrides: []string{"web*.dockerfile=foo"},
				check: func(t *testing.T, m map[string]*Target, err error) {
					require.NoError(t, err)
					require.Equal(t, 1, len(m))
					require.Equal(t, "foo", *m["webapp"].Dockerfile)
					require.Equal(t, "webDEP", m["webapp"].Args["VAR_INHERITED"])
				},
			},
			{
				name:      "nomatch",
				targets:   []string{"webapp"},
				overrides: []string{"nomatch*.dockerfile=foo"},
				check: func(t *testing.T, m map[string]*Target, err error) {
					// NOTE: I am unsure whether failing to match should always error out
					// instead of simply skipping that override.
					// Let's enforce the error and we can relax it later if users complain.
					require.NotNil(t, err)
					require.Equal(t, err.Error(), "could not find any target matching 'nomatch*'")
				},
			},
		}
		for _, test := range cases {
			t.Run(test.name, func(t *testing.T) {
				m, err := ReadTargets(ctx, []File{fp}, test.targets, test.overrides, nil)
				test.check(t, m, err)
			})
		}
	})
}
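The PatternOverride cases rely on glob-style matching of the override key against target names. The newOverrides implementation itself is not shown in this diff; a minimal sketch of the matching behavior the tests expect, assuming path.Match semantics:

package main

import (
    "fmt"
    "path"
)

func main() {
    targets := []string{"webapp", "webDEP"}
    pattern := "web*" // from an override like "web*.dockerfile=foo"
    for _, name := range targets {
        if ok, _ := path.Match(pattern, name); ok {
            fmt.Println("override applies to", name)
        }
    }
    // a pattern matching no target (e.g. "nomatch*") must produce an error,
    // per the "nomatch" test case above
}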

func TestReadTargetsCompose(t *testing.T) {
	t.Parallel()
	tmpdir, err := ioutil.TempDir("", "bake")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)

	fp := filepath.Join(tmpdir, "docker-compose.yml")
	err = ioutil.WriteFile(fp, []byte(`
version: "3"

	fp := File{
		Name: "docker-compose.yml",
		Data: []byte(
			`version: "3"
services:
  db:
    build: .
@@ -56,14 +196,58 @@ services:
      dockerfile: Dockerfile.webapp
      args:
        buildno: 1
`), 0600)
	require.NoError(t, err)
`),
	}

	fp2 := File{
		Name: "docker-compose2.yml",
		Data: []byte(
			`version: "3"
services:
  newservice:
    build: .
  webapp:
    build:
      args:
        buildno2: 12
`),
	}

	ctx := context.TODO()

	m, err := ReadTargets(ctx, []string{fp}, []string{"default"}, nil)
	m, err := ReadTargets(ctx, []File{fp, fp2}, []string{"default"}, nil, nil)
	require.NoError(t, err)

	require.Equal(t, 3, len(m))
	_, ok := m["newservice"]
	require.True(t, ok)
	require.Equal(t, "Dockerfile.webapp", *m["webapp"].Dockerfile)
	require.Equal(t, ".", *m["webapp"].Context)
	require.Equal(t, "1", m["webapp"].Args["buildno"])
	require.Equal(t, "12", m["webapp"].Args["buildno2"])
}

func TestHCLCwdPrefix(t *testing.T) {

	fp := File{
		Name: "docker-bake.hc",
		Data: []byte(
			`target "app" {
	context = "cwd://foo"
	dockerfile = "test"
}`),
	}
	ctx := context.TODO()
	m, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil)
	require.NoError(t, err)

	require.Equal(t, 1, len(m))
	_, ok := m["app"]
	require.True(t, ok)

	_, err = TargetsToBuildOpt(m, &Input{})
	require.NoError(t, err)

	require.Equal(t, "test", *m["app"].Dockerfile)
	require.Equal(t, "foo", *m["app"].Context)
}
@@ -1,24 +1,45 @@
package bake

import (
	"github.com/docker/cli/cli/compose/loader"
	composetypes "github.com/docker/cli/cli/compose/types"
	"fmt"
	"os"
	"reflect"
	"strings"

	"github.com/compose-spec/compose-go/loader"
	compose "github.com/compose-spec/compose-go/types"
)

func parseCompose(dt []byte) (*composetypes.Config, error) {
	parsed, err := loader.ParseYAML([]byte(dt))
func parseCompose(dt []byte) (*compose.Project, error) {
	config, err := loader.ParseYAML(dt)
	if err != nil {
		return nil, err
	}
	return loader.Load(composetypes.ConfigDetails{
		ConfigFiles: []composetypes.ConfigFile{

	return loader.Load(compose.ConfigDetails{
		ConfigFiles: []compose.ConfigFile{
			{
				Config: parsed,
				Config: config,
			},
		},
		Environment: envMap(os.Environ()),
	}, func(options *loader.Options) {
		options.SkipNormalization = true
	})
}

func envMap(env []string) map[string]string {
	result := make(map[string]string, len(env))
	for _, s := range env {
		kv := strings.SplitN(s, "=", 2)
		if len(kv) != 2 {
			continue
		}
		result[kv[0]] = kv[1]
	}
	return result
}
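envMap turns KEY=VALUE pairs from os.Environ() into a lookup map, skipping malformed entries; compose variable interpolation then reads from it. For example:

m := envMap([]string{"FOO=bar", "EMPTY=", "malformed"})
// m == map[string]string{"FOO": "bar", "EMPTY": ""}; "malformed" has no "=" and is skipped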

func ParseCompose(dt []byte) (*Config, error) {
	cfg, err := parseCompose(dt)
	if err != nil {
@@ -26,14 +47,23 @@ func ParseCompose(dt []byte) (*Config, error) {
	}

	var c Config
	var zeroBuildConfig compose.BuildConfig
	if len(cfg.Services) > 0 {
		c.Group = map[string]Group{}
		c.Target = map[string]Target{}
		c.Groups = []*Group{}
		c.Targets = []*Target{}

		var g Group
		g := &Group{Name: "default"}

		for _, s := range cfg.Services {
			g.Targets = append(g.Targets, s.Name)

			if s.Build == nil || reflect.DeepEqual(s.Build, zeroBuildConfig) {
				// if not, make sure they're setting an image, or it's an invalid d-c.yml
				if s.Image == "" {
					return nil, fmt.Errorf("compose file invalid: service %s has neither an image nor a build context specified. At least one must be provided", s.Name)
				}
				continue
			}

			var contextPathP *string
			if s.Build.Context != "" {
				contextPath := s.Build.Context
@@ -44,7 +74,9 @@ func ParseCompose(dt []byte) (*Config, error) {
				dockerfilePath := s.Build.Dockerfile
				dockerfilePathP = &dockerfilePath
			}
			t := Target{
			g.Targets = append(g.Targets, s.Name)
			t := &Target{
				Name:       s.Name,
				Context:    contextPathP,
				Dockerfile: dockerfilePathP,
				Labels:     s.Build.Labels,
@@ -59,20 +91,22 @@ func ParseCompose(dt []byte) (*Config, error) {
			if s.Image != "" {
				t.Tags = []string{s.Image}
			}
			c.Target[s.Name] = t
			c.Targets = append(c.Targets, t)
		}
		c.Group["default"] = g
		c.Groups = append(c.Groups, g)

	}

	return &c, nil
}

func toMap(in composetypes.MappingWithEquals) map[string]string {
func toMap(in compose.MappingWithEquals) map[string]string {
	m := map[string]string{}
	for k, v := range in {
		if v != nil {
			m[k] = *v
		} else {
			m[k] = os.Getenv(k)
		}
	}
	return m
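toMap resolves compose's `KEY` versus `KEY: value` distinction: an explicit value is kept, while a bare key (nil value) falls back to the process environment. A short example (the values are illustrative):

// assuming os.Setenv("TOKEN", "s3cr3t") was called earlier
v := "explicit"
m := toMap(compose.MappingWithEquals{
    "VAR":   &v,  // explicit value wins
    "TOKEN": nil, // bare key: resolved via os.Getenv
})
// m == map[string]string{"VAR": "explicit", "TOKEN": "s3cr3t"}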
@@ -9,8 +9,6 @@ import (

func TestParseCompose(t *testing.T) {
	var dt = []byte(`
version: "3"

services:
  db:
    build: ./db
@@ -27,35 +25,162 @@ services:
	c, err := ParseCompose(dt)
	require.NoError(t, err)

	require.Equal(t, 1, len(c.Group))
	sort.Strings(c.Group["default"].Targets)
	require.Equal(t, []string{"db", "webapp"}, c.Group["default"].Targets)
	require.Equal(t, 1, len(c.Groups))
	require.Equal(t, c.Groups[0].Name, "default")
	sort.Strings(c.Groups[0].Targets)
	require.Equal(t, []string{"db", "webapp"}, c.Groups[0].Targets)

	require.Equal(t, 2, len(c.Target))
	require.Equal(t, "./db", *c.Target["db"].Context)
	require.Equal(t, 2, len(c.Targets))
	sort.Slice(c.Targets, func(i, j int) bool {
		return c.Targets[i].Name < c.Targets[j].Name
	})
	require.Equal(t, "db", c.Targets[0].Name)
	require.Equal(t, "./db", *c.Targets[0].Context)

	require.Equal(t, "./dir", *c.Target["webapp"].Context)
	require.Equal(t, "Dockerfile-alternate", *c.Target["webapp"].Dockerfile)
	require.Equal(t, 1, len(c.Target["webapp"].Args))
	require.Equal(t, "123", c.Target["webapp"].Args["buildno"])
	require.Equal(t, "webapp", c.Targets[1].Name)
	require.Equal(t, "./dir", *c.Targets[1].Context)
	require.Equal(t, "Dockerfile-alternate", *c.Targets[1].Dockerfile)
	require.Equal(t, 1, len(c.Targets[1].Args))
	require.Equal(t, "123", c.Targets[1].Args["buildno"])
}

func TestNoBuildOutOfTreeService(t *testing.T) {
	var dt = []byte(`
services:
  external:
    image: "verycooldb:1337"
  webapp:
    build: ./db
`)
	c, err := ParseCompose(dt)
	require.NoError(t, err)
	require.Equal(t, 1, len(c.Groups))
}

func TestParseComposeTarget(t *testing.T) {
	var dt = []byte(`
version: "3.7"

services:
  db:
    build:
      context: ./db
      target: db
  webapp:
    build:
      context: .
      target: webapp
`)

	c, err := ParseCompose(dt)
	require.NoError(t, err)

	require.Equal(t, "db", *c.Target["db"].Target)
	require.Equal(t, "webapp", *c.Target["webapp"].Target)
	require.Equal(t, 2, len(c.Targets))
	sort.Slice(c.Targets, func(i, j int) bool {
		return c.Targets[i].Name < c.Targets[j].Name
	})
	require.Equal(t, "db", c.Targets[0].Name)
	require.Equal(t, "db", *c.Targets[0].Target)
	require.Equal(t, "webapp", c.Targets[1].Name)
	require.Equal(t, "webapp", *c.Targets[1].Target)
}

func TestComposeBuildWithoutContext(t *testing.T) {
	var dt = []byte(`
services:
  db:
    build:
      target: db
  webapp:
    build:
      context: .
      target: webapp
`)

	c, err := ParseCompose(dt)
	require.NoError(t, err)
	require.Equal(t, 2, len(c.Targets))
	sort.Slice(c.Targets, func(i, j int) bool {
		return c.Targets[i].Name < c.Targets[j].Name
	})
	require.Equal(t, c.Targets[0].Name, "db")
	require.Equal(t, "db", *c.Targets[0].Target)
	require.Equal(t, c.Targets[1].Name, "webapp")
	require.Equal(t, "webapp", *c.Targets[1].Target)
}

func TestBogusCompose(t *testing.T) {
	var dt = []byte(`
services:
  db:
    labels:
      - "foo"
  webapp:
    build:
      context: .
      target: webapp
`)

	_, err := ParseCompose(dt)
	require.Error(t, err)
	require.Contains(t, err.Error(), "has neither an image nor a build context specified: invalid compose project")
}

func TestAdvancedNetwork(t *testing.T) {
	var dt = []byte(`
services:
  db:
    networks:
      - example.com
    build:
      context: ./db
      target: db

networks:
  example.com:
    name: example.com
    driver: bridge
    ipam:
      config:
        - subnet: 10.5.0.0/24
          ip_range: 10.5.0.0/24
          gateway: 10.5.0.254
`)

	_, err := ParseCompose(dt)
	require.NoError(t, err)
}

func TestDependsOnList(t *testing.T) {
	var dt = []byte(`
version: "3.8"

services:
  example-container:
    image: example/fails:latest
    build:
      context: .
      dockerfile: Dockerfile
    depends_on:
      other-container:
        condition: service_healthy
    networks:
      default:
        aliases:
          - integration-tests

  other-container:
    image: example/other:latest
    healthcheck:
      test: ["CMD", "echo", "success"]
      retries: 5
      interval: 5s
      timeout: 10s
      start_period: 5s

networks:
  default:
    name: test-net
`)

	_, err := ParseCompose(dt)
	require.NoError(t, err)
}
bake/hcl.go
@@ -1,11 +1,78 @@
package bake

import "github.com/hashicorp/hcl"
import (
	"strings"

func ParseHCL(dt []byte) (*Config, error) {
	var c Config
	if err := hcl.Unmarshal(dt, &c); err != nil {
		return nil, err
	hcl "github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclparse"
	"github.com/moby/buildkit/solver/errdefs"
	"github.com/moby/buildkit/solver/pb"
)

func ParseHCLFile(dt []byte, fn string) (*hcl.File, bool, error) {
	var err error
	if strings.HasSuffix(fn, ".json") {
		f, diags := hclparse.NewParser().ParseJSON(dt, fn)
		if diags.HasErrors() {
			err = diags
		}
		return f, true, err
	}
	if strings.HasSuffix(fn, ".hcl") {
		f, diags := hclparse.NewParser().ParseHCL(dt, fn)
		if diags.HasErrors() {
			err = diags
		}
		return f, true, err
	}
	f, diags := hclparse.NewParser().ParseHCL(dt, fn+".hcl")
	if diags.HasErrors() {
		f, diags2 := hclparse.NewParser().ParseJSON(dt, fn+".json")
		if !diags2.HasErrors() {
			return f, true, nil
		}
		return nil, false, diags
	}
	return f, true, nil
}

func formatHCLError(err error, files []File) error {
	if err == nil {
		return nil
	}
	diags, ok := err.(hcl.Diagnostics)
	if !ok {
		return err
	}
	for _, d := range diags {
		if d.Severity != hcl.DiagError {
			continue
		}
		if d.Subject != nil {
			var dt []byte
			for _, f := range files {
				if d.Subject.Filename == f.Name {
					dt = f.Data
					break
				}
			}
			src := errdefs.Source{
				Info: &pb.SourceInfo{
					Filename: d.Subject.Filename,
					Data:     dt,
				},
				Ranges: []*pb.Range{toErrRange(d.Subject)},
			}
			err = errdefs.WithSource(err, src)
			break
		}
	}
	return err
}

func toErrRange(in *hcl.Range) *pb.Range {
	return &pb.Range{
		Start: pb.Position{Line: int32(in.Start.Line), Character: int32(in.Start.Column)},
		End:   pb.Position{Line: int32(in.End.Line), Character: int32(in.End.Column)},
	}
	return &c, nil
}
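ParseHCLFile dispatches on the file extension, and for unknown extensions falls back to trying native HCL first and JSON second. The returned bool reports whether the data was recognized as HCL/JSON at all, so a caller can try another loader (such as compose) when it is false. A minimal sketch of the call:

f, isHCL, err := ParseHCLFile(dt, "docker-bake.hcl")
if err != nil {
    return err
}
_ = isHCL // false only when the extension was unknown and neither syntax parsed
_ = f     // *hcl.File, ready for the hclparser package below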
bake/hcl_test.go
@@ -1,57 +1,622 @@
package bake

import (
	"os"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestParseHCL(t *testing.T) {
	var dt = []byte(`
group "default" {
	targets = ["db", "webapp"]
}

target "db" {
	context = "./db"
	tags = ["docker.io/tonistiigi/db"]
}

target "webapp" {
	context = "./dir"
	dockerfile = "Dockerfile-alternate"
	args = {
		buildno = "123"
func TestHCLBasic(t *testing.T) {
	t.Parallel()
	dt := []byte(`
group "default" {
	targets = ["db", "webapp"]
}
}

target "cross" {
	platforms = [
		"linux/amd64",
		"linux/arm64"
	]
}

target "webapp-plus" {
	inherits = ["webapp", "cross"]
	args = {
		IAMCROSS = "true"
target "db" {
	context = "./db"
	tags = ["docker.io/tonistiigi/db"]
}
}
`)

	c, err := ParseHCL(dt)
target "webapp" {
	context = "./dir"
	dockerfile = "Dockerfile-alternate"
	args = {
		buildno = "123"
	}
}

target "cross" {
	platforms = [
		"linux/amd64",
		"linux/arm64"
	]
}

target "webapp-plus" {
	inherits = ["webapp", "cross"]
	args = {
		IAMCROSS = "true"
	}
}
`)

	c, err := ParseFile(dt, "docker-bake.hcl")
	require.NoError(t, err)
	require.Equal(t, 1, len(c.Groups))
	require.Equal(t, "default", c.Groups[0].Name)
	require.Equal(t, []string{"db", "webapp"}, c.Groups[0].Targets)

	require.Equal(t, 4, len(c.Targets))
	require.Equal(t, c.Targets[0].Name, "db")
	require.Equal(t, "./db", *c.Targets[0].Context)

	require.Equal(t, c.Targets[1].Name, "webapp")
	require.Equal(t, 1, len(c.Targets[1].Args))
	require.Equal(t, "123", c.Targets[1].Args["buildno"])

	require.Equal(t, c.Targets[2].Name, "cross")
	require.Equal(t, 2, len(c.Targets[2].Platforms))
	require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[2].Platforms)

	require.Equal(t, c.Targets[3].Name, "webapp-plus")
	require.Equal(t, 1, len(c.Targets[3].Args))
	require.Equal(t, map[string]string{"IAMCROSS": "true"}, c.Targets[3].Args)
}

func TestHCLBasicInJSON(t *testing.T) {
	dt := []byte(`
{
	"group": {
		"default": {
			"targets": ["db", "webapp"]
		}
	},
	"target": {
		"db": {
			"context": "./db",
			"tags": ["docker.io/tonistiigi/db"]
		},
		"webapp": {
			"context": "./dir",
			"dockerfile": "Dockerfile-alternate",
			"args": {
				"buildno": "123"
			}
		},
		"cross": {
			"platforms": [
				"linux/amd64",
				"linux/arm64"
			]
		},
		"webapp-plus": {
			"inherits": ["webapp", "cross"],
			"args": {
				"IAMCROSS": "true"
			}
		}
	}
}
`)

	c, err := ParseFile(dt, "docker-bake.json")
	require.NoError(t, err)

	require.Equal(t, 1, len(c.Group))
	require.Equal(t, []string{"db", "webapp"}, c.Group["default"].Targets)
	require.Equal(t, 1, len(c.Groups))
	require.Equal(t, "default", c.Groups[0].Name)
	require.Equal(t, []string{"db", "webapp"}, c.Groups[0].Targets)

	require.Equal(t, 4, len(c.Target))
	require.Equal(t, "./db", *c.Target["db"].Context)
	require.Equal(t, 4, len(c.Targets))
	require.Equal(t, c.Targets[0].Name, "db")
	require.Equal(t, "./db", *c.Targets[0].Context)

	require.Equal(t, 1, len(c.Target["webapp"].Args))
	require.Equal(t, "123", c.Target["webapp"].Args["buildno"])
	require.Equal(t, c.Targets[1].Name, "webapp")
	require.Equal(t, 1, len(c.Targets[1].Args))
	require.Equal(t, "123", c.Targets[1].Args["buildno"])

	require.Equal(t, 2, len(c.Target["cross"].Platforms))
	require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Target["cross"].Platforms)
	require.Equal(t, c.Targets[2].Name, "cross")
	require.Equal(t, 2, len(c.Targets[2].Platforms))
	require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[2].Platforms)

	require.Equal(t, c.Targets[3].Name, "webapp-plus")
	require.Equal(t, 1, len(c.Targets[3].Args))
	require.Equal(t, map[string]string{"IAMCROSS": "true"}, c.Targets[3].Args)
}

func TestHCLWithFunctions(t *testing.T) {
	dt := []byte(`
group "default" {
	targets = ["webapp"]
}

target "webapp" {
	args = {
		buildno = "${add(123, 1)}"
	}
}
`)

	c, err := ParseFile(dt, "docker-bake.hcl")
	require.NoError(t, err)

	require.Equal(t, 1, len(c.Groups))
	require.Equal(t, "default", c.Groups[0].Name)
	require.Equal(t, []string{"webapp"}, c.Groups[0].Targets)

	require.Equal(t, 1, len(c.Targets))
	require.Equal(t, c.Targets[0].Name, "webapp")
	require.Equal(t, "124", c.Targets[0].Args["buildno"])
}

func TestHCLWithUserDefinedFunctions(t *testing.T) {
	dt := []byte(`
function "increment" {
	params = [number]
	result = number + 1
}

group "default" {
	targets = ["webapp"]
}

target "webapp" {
	args = {
		buildno = "${increment(123)}"
	}
}
`)

	c, err := ParseFile(dt, "docker-bake.hcl")
	require.NoError(t, err)

	require.Equal(t, 1, len(c.Groups))
	require.Equal(t, "default", c.Groups[0].Name)
	require.Equal(t, []string{"webapp"}, c.Groups[0].Targets)

	require.Equal(t, 1, len(c.Targets))
	require.Equal(t, c.Targets[0].Name, "webapp")
	require.Equal(t, "124", c.Targets[0].Args["buildno"])
}

func TestHCLWithVariables(t *testing.T) {
	dt := []byte(`
variable "BUILD_NUMBER" {
	default = "123"
}

group "default" {
	targets = ["webapp"]
}

target "webapp" {
	args = {
		buildno = "${BUILD_NUMBER}"
	}
}
`)

	c, err := ParseFile(dt, "docker-bake.hcl")
	require.NoError(t, err)

	require.Equal(t, 1, len(c.Groups))
	require.Equal(t, "default", c.Groups[0].Name)
	require.Equal(t, []string{"webapp"}, c.Groups[0].Targets)

	require.Equal(t, 1, len(c.Targets))
	require.Equal(t, c.Targets[0].Name, "webapp")
	require.Equal(t, "123", c.Targets[0].Args["buildno"])

	os.Setenv("BUILD_NUMBER", "456")

	c, err = ParseFile(dt, "docker-bake.hcl")
	require.NoError(t, err)

	require.Equal(t, 1, len(c.Groups))
	require.Equal(t, "default", c.Groups[0].Name)
	require.Equal(t, []string{"webapp"}, c.Groups[0].Targets)

	require.Equal(t, 1, len(c.Targets))
	require.Equal(t, c.Targets[0].Name, "webapp")
	require.Equal(t, "456", c.Targets[0].Args["buildno"])
}

func TestHCLWithVariablesInFunctions(t *testing.T) {
	dt := []byte(`
variable "REPO" {
	default = "user/repo"
}
function "tag" {
	params = [tag]
	result = ["${REPO}:${tag}"]
}

target "webapp" {
	tags = tag("v1")
}
`)

	c, err := ParseFile(dt, "docker-bake.hcl")
	require.NoError(t, err)

	require.Equal(t, 1, len(c.Targets))
	require.Equal(t, c.Targets[0].Name, "webapp")
	require.Equal(t, []string{"user/repo:v1"}, c.Targets[0].Tags)

	os.Setenv("REPO", "docker/buildx")

	c, err = ParseFile(dt, "docker-bake.hcl")
	require.NoError(t, err)

	require.Equal(t, 1, len(c.Targets))
	require.Equal(t, c.Targets[0].Name, "webapp")
	require.Equal(t, []string{"docker/buildx:v1"}, c.Targets[0].Tags)
}

func TestHCLMultiFileSharedVariables(t *testing.T) {
	dt := []byte(`
variable "FOO" {
	default = "abc"
}
target "app" {
	args = {
		v1 = "pre-${FOO}"
	}
}
`)
	dt2 := []byte(`
target "app" {
	args = {
		v2 = "${FOO}-post"
	}
}
`)

	c, err := ParseFiles([]File{
		{Data: dt, Name: "c1.hcl"},
		{Data: dt2, Name: "c2.hcl"},
	}, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(c.Targets))
	require.Equal(t, c.Targets[0].Name, "app")
	require.Equal(t, "pre-abc", c.Targets[0].Args["v1"])
	require.Equal(t, "abc-post", c.Targets[0].Args["v2"])

	os.Setenv("FOO", "def")

	c, err = ParseFiles([]File{
		{Data: dt, Name: "c1.hcl"},
		{Data: dt2, Name: "c2.hcl"},
	}, nil)
	require.NoError(t, err)

	require.Equal(t, 1, len(c.Targets))
	require.Equal(t, c.Targets[0].Name, "app")
	require.Equal(t, "pre-def", c.Targets[0].Args["v1"])
	require.Equal(t, "def-post", c.Targets[0].Args["v2"])
}

func TestHCLVarsWithVars(t *testing.T) {
	os.Unsetenv("FOO")
	dt := []byte(`
variable "FOO" {
	default = upper("${BASE}def")
}
variable "BAR" {
	default = "-${FOO}-"
}
target "app" {
	args = {
		v1 = "pre-${BAR}"
	}
}
`)
	dt2 := []byte(`
variable "BASE" {
	default = "abc"
}
target "app" {
	args = {
		v2 = "${FOO}-post"
	}
}
`)

	c, err := ParseFiles([]File{
		{Data: dt, Name: "c1.hcl"},
		{Data: dt2, Name: "c2.hcl"},
	}, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(c.Targets))
	require.Equal(t, c.Targets[0].Name, "app")
	require.Equal(t, "pre--ABCDEF-", c.Targets[0].Args["v1"])
	require.Equal(t, "ABCDEF-post", c.Targets[0].Args["v2"])

	os.Setenv("BASE", "new")

	c, err = ParseFiles([]File{
		{Data: dt, Name: "c1.hcl"},
		{Data: dt2, Name: "c2.hcl"},
	}, nil)
	require.NoError(t, err)

	require.Equal(t, 1, len(c.Targets))
	require.Equal(t, c.Targets[0].Name, "app")
	require.Equal(t, "pre--NEWDEF-", c.Targets[0].Args["v1"])
	require.Equal(t, "NEWDEF-post", c.Targets[0].Args["v2"])
}

func TestHCLTypedVariables(t *testing.T) {
	os.Unsetenv("FOO")
	dt := []byte(`
variable "FOO" {
	default = 3
}
variable "IS_FOO" {
	default = true
}
target "app" {
	args = {
		v1 = FOO > 5 ? "higher" : "lower"
		v2 = IS_FOO ? "yes" : "no"
	}
}
`)

	c, err := ParseFile(dt, "docker-bake.hcl")
	require.NoError(t, err)

	require.Equal(t, 1, len(c.Targets))
	require.Equal(t, c.Targets[0].Name, "app")
	require.Equal(t, "lower", c.Targets[0].Args["v1"])
	require.Equal(t, "yes", c.Targets[0].Args["v2"])

	os.Setenv("FOO", "5.1")
	os.Setenv("IS_FOO", "0")

	c, err = ParseFile(dt, "docker-bake.hcl")
	require.NoError(t, err)

	require.Equal(t, 1, len(c.Targets))
	require.Equal(t, c.Targets[0].Name, "app")
	require.Equal(t, "higher", c.Targets[0].Args["v1"])
	require.Equal(t, "no", c.Targets[0].Args["v2"])

	os.Setenv("FOO", "NaN")
	_, err = ParseFile(dt, "docker-bake.hcl")
	require.Error(t, err)
	require.Contains(t, err.Error(), "failed to parse FOO as number")

	os.Setenv("FOO", "0")
	os.Setenv("IS_FOO", "maybe")

	_, err = ParseFile(dt, "docker-bake.hcl")
	require.Error(t, err)
	require.Contains(t, err.Error(), "failed to parse IS_FOO as bool")
}

func TestHCLVariableCycle(t *testing.T) {
	dt := []byte(`
variable "FOO" {
	default = BAR
}
variable "FOO2" {
	default = FOO
}
variable "BAR" {
	default = FOO
}
target "app" {}
`)

	_, err := ParseFile(dt, "docker-bake.hcl")
	require.Error(t, err)
	require.Contains(t, err.Error(), "variable cycle not allowed")
}

func TestHCLAttrs(t *testing.T) {
	dt := []byte(`
FOO="abc"
BAR="attr-${FOO}def"
target "app" {
	args = {
		"v1": BAR
	}
}
`)

	c, err := ParseFile(dt, "docker-bake.hcl")
	require.NoError(t, err)

	require.Equal(t, 1, len(c.Targets))
	require.Equal(t, c.Targets[0].Name, "app")
	require.Equal(t, "attr-abcdef", c.Targets[0].Args["v1"])

	// env does not apply if no variable
	os.Setenv("FOO", "bar")
	c, err = ParseFile(dt, "docker-bake.hcl")
	require.NoError(t, err)

	require.Equal(t, 1, len(c.Targets))
	require.Equal(t, c.Targets[0].Name, "app")
	require.Equal(t, "attr-abcdef", c.Targets[0].Args["v1"])
	// attr-multifile
}

func TestHCLAttrsCustomType(t *testing.T) {
	dt := []byte(`
platforms=["linux/arm64", "linux/amd64"]
target "app" {
	platforms = platforms
	args = {
		"v1": platforms[0]
	}
}
`)

	c, err := ParseFile(dt, "docker-bake.hcl")
	require.NoError(t, err)

	require.Equal(t, 1, len(c.Targets))
	require.Equal(t, c.Targets[0].Name, "app")
	require.Equal(t, []string{"linux/arm64", "linux/amd64"}, c.Targets[0].Platforms)
	require.Equal(t, "linux/arm64", c.Targets[0].Args["v1"])
}

func TestHCLMultiFileAttrs(t *testing.T) {
	os.Unsetenv("FOO")
	dt := []byte(`
variable "FOO" {
	default = "abc"
}
target "app" {
	args = {
		v1 = "pre-${FOO}"
	}
}
`)
	dt2 := []byte(`
FOO="def"
`)

	c, err := ParseFiles([]File{
		{Data: dt, Name: "c1.hcl"},
		{Data: dt2, Name: "c2.hcl"},
	}, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(c.Targets))
	require.Equal(t, c.Targets[0].Name, "app")
	require.Equal(t, "pre-def", c.Targets[0].Args["v1"])

	os.Setenv("FOO", "ghi")

	c, err = ParseFiles([]File{
		{Data: dt, Name: "c1.hcl"},
		{Data: dt2, Name: "c2.hcl"},
	}, nil)
	require.NoError(t, err)

	require.Equal(t, 1, len(c.Targets))
	require.Equal(t, c.Targets[0].Name, "app")
	require.Equal(t, "pre-ghi", c.Targets[0].Args["v1"])
}

func TestJSONAttributes(t *testing.T) {
	dt := []byte(`{"FOO": "abc", "variable": {"BAR": {"default": "def"}}, "target": { "app": { "args": {"v1": "pre-${FOO}-${BAR}"}} } }`)

	c, err := ParseFile(dt, "docker-bake.json")
	require.NoError(t, err)

	require.Equal(t, 1, len(c.Targets))
	require.Equal(t, c.Targets[0].Name, "app")
	require.Equal(t, "pre-abc-def", c.Targets[0].Args["v1"])
}

func TestJSONFunctions(t *testing.T) {
	dt := []byte(`{
	"FOO": "abc",
	"function": {
		"myfunc": {
			"params": ["inp"],
			"result": "<${upper(inp)}-${FOO}>"
		}
	},
	"target": {
		"app": {
			"args": {
				"v1": "pre-${myfunc(\"foo\")}"
			}
		}
	}}`)

	c, err := ParseFile(dt, "docker-bake.json")
	require.NoError(t, err)

	require.Equal(t, 1, len(c.Targets))
	require.Equal(t, c.Targets[0].Name, "app")
	require.Equal(t, "pre-<FOO-abc>", c.Targets[0].Args["v1"])
}

func TestHCLFunctionInAttr(t *testing.T) {
	dt := []byte(`
function "brace" {
	params = [inp]
	result = "[${inp}]"
}
function "myupper" {
	params = [val]
	result = "${upper(val)} <> ${brace(v2)}"
}

v1=myupper("foo")
v2=lower("BAZ")
target "app" {
	args = {
		"v1": v1
	}
}
`)

	c, err := ParseFile(dt, "docker-bake.hcl")
	require.NoError(t, err)

	require.Equal(t, 1, len(c.Targets))
	require.Equal(t, c.Targets[0].Name, "app")
	require.Equal(t, "FOO <> [baz]", c.Targets[0].Args["v1"])
}

func TestHCLCombineCompose(t *testing.T) {
	dt := []byte(`
target "app" {
	context = "dir"
	args = {
		v1 = "foo"
	}
}
`)
	dt2 := []byte(`
version: "3"

services:
  app:
    build:
      dockerfile: Dockerfile-alternate
      args:
        v2: "bar"
`)

	c, err := ParseFiles([]File{
		{Data: dt, Name: "c1.hcl"},
		{Data: dt2, Name: "c2.yml"},
	}, nil)
	require.NoError(t, err)

	require.Equal(t, 1, len(c.Targets))
	require.Equal(t, c.Targets[0].Name, "app")
	require.Equal(t, "foo", c.Targets[0].Args["v1"])
	require.Equal(t, "bar", c.Targets[0].Args["v2"])
	require.Equal(t, "dir", *c.Targets[0].Context)
	require.Equal(t, "Dockerfile-alternate", *c.Targets[0].Dockerfile)
}

func TestHCLBuiltinVars(t *testing.T) {
	dt := []byte(`
target "app" {
	context = BAKE_CMD_CONTEXT
	dockerfile = "test"
}
`)

	c, err := ParseFiles([]File{
		{Data: dt, Name: "c1.hcl"},
	}, map[string]string{
		"BAKE_CMD_CONTEXT": "foo",
	})
	require.NoError(t, err)

	require.Equal(t, 1, len(c.Targets))
	require.Equal(t, c.Targets[0].Name, "app")
	require.Equal(t, "foo", *c.Targets[0].Context)
	require.Equal(t, "test", *c.Targets[0].Dockerfile)
}
bake/hclparser/expr.go (new file)
@@ -0,0 +1,153 @@
package hclparser

import (
	"reflect"
	"unsafe"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/pkg/errors"
)

func funcCalls(exp hcl.Expression) ([]string, hcl.Diagnostics) {
	node, ok := exp.(hclsyntax.Node)
	if !ok {
		fns, err := jsonFuncCallsRecursive(exp)
		if err != nil {
			return nil, hcl.Diagnostics{
				&hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid expression",
					Detail:   err.Error(),
					Subject:  exp.Range().Ptr(),
					Context:  exp.Range().Ptr(),
				},
			}
		}
		return fns, nil
	}

	var funcnames []string
	hcldiags := hclsyntax.VisitAll(node, func(n hclsyntax.Node) hcl.Diagnostics {
		if fe, ok := n.(*hclsyntax.FunctionCallExpr); ok {
			funcnames = append(funcnames, fe.Name)
		}
		return nil
	})
	if hcldiags.HasErrors() {
		return nil, hcldiags
	}
	return funcnames, nil
}

func jsonFuncCallsRecursive(exp hcl.Expression) ([]string, error) {
	je, ok := exp.(jsonExp)
	if !ok {
		return nil, errors.Errorf("invalid expression type %T", exp)
	}
	m := map[string]struct{}{}
	for _, e := range elementExpressions(je, exp) {
		if err := appendJSONFuncCalls(e, m); err != nil {
			return nil, err
		}
	}
	arr := make([]string, 0, len(m))
	for n := range m {
		arr = append(arr, n)
	}
	return arr, nil
}

func appendJSONFuncCalls(exp hcl.Expression, m map[string]struct{}) error {
	v := reflect.ValueOf(exp)
	if v.Kind() != reflect.Ptr || v.IsNil() {
		return errors.Errorf("invalid json expression kind %T %v", exp, v.Kind())
	}
	src := v.Elem().FieldByName("src")
	if src.IsZero() {
		return errors.Errorf("%v has no property src", v.Elem().Type())
	}
	if src.Kind() != reflect.Interface {
		return errors.Errorf("%v src is not interface: %v", src.Type(), src.Kind())
	}
	src = src.Elem()
	if src.IsNil() {
		return nil
	}
	if src.Kind() == reflect.Ptr {
		src = src.Elem()
	}
	if src.Kind() != reflect.Struct {
		return errors.Errorf("%v is not struct: %v", src.Type(), src.Kind())
	}

	// hcl/v2/json/ast#stringVal
	val := src.FieldByName("Value")
	if val.IsZero() {
		return nil
	}
	rng := src.FieldByName("SrcRange")
	if rng.IsZero() {
		return nil
	}
	var stringVal struct {
		Value    string
		SrcRange hcl.Range
	}

	if !val.Type().AssignableTo(reflect.ValueOf(stringVal.Value).Type()) {
		return nil
	}
	if !rng.Type().AssignableTo(reflect.ValueOf(stringVal.SrcRange).Type()) {
		return nil
	}
	// reflect.Set does not work for unexported fields
	stringVal.Value = *(*string)(unsafe.Pointer(val.UnsafeAddr()))
	stringVal.SrcRange = *(*hcl.Range)(unsafe.Pointer(rng.UnsafeAddr()))

	expr, diags := hclsyntax.ParseExpression([]byte(stringVal.Value), stringVal.SrcRange.Filename, stringVal.SrcRange.Start)
	if diags.HasErrors() {
		return nil
	}

	fns, err := funcCalls(expr)
	if err != nil {
		return err
	}

	for _, fn := range fns {
		m[fn] = struct{}{}
	}

	return nil
}

type jsonExp interface {
	ExprList() []hcl.Expression
	ExprMap() []hcl.KeyValuePair
}

func elementExpressions(je jsonExp, exp hcl.Expression) []hcl.Expression {
	list := je.ExprList()
	if len(list) != 0 {
		exp := make([]hcl.Expression, 0, len(list))
		for _, e := range list {
			if je, ok := e.(jsonExp); ok {
				exp = append(exp, elementExpressions(je, e)...)
			}
		}
		return exp
	}
	kvlist := je.ExprMap()
	if len(kvlist) != 0 {
		exp := make([]hcl.Expression, 0, len(kvlist)*2)
		for _, p := range kvlist {
			exp = append(exp, p.Key)
			if je, ok := p.Value.(jsonExp); ok {
				exp = append(exp, elementExpressions(je, p.Value)...)
			}
		}
		return exp
	}
	return []hcl.Expression{exp}
}
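funcCalls is what lets the parser below discover which functions an expression depends on before evaluating it. A minimal sketch of its use with native HCL syntax (the input string is illustrative):

expr, diags := hclsyntax.ParseExpression([]byte(`"${increment(123)}"`), "test.hcl", hcl.Pos{Line: 1, Column: 1})
if diags.HasErrors() {
    return diags
}
fns, fdiags := funcCalls(expr)
if fdiags.HasErrors() {
    return fdiags
}
// fns == []string{"increment"}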
bake/hclparser/hclparser.go (new file)
@@ -0,0 +1,498 @@
package hclparser

import (
	"fmt"
	"math"
	"math/big"
	"reflect"
	"strconv"
	"strings"

	"github.com/docker/buildx/util/userfunc"
	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/gohcl"
	"github.com/pkg/errors"
	"github.com/zclconf/go-cty/cty"
)

type Opt struct {
	LookupVar func(string) (string, bool)
	Vars      map[string]string
}

type variable struct {
	Name    string         `json:"-" hcl:"name,label"`
	Default *hcl.Attribute `json:"default,omitempty" hcl:"default,optional"`
	Body    hcl.Body       `json:"-" hcl:",body"`
}

type functionDef struct {
	Name     string         `json:"-" hcl:"name,label"`
	Params   *hcl.Attribute `json:"params,omitempty" hcl:"params"`
	Variadic *hcl.Attribute `json:"variadic_param,omitempty" hcl:"variadic_params"`
	Result   *hcl.Attribute `json:"result,omitempty" hcl:"result"`
}

type inputs struct {
	Variables []*variable    `hcl:"variable,block"`
	Functions []*functionDef `hcl:"function,block"`

	Remain hcl.Body `json:"-" hcl:",remain"`
}

type parser struct {
	opt Opt

	vars  map[string]*variable
	attrs map[string]*hcl.Attribute
	funcs map[string]*functionDef

	ectx *hcl.EvalContext

	progress  map[string]struct{}
	progressF map[string]struct{}
	doneF     map[string]struct{}
}

func (p *parser) loadDeps(exp hcl.Expression, exclude map[string]struct{}) hcl.Diagnostics {
	fns, hcldiags := funcCalls(exp)
	if hcldiags.HasErrors() {
		return hcldiags
	}

	for _, fn := range fns {
		if err := p.resolveFunction(fn); err != nil {
			return hcl.Diagnostics{
				&hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid expression",
					Detail:   err.Error(),
					Subject:  exp.Range().Ptr(),
					Context:  exp.Range().Ptr(),
				},
			}
		}
	}

	for _, v := range exp.Variables() {
		if _, ok := exclude[v.RootName()]; ok {
			continue
		}
		if err := p.resolveValue(v.RootName()); err != nil {
			return hcl.Diagnostics{
				&hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid expression",
					Detail:   err.Error(),
					Subject:  v.SourceRange().Ptr(),
					Context:  v.SourceRange().Ptr(),
				},
			}
		}
	}

	return nil
}

func (p *parser) resolveFunction(name string) error {
	if _, ok := p.doneF[name]; ok {
		return nil
	}
	f, ok := p.funcs[name]
	if !ok {
		if _, ok := p.ectx.Functions[name]; ok {
			return nil
		}
		return errors.Errorf("undefined function %s", name)
	}
	if _, ok := p.progressF[name]; ok {
		return errors.Errorf("function cycle not allowed for %s", name)
	}
	p.progressF[name] = struct{}{}

	paramExprs, paramsDiags := hcl.ExprList(f.Params.Expr)
	if paramsDiags.HasErrors() {
		return paramsDiags
	}
	var diags hcl.Diagnostics
	params := map[string]struct{}{}
	for _, paramExpr := range paramExprs {
		param := hcl.ExprAsKeyword(paramExpr)
		if param == "" {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid param element",
				Detail:   "Each parameter name must be an identifier.",
				Subject:  paramExpr.Range().Ptr(),
			})
		}
		params[param] = struct{}{}
	}
	var variadic hcl.Expression
	if f.Variadic != nil {
		variadic = f.Variadic.Expr
		param := hcl.ExprAsKeyword(variadic)
		if param == "" {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid param element",
				Detail:   "Each parameter name must be an identifier.",
				Subject:  f.Variadic.Range.Ptr(),
			})
		}
		params[param] = struct{}{}
	}
	if diags.HasErrors() {
		return diags
	}

	if diags := p.loadDeps(f.Result.Expr, params); diags.HasErrors() {
		return diags
	}

	v, diags := userfunc.NewFunction(f.Params.Expr, variadic, f.Result.Expr, func() *hcl.EvalContext {
		return p.ectx
	})
	if diags.HasErrors() {
		return diags
	}
	p.doneF[name] = struct{}{}
	p.ectx.Functions[name] = v

	return nil
}

func (p *parser) resolveValue(name string) (err error) {
	if _, ok := p.ectx.Variables[name]; ok {
		return nil
	}
	if _, ok := p.progress[name]; ok {
		return errors.Errorf("variable cycle not allowed for %s", name)
	}
	p.progress[name] = struct{}{}

	var v *cty.Value
	defer func() {
		if v != nil {
			p.ectx.Variables[name] = *v
		}
	}()

	def, ok := p.attrs[name]
	if _, builtin := p.opt.Vars[name]; !ok && !builtin {
		vr, ok := p.vars[name]
		if !ok {
			return errors.Errorf("undefined variable %q", name)
		}
		def = vr.Default
	}

	if def == nil {
		val, ok := p.opt.Vars[name]
		if !ok {
			val, _ = p.opt.LookupVar(name)
		}
		vv := cty.StringVal(val)
		v = &vv
		return
	}

	if diags := p.loadDeps(def.Expr, nil); diags.HasErrors() {
		return diags
	}
	vv, diags := def.Expr.Value(p.ectx)
	if diags.HasErrors() {
		return diags
	}

	_, isVar := p.vars[name]

	if envv, ok := p.opt.LookupVar(name); ok && isVar {
		if vv.Type().Equals(cty.Bool) {
			b, err := strconv.ParseBool(envv)
			if err != nil {
				return errors.Wrapf(err, "failed to parse %s as bool", name)
			}
			vv := cty.BoolVal(b)
			v = &vv
			return nil
		} else if vv.Type().Equals(cty.String) {
			vv := cty.StringVal(envv)
			v = &vv
			return nil
		} else if vv.Type().Equals(cty.Number) {
			n, err := strconv.ParseFloat(envv, 64)
			if err == nil && (math.IsNaN(n) || math.IsInf(n, 0)) {
				err = errors.Errorf("invalid number value")
			}
			if err != nil {
				return errors.Wrapf(err, "failed to parse %s as number", name)
			}
			vv := cty.NumberVal(big.NewFloat(n))
			v = &vv
			return nil
		} else {
			// TODO: support lists with csv values
			return errors.Errorf("unsupported type %s for variable %s", vv.Type(), name)
		}
	}
	v = &vv
	return nil
}
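resolveValue and resolveFunction are driven lazily from the Parse entry point defined next; callers wire in the environment through Opt. A minimal sketch of typical wiring (the field names on the config struct are illustrative, not taken from this diff):

var c struct {
    Groups  []*Group  `hcl:"group,block"`
    Targets []*Target `hcl:"target,block"`
}
diags := hclparser.Parse(f.Body, hclparser.Opt{
    LookupVar: os.LookupEnv,                               // env fallback for variable blocks
    Vars:      map[string]string{"BAKE_CMD_CONTEXT": "."}, // built-ins; these names are reserved
}, &c)
if diags.HasErrors() {
    return diags
}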
|
||||
|
||||
func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
	reserved := map[string]struct{}{}
	schema, _ := gohcl.ImpliedBodySchema(val)

	for _, bs := range schema.Blocks {
		reserved[bs.Type] = struct{}{}
	}
	for k := range opt.Vars {
		reserved[k] = struct{}{}
	}

	var defs inputs
	if err := gohcl.DecodeBody(b, nil, &defs); err != nil {
		return err
	}

	if opt.LookupVar == nil {
		opt.LookupVar = func(string) (string, bool) {
			return "", false
		}
	}

	p := &parser{
		opt: opt,

		vars:  map[string]*variable{},
		attrs: map[string]*hcl.Attribute{},
		funcs: map[string]*functionDef{},

		progress:  map[string]struct{}{},
		progressF: map[string]struct{}{},
		doneF:     map[string]struct{}{},
		ectx: &hcl.EvalContext{
			Variables: map[string]cty.Value{},
			Functions: stdlibFunctions,
		},
	}

	for _, v := range defs.Variables {
		// TODO: validate name
		if _, ok := reserved[v.Name]; ok {
			continue
		}
		p.vars[v.Name] = v
	}
	for _, v := range defs.Functions {
		// TODO: validate name
		if _, ok := reserved[v.Name]; ok {
			continue
		}
		p.funcs[v.Name] = v
	}

	attrs, diags := b.JustAttributes()
	if diags.HasErrors() {
		for _, d := range diags {
			if d.Detail != "Blocks are not allowed here." {
				return diags
			}
		}
	}

	for _, v := range attrs {
		if _, ok := reserved[v.Name]; ok {
			continue
		}
		p.attrs[v.Name] = v
	}
	delete(p.attrs, "function")

	for k := range p.opt.Vars {
		_ = p.resolveValue(k)
	}

	for k := range p.attrs {
		if err := p.resolveValue(k); err != nil {
			if diags, ok := err.(hcl.Diagnostics); ok {
				return diags
			}
			return hcl.Diagnostics{
				&hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid attribute",
					Detail:   err.Error(),
					Subject:  &p.attrs[k].Range,
					Context:  &p.attrs[k].Range,
				},
			}
		}
	}

	for k := range p.vars {
		if err := p.resolveValue(k); err != nil {
			if diags, ok := err.(hcl.Diagnostics); ok {
				return diags
			}
			r := p.vars[k].Body.MissingItemRange()
			return hcl.Diagnostics{
				&hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid value",
					Detail:   err.Error(),
					Subject:  &r,
					Context:  &r,
				},
			}
		}
	}

	for k := range p.funcs {
		if err := p.resolveFunction(k); err != nil {
			if diags, ok := err.(hcl.Diagnostics); ok {
				return diags
			}
			return hcl.Diagnostics{
				&hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid function",
					Detail:   err.Error(),
					Subject:  &p.funcs[k].Params.Range,
					Context:  &p.funcs[k].Params.Range,
				},
			}
		}
	}

	content, _, diags := b.PartialContent(schema)
	if diags.HasErrors() {
		return diags
	}

	for _, a := range content.Attributes {
		return hcl.Diagnostics{
			&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid attribute",
				Detail:   "global attributes currently not supported",
				Subject:  &a.Range,
				Context:  &a.Range,
			},
		}
	}

	m := map[string]map[string][]*hcl.Block{}
	for _, b := range content.Blocks {
		if len(b.Labels) == 0 || len(b.Labels) > 1 {
			return hcl.Diagnostics{
				&hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid block",
					Detail:   fmt.Sprintf("invalid block label: %v", b.Labels),
					Subject:  &b.LabelRanges[0],
					Context:  &b.LabelRanges[0],
				},
			}
		}
		bm, ok := m[b.Type]
		if !ok {
			bm = map[string][]*hcl.Block{}
			m[b.Type] = bm
		}

		lbl := b.Labels[0]
		bm[lbl] = append(bm[lbl], b)
	}

	vt := reflect.ValueOf(val).Elem().Type()
	numFields := vt.NumField()

	type value struct {
		reflect.Value
		idx int
	}
	type field struct {
		idx    int
		typ    reflect.Type
		values map[string]value
	}
	types := map[string]field{}

	for i := 0; i < numFields; i++ {
		tags := strings.Split(vt.Field(i).Tag.Get("hcl"), ",")

		types[tags[0]] = field{
			idx:    i,
			typ:    vt.Field(i).Type,
			values: make(map[string]value),
		}
	}

	diags = hcl.Diagnostics{}
	for _, b := range content.Blocks {
		v := reflect.ValueOf(val)

		t, ok := types[b.Type]
		if !ok {
			continue
		}

		vv := reflect.New(t.typ.Elem().Elem())
		diag := gohcl.DecodeBody(b.Body, p.ectx, vv.Interface())
		if diag.HasErrors() {
			diags = append(diags, diag...)
			continue
		}

		lblIndex := setLabel(vv, b.Labels[0])

		oldValue, exists := t.values[b.Labels[0]]
		if !exists && lblIndex != -1 {
			if v.Elem().Field(t.idx).Type().Kind() == reflect.Slice {
				for i := 0; i < v.Elem().Field(t.idx).Len(); i++ {
					if b.Labels[0] == v.Elem().Field(t.idx).Index(i).Elem().Field(lblIndex).String() {
						exists = true
						oldValue = value{Value: v.Elem().Field(t.idx).Index(i), idx: i}
						break
					}
				}
			}
		}
		if exists {
			if m := oldValue.Value.MethodByName("Merge"); m.IsValid() {
				m.Call([]reflect.Value{vv})
			} else {
				v.Elem().Field(t.idx).Index(oldValue.idx).Set(vv)
			}
		} else {
			slice := v.Elem().Field(t.idx)
			if slice.IsNil() {
				slice = reflect.New(t.typ).Elem()
			}
			t.values[b.Labels[0]] = value{Value: vv, idx: slice.Len()}
			v.Elem().Field(t.idx).Set(reflect.Append(slice, vv))
		}
	}
	if diags.HasErrors() {
		return diags
	}

	return nil
}

func setLabel(v reflect.Value, lbl string) int {
	// cache field index?
	numFields := v.Elem().Type().NumField()
	for i := 0; i < numFields; i++ {
		for _, t := range strings.Split(v.Elem().Type().Field(i).Tag.Get("hcl"), ",") {
			if t == "label" {
				v.Elem().Field(i).Set(reflect.ValueOf(lbl))
				return i
			}
		}
	}
	return -1
}
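For orientation, a minimal sketch (not part of this diff) of how a caller might drive Parse. The target/config struct shapes, the use of hclparse, and the sample HCL are assumptions for illustration only:

package main

import (
	"fmt"

	"github.com/docker/buildx/bake/hclparser"
	"github.com/hashicorp/hcl/v2/hclparse"
)

type target struct {
	Name    string  `hcl:"name,label"`
	Context *string `hcl:"context,optional"`
}

type config struct {
	Targets []*target `hcl:"target,block"`
}

func main() {
	src := `
variable "CTX" { default = "." }
target "app" { context = upper(CTX) }
`
	f, diags := hclparse.NewParser().ParseHCL([]byte(src), "docker-bake.hcl")
	if diags.HasErrors() {
		panic(diags)
	}
	var c config
	// Parse resolves variables and functions, then decodes target blocks into c.
	if diags := hclparser.Parse(f.Body, hclparser.Opt{
		LookupVar: func(string) (string, bool) { return "", false },
	}, &c); diags.HasErrors() {
		panic(diags)
	}
	fmt.Println(*c.Targets[0].Context)
}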
bake/hclparser/stdlib.go (new file, 111 lines)
@@ -0,0 +1,111 @@
package hclparser

import (
	"github.com/hashicorp/go-cty-funcs/cidr"
	"github.com/hashicorp/go-cty-funcs/crypto"
	"github.com/hashicorp/go-cty-funcs/encoding"
	"github.com/hashicorp/go-cty-funcs/uuid"
	"github.com/hashicorp/hcl/v2/ext/tryfunc"
	"github.com/hashicorp/hcl/v2/ext/typeexpr"
	"github.com/zclconf/go-cty/cty/function"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

var stdlibFunctions = map[string]function.Function{
	"absolute":               stdlib.AbsoluteFunc,
	"add":                    stdlib.AddFunc,
	"and":                    stdlib.AndFunc,
	"base64decode":           encoding.Base64DecodeFunc,
	"base64encode":           encoding.Base64EncodeFunc,
	"bcrypt":                 crypto.BcryptFunc,
	"byteslen":               stdlib.BytesLenFunc,
	"bytesslice":             stdlib.BytesSliceFunc,
	"can":                    tryfunc.CanFunc,
	"ceil":                   stdlib.CeilFunc,
	"chomp":                  stdlib.ChompFunc,
	"chunklist":              stdlib.ChunklistFunc,
	"cidrhost":               cidr.HostFunc,
	"cidrnetmask":            cidr.NetmaskFunc,
	"cidrsubnet":             cidr.SubnetFunc,
	"cidrsubnets":            cidr.SubnetsFunc,
	"csvdecode":              stdlib.CSVDecodeFunc,
	"coalesce":               stdlib.CoalesceFunc,
	"coalescelist":           stdlib.CoalesceListFunc,
	"compact":                stdlib.CompactFunc,
	"concat":                 stdlib.ConcatFunc,
	"contains":               stdlib.ContainsFunc,
	"convert":                typeexpr.ConvertFunc,
	"distinct":               stdlib.DistinctFunc,
	"divide":                 stdlib.DivideFunc,
	"element":                stdlib.ElementFunc,
	"equal":                  stdlib.EqualFunc,
	"flatten":                stdlib.FlattenFunc,
	"floor":                  stdlib.FloorFunc,
	"formatdate":             stdlib.FormatDateFunc,
	"format":                 stdlib.FormatFunc,
	"formatlist":             stdlib.FormatListFunc,
	"greaterthan":            stdlib.GreaterThanFunc,
	"greaterthanorequalto":   stdlib.GreaterThanOrEqualToFunc,
	"hasindex":               stdlib.HasIndexFunc,
	"indent":                 stdlib.IndentFunc,
	"index":                  stdlib.IndexFunc,
	"int":                    stdlib.IntFunc,
	"jsondecode":             stdlib.JSONDecodeFunc,
	"jsonencode":             stdlib.JSONEncodeFunc,
	"keys":                   stdlib.KeysFunc,
	"join":                   stdlib.JoinFunc,
	"length":                 stdlib.LengthFunc,
	"lessthan":               stdlib.LessThanFunc,
	"lessthanorequalto":      stdlib.LessThanOrEqualToFunc,
	"log":                    stdlib.LogFunc,
	"lookup":                 stdlib.LookupFunc,
	"lower":                  stdlib.LowerFunc,
	"max":                    stdlib.MaxFunc,
	"md5":                    crypto.Md5Func,
	"merge":                  stdlib.MergeFunc,
	"min":                    stdlib.MinFunc,
	"modulo":                 stdlib.ModuloFunc,
	"multiply":               stdlib.MultiplyFunc,
	"negate":                 stdlib.NegateFunc,
	"notequal":               stdlib.NotEqualFunc,
	"not":                    stdlib.NotFunc,
	"or":                     stdlib.OrFunc,
	"parseint":               stdlib.ParseIntFunc,
	"pow":                    stdlib.PowFunc,
	"range":                  stdlib.RangeFunc,
	"regexall":               stdlib.RegexAllFunc,
	"regex":                  stdlib.RegexFunc,
	"regex_replace":          stdlib.RegexReplaceFunc,
	"reverse":                stdlib.ReverseFunc,
	"reverselist":            stdlib.ReverseListFunc,
	"rsadecrypt":             crypto.RsaDecryptFunc,
	"sethaselement":          stdlib.SetHasElementFunc,
	"setintersection":        stdlib.SetIntersectionFunc,
	"setproduct":             stdlib.SetProductFunc,
	"setsubtract":            stdlib.SetSubtractFunc,
	"setsymmetricdifference": stdlib.SetSymmetricDifferenceFunc,
	"setunion":               stdlib.SetUnionFunc,
	"sha1":                   crypto.Sha1Func,
	"sha256":                 crypto.Sha256Func,
	"sha512":                 crypto.Sha512Func,
	"signum":                 stdlib.SignumFunc,
	"slice":                  stdlib.SliceFunc,
	"sort":                   stdlib.SortFunc,
	"split":                  stdlib.SplitFunc,
	"strlen":                 stdlib.StrlenFunc,
	"substr":                 stdlib.SubstrFunc,
	"subtract":               stdlib.SubtractFunc,
	"timeadd":                stdlib.TimeAddFunc,
	"title":                  stdlib.TitleFunc,
	"trim":                   stdlib.TrimFunc,
	"trimprefix":             stdlib.TrimPrefixFunc,
	"trimspace":              stdlib.TrimSpaceFunc,
	"trimsuffix":             stdlib.TrimSuffixFunc,
	"try":                    tryfunc.TryFunc,
	"upper":                  stdlib.UpperFunc,
	"urlencode":              encoding.URLEncodeFunc,
	"uuidv4":                 uuid.V4Func,
	"uuidv5":                 uuid.V5Func,
	"values":                 stdlib.ValuesFunc,
	"zipmap":                 stdlib.ZipmapFunc,
}
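Each entry above is a go-cty function.Function, so these names become callable from bake HCL expressions. A small standalone illustration (assumed usage, not from the diff) of invoking one of them directly:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	// stdlib.UpperFunc is the same function registered as "upper" above.
	v, err := stdlib.UpperFunc.Call([]cty.Value{cty.StringVal("buildx")})
	if err != nil {
		panic(err)
	}
	fmt.Println(v.AsString()) // BUILDX
}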
bake/remote.go (new file, 236 lines)
@@ -0,0 +1,236 @@
package bake

import (
	"archive/tar"
	"bytes"
	"context"
	"strings"

	"github.com/docker/buildx/build"
	"github.com/docker/buildx/driver"
	"github.com/docker/buildx/util/progress"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/client/llb"
	gwclient "github.com/moby/buildkit/frontend/gateway/client"
	"github.com/pkg/errors"
)

type Input struct {
	State *llb.State
	URL   string
}

func ReadRemoteFiles(ctx context.Context, dis []build.DriverInfo, url string, names []string, pw progress.Writer) ([]File, *Input, error) {
	st, filename, ok := detectHTTPContext(url)
	if !ok {
		st, ok = detectGitContext(url)
		if !ok {
			return nil, nil, errors.Errorf("not url context")
		}
	}

	inp := &Input{State: st, URL: url}
	var files []File

	var di *build.DriverInfo
	for _, d := range dis {
		if d.Err == nil {
			// copy before taking the address: &d would alias the loop
			// variable and always end up pointing at the last element
			d := d
			di = &d
			break
		}
	}
	if di == nil {
		return nil, nil, nil
	}

	c, err := driver.Boot(ctx, di.Driver, pw)
	if err != nil {
		return nil, nil, err
	}

	ch, done := progress.NewChannel(pw)
	defer func() { <-done }()
	_, err = c.Build(ctx, client.SolveOpt{}, "buildx", func(ctx context.Context, c gwclient.Client) (*gwclient.Result, error) {
		def, err := st.Marshal(ctx)
		if err != nil {
			return nil, err
		}
		res, err := c.Solve(ctx, gwclient.SolveRequest{
			Definition: def.ToPB(),
		})
		if err != nil {
			return nil, err
		}

		ref, err := res.SingleRef()
		if err != nil {
			return nil, err
		}

		if filename != "" {
			files, err = filesFromURLRef(ctx, c, ref, inp, filename, names)
		} else {
			files, err = filesFromRef(ctx, ref, names)
		}
		return nil, err
	}, ch)

	if err != nil {
		return nil, nil, err
	}

	return files, inp, nil
}

func IsRemoteURL(url string) bool {
	if _, _, ok := detectHTTPContext(url); ok {
		return true
	}
	if _, ok := detectGitContext(url); ok {
		return true
	}
	return false
}

func detectHTTPContext(url string) (*llb.State, string, bool) {
	if httpPrefix.MatchString(url) {
		httpContext := llb.HTTP(url, llb.Filename("context"), llb.WithCustomName("[internal] load remote build context"))
		return &httpContext, "context", true
	}
	return nil, "", false
}

func detectGitContext(ref string) (*llb.State, bool) {
	found := false
	if httpPrefix.MatchString(ref) && gitURLPathWithFragmentSuffix.MatchString(ref) {
		found = true
	}

	for _, prefix := range []string{"git://", "github.com/", "git@"} {
		if strings.HasPrefix(ref, prefix) {
			found = true
			break
		}
	}
	if !found {
		return nil, false
	}

	parts := strings.SplitN(ref, "#", 2)
	branch := ""
	if len(parts) > 1 {
		branch = parts[1]
	}
	gitOpts := []llb.GitOption{llb.WithCustomName("[internal] load git source " + ref)}

	st := llb.Git(parts[0], branch, gitOpts...)
	return &st, true
}

func isArchive(header []byte) bool {
	for _, m := range [][]byte{
		{0x42, 0x5A, 0x68},                   // bzip2
		{0x1F, 0x8B, 0x08},                   // gzip
		{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, // xz
	} {
		if len(header) < len(m) {
			continue
		}
		if bytes.Equal(m, header[:len(m)]) {
			return true
		}
	}

	r := tar.NewReader(bytes.NewBuffer(header))
	_, err := r.Next()
	return err == nil
}

func filesFromURLRef(ctx context.Context, c gwclient.Client, ref gwclient.Reference, inp *Input, filename string, names []string) ([]File, error) {
	stat, err := ref.StatFile(ctx, gwclient.StatRequest{Path: filename})
	if err != nil {
		return nil, err
	}

	dt, err := ref.ReadFile(ctx, gwclient.ReadRequest{
		Filename: filename,
		Range: &gwclient.FileRange{
			Length: 1024,
		},
	})
	if err != nil {
		return nil, err
	}

	if isArchive(dt) {
		bc := llb.Scratch().File(llb.Copy(inp.State, filename, "/", &llb.CopyInfo{
			AttemptUnpack: true,
		}))
		inp.State = &bc
		inp.URL = ""
		def, err := bc.Marshal(ctx)
		if err != nil {
			return nil, err
		}
		res, err := c.Solve(ctx, gwclient.SolveRequest{
			Definition: def.ToPB(),
		})
		if err != nil {
			return nil, err
		}

		ref, err := res.SingleRef()
		if err != nil {
			return nil, err
		}

		return filesFromRef(ctx, ref, names)
	}

	inp.State = nil
	name := inp.URL
	inp.URL = ""

	if len(dt) > stat.Size() {
		if stat.Size() > 1024*512 {
			return nil, errors.Errorf("non-archive definition URL bigger than maximum allowed size")
		}

		dt, err = ref.ReadFile(ctx, gwclient.ReadRequest{
			Filename: filename,
		})
		if err != nil {
			return nil, err
		}
	}

	return []File{{Name: name, Data: dt}}, nil
}

func filesFromRef(ctx context.Context, ref gwclient.Reference, names []string) ([]File, error) {
	// TODO: auto-remove parent dir if needed
	var files []File

	isDefault := false
	if len(names) == 0 {
		isDefault = true
		names = defaultFilenames()
	}

	for _, name := range names {
		_, err := ref.StatFile(ctx, gwclient.StatRequest{Path: name})
		if err != nil {
			if isDefault {
				continue
			}
			return nil, err
		}
		dt, err := ref.ReadFile(ctx, gwclient.ReadRequest{Filename: name})
		if err != nil {
			return nil, err
		}
		files = append(files, File{Name: name, Data: dt})
	}

	return files, nil
}
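A short sketch of the resulting behavior (sample URLs assumed; httpPrefix and gitURLPathWithFragmentSuffix are regexps defined elsewhere in the package):

package main

import (
	"fmt"

	"github.com/docker/buildx/bake"
)

func main() {
	for _, s := range []string{
		"https://github.com/docker/buildx.git#master", // git context: http prefix plus .git#fragment suffix
		"git@github.com:docker/buildx.git",            // git context: "git@" prefix
		"./some/local/dir",                            // not remote: falls through both detectors
	} {
		fmt.Println(s, bake.IsRemoteURL(s)) // true, true, false
	}
}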
build/build.go (472 lines changed)
@@ -3,6 +3,7 @@ package build
import (
	"bufio"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
@@ -11,6 +12,7 @@ import (
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/platforms"
@@ -19,15 +21,24 @@ import (
	"github.com/docker/buildx/util/progress"
	clitypes "github.com/docker/cli/cli/config/types"
	"github.com/docker/distribution/reference"
	"github.com/docker/docker/api/types"
	dockerclient "github.com/docker/docker/client"
	"github.com/docker/docker/pkg/jsonmessage"
	"github.com/docker/docker/pkg/urlutil"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/client/llb"
	gateway "github.com/moby/buildkit/frontend/gateway/client"
	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/session/upload/uploadprovider"
	"github.com/moby/buildkit/util/apicaps"
	"github.com/moby/buildkit/util/entitlements"
	"github.com/moby/buildkit/util/progress/progresswriter"
	"github.com/moby/buildkit/util/tracing"
	"github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"go.opentelemetry.io/otel/trace"
	"golang.org/x/sync/errgroup"
)

@@ -55,13 +66,16 @@ type Options struct {
	CacheFrom []client.CacheOptionsEntry
	CacheTo   []client.CacheOptionsEntry

	Allow []entitlements.Entitlement
	// DockerTarget
}

type Inputs struct {
	ContextPath    string
	DockerfilePath string
	InStream       io.Reader
	ContextPath      string
	DockerfilePath   string
	InStream         io.Reader
	ContextState     *llb.State
	DockerfileInline string
}

type DriverInfo struct {
@@ -100,6 +114,7 @@ type driverPair struct {
	driverIndex int
	platforms   []specs.Platform
	so          *client.SolveOpt
	bopts       gateway.BuildOpts
}

func driverIndexes(m map[string][]driverPair) []int {
@@ -170,8 +185,45 @@ func splitToDriverPairs(availablePlatforms map[string]int, opt map[string]Option
	return m
}

func resolveDrivers(ctx context.Context, drivers []DriverInfo, opt map[string]Options, pw progress.Writer) (map[string][]driverPair, []*client.Client, error) {
func resolveDrivers(ctx context.Context, drivers []DriverInfo, auth Auth, opt map[string]Options, pw progress.Writer) (map[string][]driverPair, []*client.Client, error) {
	dps, clients, err := resolveDriversBase(ctx, drivers, auth, opt, pw)
	if err != nil {
		return nil, nil, err
	}

	bopts := make([]gateway.BuildOpts, len(clients))

	span, ctx := tracing.StartSpan(ctx, "load buildkit capabilities", trace.WithSpanKind(trace.SpanKindInternal))

	eg, ctx := errgroup.WithContext(ctx)
	for i, c := range clients {
		func(i int, c *client.Client) {
			eg.Go(func() error {
				clients[i].Build(ctx, client.SolveOpt{}, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
					bopts[i] = c.BuildOpts()
					return nil, nil
				}, nil)
				return nil
			})
		}(i, c)
	}

	err = eg.Wait()
	span.RecordError(err)
	span.End()
	if err != nil {
		return nil, nil, err
	}
	for key := range dps {
		for i, dp := range dps[key] {
			dps[key][i].bopts = bopts[dp.driverIndex]
		}
	}

	return dps, clients, nil
}

func resolveDriversBase(ctx context.Context, drivers []DriverInfo, auth Auth, opt map[string]Options, pw progress.Writer) (map[string][]driverPair, []*client.Client, error) {
	availablePlatforms := map[string]int{}
	for i, d := range drivers {
		for _, p := range d.Platform {
@@ -236,6 +288,7 @@ func resolveDrivers(ctx context.Context, drivers []DriverInfo, opt map[string]Op
			workers[i] = ww
			return nil
		})

		}(i)
	}

@@ -276,14 +329,7 @@ func toRepoOnly(in string) (string, error) {
	return strings.Join(out, ","), nil
}

func isDefaultMobyDriver(d driver.Driver) bool {
	_, ok := d.(interface {
		IsDefaultMobyDriver()
	})
	return ok
}

func toSolveOpt(d driver.Driver, multiDriver bool, opt Options, dl dockerLoadCallback) (solveOpt *client.SolveOpt, release func(), err error) {
func toSolveOpt(ctx context.Context, d driver.Driver, multiDriver bool, opt Options, bopts gateway.BuildOpts, pw progress.Writer, dl dockerLoadCallback) (solveOpt *client.SolveOpt, release func(), err error) {
	defers := make([]func(), 0, 2)
	releaseF := func() {
		for _, f := range defers {
@@ -298,9 +344,6 @@ func toSolveOpt(d driver.Driver, multiDriver bool, opt Options, dl dockerLoadCal
	}()

	if opt.ImageIDFile != "" {
		if multiDriver || len(opt.Platforms) != 0 {
			return nil, nil, errors.Errorf("image ID file cannot be specified when building for multiple platforms")
		}
		// Avoid leaving a stale file if we eventually fail
		if err := os.Remove(opt.ImageIDFile); err != nil && !os.IsNotExist(err) {
			return nil, nil, errors.Wrap(err, "removing image ID file")
@@ -323,12 +366,39 @@ func toSolveOpt(d driver.Driver, multiDriver bool, opt Options, dl dockerLoadCal
		}
	}

	cacheTo := make([]client.CacheOptionsEntry, 0, len(opt.CacheTo))
	for _, e := range opt.CacheTo {
		if e.Type == "gha" {
			if !bopts.LLBCaps.Contains(apicaps.CapID("cache.gha")) {
				continue
			}
		}
		cacheTo = append(cacheTo, e)
	}

	cacheFrom := make([]client.CacheOptionsEntry, 0, len(opt.CacheFrom))
	for _, e := range opt.CacheFrom {
		if e.Type == "gha" {
			if !bopts.LLBCaps.Contains(apicaps.CapID("cache.gha")) {
				continue
			}
		}
		cacheFrom = append(cacheFrom, e)
	}

	so := client.SolveOpt{
		Frontend:      "dockerfile.v0",
		FrontendAttrs: map[string]string{},
		LocalDirs:     map[string]string{},
		CacheExports:  opt.CacheTo,
		CacheImports:  opt.CacheFrom,
		Frontend:            "dockerfile.v0",
		FrontendAttrs:       map[string]string{},
		LocalDirs:           map[string]string{},
		CacheExports:        cacheTo,
		CacheImports:        cacheFrom,
		AllowedEntitlements: opt.Allow,
	}

	if v, ok := opt.BuildArgs["BUILDKIT_MULTI_PLATFORM"]; ok {
		if v, _ := strconv.ParseBool(v); v {
			so.FrontendAttrs["multi-platform"] = "true"
		}
	}

	if multiDriver {
@@ -336,15 +406,11 @@ func toSolveOpt(d driver.Driver, multiDriver bool, opt Options, dl dockerLoadCal
		so.FrontendAttrs["multi-platform"] = "true"
	}

	_, isDefaultMobyDriver := d.(interface {
		IsDefaultMobyDriver()
	})

	switch len(opt.Exports) {
	case 1:
		// valid
	case 0:
		if isDefaultMobyDriver {
		if d.IsMobyDriver() && !noDefaultLoad() {
			// backwards compat for docker driver only:
			// this ensures the build results in a docker image.
			opt.Exports = []client.ExportEntry{{Type: "image", Attrs: map[string]string{}}}
@@ -379,6 +445,15 @@ func toSolveOpt(d driver.Driver, multiDriver bool, opt Options, dl dockerLoadCal
		}
	}

	// cacheonly is a fake exporter to opt out of default behaviors
	exports := make([]client.ExportEntry, 0, len(opt.Exports))
	for _, e := range opt.Exports {
		if e.Type != "cacheonly" {
			exports = append(exports, e)
		}
	}
	opt.Exports = exports

	// set up exporters
	for i, e := range opt.Exports {
		if (e.Type == "local" || e.Type == "tar") && opt.ImageIDFile != "" {
@@ -388,8 +463,11 @@ func toSolveOpt(d driver.Driver, multiDriver bool, opt Options, dl dockerLoadCal
			return nil, nil, notSupported(d, driver.OCIExporter)
		}
		if e.Type == "docker" {
			if len(opt.Platforms) > 1 {
				return nil, nil, errors.Errorf("docker exporter does not currently support exporting manifest lists")
			}
			if e.Output == nil {
				if isDefaultMobyDriver {
				if d.IsMobyDriver() {
					e.Type = "image"
				} else {
					w, cancel, err := dl(e.Attrs["context"])
@@ -397,17 +475,19 @@ func toSolveOpt(d driver.Driver, multiDriver bool, opt Options, dl dockerLoadCal
						return nil, nil, err
					}
					defers = append(defers, cancel)
					opt.Exports[i].Output = w
					opt.Exports[i].Output = wrapWriteCloser(w)
				}
			} else if !d.Features()[driver.DockerExporter] {
				return nil, nil, notSupported(d, driver.DockerExporter)
			}
		}
		if e.Type == "image" && isDefaultMobyDriver {
		if e.Type == "image" && d.IsMobyDriver() {
			opt.Exports[i].Type = "moby"
			if e.Attrs["push"] != "" {
				if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok {
					return nil, nil, errors.Errorf("auto-push is currently not implemented for docker driver")
					if ok, _ := strconv.ParseBool(e.Attrs["push-by-digest"]); ok {
						return nil, nil, errors.Errorf("push-by-digest is currently not implemented for docker driver, please create a new builder instance")
					}
				}
			}
		}
@@ -416,7 +496,7 @@ func toSolveOpt(d driver.Driver, multiDriver bool, opt Options, dl dockerLoadCal
	so.Exports = opt.Exports
	so.Session = opt.Session

	releaseLoad, err := LoadInputs(opt.Inputs, &so)
	releaseLoad, err := LoadInputs(ctx, d, opt.Inputs, pw, &so)
	if err != nil {
		return nil, nil, err
	}
@@ -452,7 +532,10 @@ func toSolveOpt(d driver.Driver, multiDriver bool, opt Options, dl dockerLoadCal

	// setup networkmode
	switch opt.NetworkMode {
	case "host", "none":
	case "host":
		so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
		so.AllowedEntitlements = append(so.AllowedEntitlements, entitlements.EntitlementNetworkHost)
	case "none":
		so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
	case "", "default":
	default:
@@ -469,7 +552,7 @@ func toSolveOpt(d driver.Driver, multiDriver bool, opt Options, dl dockerLoadCal
	return &so, releaseF, nil
}

func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, docker DockerAPI, auth Auth, pw progress.Writer) (resp map[string]*client.SolveResponse, err error) {
func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, docker DockerAPI, auth Auth, w progress.Writer) (resp map[string]*client.SolveResponse, err error) {
	if len(drivers) == 0 {
		return nil, errors.Errorf("driver required for build")
	}
@@ -481,13 +564,13 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do

	var noMobyDriver driver.Driver
	for _, d := range drivers {
		if !isDefaultMobyDriver(d.Driver) {
		if !d.Driver.IsMobyDriver() {
			noMobyDriver = d.Driver
			break
		}
	}

	if noMobyDriver != nil {
	if noMobyDriver != nil && !noDefaultLoad() {
		for _, opt := range opt {
			if len(opt.Exports) == 0 {
				logrus.Warnf("No output specified for %s driver. Build result will only remain in the build cache. To push result image into registry use --push or to load image into docker use --load", noMobyDriver.Factory().Name())
@@ -496,10 +579,8 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
		}
	}

	m, clients, err := resolveDrivers(ctx, drivers, opt, pw)
	m, clients, err := resolveDrivers(ctx, drivers, auth, opt, w)
	if err != nil {
		close(pw.Status())
		<-pw.Done()
		return nil, err
	}

@@ -512,16 +593,19 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
		}
	}()

	mw := progress.NewMultiWriter(pw)
	eg, ctx := errgroup.WithContext(ctx)

	for k, opt := range opt {
		multiDriver := len(m[k]) > 1
		hasMobyDriver := false
		for i, dp := range m[k] {
			d := drivers[dp.driverIndex].Driver
			if d.IsMobyDriver() {
				hasMobyDriver = true
			}
			opt.Platforms = dp.platforms
			so, release, err := toSolveOpt(d, multiDriver, opt, func(name string) (io.WriteCloser, func(), error) {
				return newDockerLoader(ctx, docker, name, mw)
			so, release, err := toSolveOpt(ctx, d, multiDriver, opt, dp.bopts, w, func(name string) (io.WriteCloser, func(), error) {
				return newDockerLoader(ctx, docker, name, w)
			})
			if err != nil {
				return nil, err
@@ -529,6 +613,28 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
			defers = append(defers, release)
			m[k][i].so = so
		}
		for _, at := range opt.Session {
			if s, ok := at.(interface {
				SetLogger(progresswriter.Logger)
			}); ok {
				s.SetLogger(func(s *client.SolveStatus) {
					w.Write(s)
				})
			}
		}

		// validate for multi-node push
		if hasMobyDriver && multiDriver {
			for _, dp := range m[k] {
				for _, e := range dp.so.Exports {
					if e.Type == "moby" {
						if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok {
							return nil, errors.Errorf("multi-node push can't currently be performed with the docker driver, please switch to a different driver")
						}
					}
				}
			}
		}
	}

	resp = map[string]*client.SolveResponse{}
@@ -537,20 +643,31 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
	multiTarget := len(opt) > 1

	for k, opt := range opt {
		err := func() error {
		err := func(k string) error {
			opt := opt
			dps := m[k]
			multiDriver := len(m[k]) > 1

			var span trace.Span
			ctx := ctx
			if multiTarget {
				span, ctx = tracing.StartSpan(ctx, k)
			}

			res := make([]*client.SolveResponse, len(dps))
			wg := &sync.WaitGroup{}
			wg.Add(len(dps))

			var pushNames string

			eg.Go(func() error {
				pw := mw.WithPrefix("default", false)
				defer close(pw.Status())
			eg.Go(func() (err error) {
				defer func() {
					if span != nil {
						span.RecordError(err)
						span.End()
					}
				}()
				pw := progress.WithPrefix(w, "default", false)
				wg.Wait()
				select {
				case <-ctx.Done():
@@ -653,27 +770,43 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
			}

			func(i int, dp driverPair, so client.SolveOpt) {
				pw := mw.WithPrefix(k, multiTarget)
				pw := progress.WithPrefix(w, k, multiTarget)

				c := clients[dp.driverIndex]

				var statusCh chan *client.SolveStatus
				if pw != nil {
					pw = progress.ResetTime(pw)
					statusCh = pw.Status()
					eg.Go(func() error {
						<-pw.Done()
						return pw.Err()
					})
				}
				pw = progress.ResetTime(pw)

				eg.Go(func() error {
					defer wg.Done()
					rr, err := c.Solve(ctx, nil, so, statusCh)
					ch, done := progress.NewChannel(pw)
					defer func() { <-done }()
					rr, err := c.Solve(ctx, nil, so, ch)
					if err != nil {
						return err
					}
					res[i] = rr

					d := drivers[dp.driverIndex].Driver
					if d.IsMobyDriver() {
						for _, e := range so.Exports {
							if e.Type == "moby" && e.Attrs["push"] != "" {
								if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok {
									pushNames = e.Attrs["name"]
									if pushNames == "" {
										return errors.Errorf("tag is needed when pushing to registry")
									}
									pw := progress.ResetTime(pw)
									for _, name := range strings.Split(pushNames, ",") {
										if err := progress.Wrap(fmt.Sprintf("pushing %s with docker", name), pw.Write, func(l progress.SubLogger) error {
											return pushWithMoby(ctx, d, name, l)
										}); err != nil {
											return err
										}
									}
								}
							}
						}
					}
					return nil
				})

@@ -681,7 +814,7 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
			}

			return nil
		}()
		}(k)
		if err != nil {
			return nil, err
		}
@@ -694,6 +827,86 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
	return resp, nil
}

func pushWithMoby(ctx context.Context, d driver.Driver, name string, l progress.SubLogger) error {
	api := d.Config().DockerAPI
	if api == nil {
		return errors.Errorf("invalid empty Docker API reference") // should never happen
	}
	creds, err := imagetools.RegistryAuthForRef(name, d.Config().Auth)
	if err != nil {
		return err
	}

	rc, err := api.ImagePush(ctx, name, types.ImagePushOptions{
		RegistryAuth: creds,
	})
	if err != nil {
		return err
	}

	started := map[string]*client.VertexStatus{}

	defer func() {
		for _, st := range started {
			if st.Completed == nil {
				now := time.Now()
				st.Completed = &now
				l.SetStatus(st)
			}
		}
	}()

	dec := json.NewDecoder(rc)
	var parsedError error
	for {
		var jm jsonmessage.JSONMessage
		if err := dec.Decode(&jm); err != nil {
			if parsedError != nil {
				return parsedError
			}
			if err == io.EOF {
				break
			}
			return err
		}
		if jm.ID != "" {
			id := "pushing layer " + jm.ID
			st, ok := started[id]
			if !ok {
				if jm.Progress != nil || jm.Status == "Pushed" {
					now := time.Now()
					st = &client.VertexStatus{
						ID:      id,
						Started: &now,
					}
					started[id] = st
				} else {
					continue
				}
			}
			st.Timestamp = time.Now()
			if jm.Progress != nil {
				st.Current = jm.Progress.Current
				st.Total = jm.Progress.Total
			}
			if jm.Error != nil {
				now := time.Now()
				st.Completed = &now
			}
			if jm.Status == "Pushed" {
				now := time.Now()
				st.Completed = &now
				st.Current = st.Total
			}
			l.SetStatus(st)
		}
		if jm.Error != nil {
			parsedError = jm.Error
		}
	}
	return nil
}

func createTempDockerfile(r io.Reader) (string, error) {
	dir, err := ioutil.TempDir("", "dockerfile")
	if err != nil {
@@ -710,7 +923,7 @@ func createTempDockerfile(r io.Reader) (string, error) {
	return dir, err
}

func LoadInputs(inp Inputs, target *client.SolveOpt) (func(), error) {
func LoadInputs(ctx context.Context, d driver.Driver, inp Inputs, pw progress.Writer, target *client.SolveOpt) (func(), error) {
	if inp.ContextPath == "" {
		return nil, errors.New("please specify build context (e.g. \".\" for the current directory)")
	}
@@ -726,38 +939,45 @@ func LoadInputs(inp Inputs, target *client.SolveOpt) (func(), error) {
	)

	switch {
	case inp.ContextState != nil:
		if target.FrontendInputs == nil {
			target.FrontendInputs = make(map[string]llb.State)
		}
		target.FrontendInputs["context"] = *inp.ContextState
		target.FrontendInputs["dockerfile"] = *inp.ContextState
	case inp.ContextPath == "-":
		if inp.DockerfilePath == "-" {
			return nil, errStdinConflict
		}

		buf := bufio.NewReader(os.Stdin)
		buf := bufio.NewReader(inp.InStream)
		magic, err := buf.Peek(archiveHeaderSize * 2)
		if err != nil && err != io.EOF {
			return nil, errors.Wrap(err, "failed to peek context header from STDIN")
		}

		if isArchive(magic) {
			// stdin is context
			up := uploadprovider.New()
			target.FrontendAttrs["context"] = up.Add(buf)
			target.Session = append(target.Session, up)
		} else {
			if inp.DockerfilePath != "" {
				return nil, errDockerfileConflict
		if !(err == io.EOF && len(magic) == 0) {
			if isArchive(magic) {
				// stdin is context
				up := uploadprovider.New()
				target.FrontendAttrs["context"] = up.Add(buf)
				target.Session = append(target.Session, up)
			} else {
				if inp.DockerfilePath != "" {
					return nil, errDockerfileConflict
				}
				// stdin is dockerfile
				dockerfileReader = buf
				inp.ContextPath, _ = ioutil.TempDir("", "empty-dir")
				toRemove = append(toRemove, inp.ContextPath)
				target.LocalDirs["context"] = inp.ContextPath
			}
			// stdin is dockerfile
			dockerfileReader = buf
			inp.ContextPath, _ = ioutil.TempDir("", "empty-dir")
			toRemove = append(toRemove, inp.ContextPath)
			target.LocalDirs["context"] = inp.ContextPath
		}

	case isLocalDir(inp.ContextPath):
		target.LocalDirs["context"] = inp.ContextPath
		switch inp.DockerfilePath {
		case "-":
			dockerfileReader = os.Stdin
			dockerfileReader = inp.InStream
		case "":
			dockerfileDir = inp.ContextPath
		default:
@@ -774,23 +994,41 @@ func LoadInputs(inp Inputs, target *client.SolveOpt) (func(), error) {
		return nil, errors.Errorf("unable to prepare context: path %q not found", inp.ContextPath)
	}

	if inp.DockerfileInline != "" {
		dockerfileReader = strings.NewReader(inp.DockerfileInline)
	}

	if dockerfileReader != nil {
		dockerfileDir, err = createTempDockerfile(dockerfileReader)
		if err != nil {
			return nil, err
		}
		toRemove = append(toRemove, dockerfileDir)
		dockerfileName = "Dockerfile"
		target.FrontendAttrs["dockerfilekey"] = "dockerfile"
	}
	if urlutil.IsURL(inp.DockerfilePath) {
		dockerfileDir, err = createTempDockerfileFromURL(ctx, d, inp.DockerfilePath, pw)
		if err != nil {
			return nil, err
		}
		toRemove = append(toRemove, dockerfileDir)
		dockerfileName = "Dockerfile"
		target.FrontendAttrs["dockerfilekey"] = "dockerfile"
		delete(target.FrontendInputs, "dockerfile")
	}

	if dockerfileName == "" {
		dockerfileName = "Dockerfile"
	}
	target.FrontendAttrs["filename"] = dockerfileName

	if dockerfileDir != "" {
		target.LocalDirs["dockerfile"] = dockerfileDir
		dockerfileName = handleLowercaseDockerfile(dockerfileDir, dockerfileName)
	}

	target.FrontendAttrs["filename"] = dockerfileName

	release := func() {
		for _, dir := range toRemove {
			os.RemoveAll(dir)
@@ -805,40 +1043,60 @@ func notSupported(d driver.Driver, f driver.Feature) error {

type dockerLoadCallback func(name string) (io.WriteCloser, func(), error)

func newDockerLoader(ctx context.Context, d DockerAPI, name string, mw *progress.MultiWriter) (io.WriteCloser, func(), error) {
func newDockerLoader(ctx context.Context, d DockerAPI, name string, status progress.Writer) (io.WriteCloser, func(), error) {
	c, err := d.DockerAPI(name)
	if err != nil {
		return nil, nil, err
	}

	pr, pw := io.Pipe()
	started := make(chan struct{})
	w := &waitingWriter{
	done := make(chan struct{})

	ctx, cancel := context.WithCancel(ctx)
	var w *waitingWriter
	w = &waitingWriter{
		PipeWriter: pw,
		f: func() {
			resp, err := c.ImageLoad(ctx, pr, false)
			defer close(done)
			if err != nil {
				pr.CloseWithError(err)
				w.mu.Lock()
				w.err = err
				w.mu.Unlock()
				return
			}
			prog := mw.WithPrefix("", false)
			close(started)
			prog := progress.WithPrefix(status, "", false)
			progress.FromReader(prog, "importing to docker", resp.Body)
		},
		started: started,
		done:   done,
		cancel: cancel,
	}
	return w, func() {
		pr.Close()
	}, nil
}

func noDefaultLoad() bool {
	v, ok := os.LookupEnv("BUILDX_NO_DEFAULT_LOAD")
	if !ok {
		return false
	}
	b, err := strconv.ParseBool(v)
	if err != nil {
		logrus.Warnf("invalid non-bool value for BUILDX_NO_DEFAULT_LOAD: %s", v)
	}
	return b
}

type waitingWriter struct {
	*io.PipeWriter
	f       func()
	once    sync.Once
	mu      sync.Mutex
	err     error
	started chan struct{}
	f      func()
	once   sync.Once
	mu     sync.Mutex
	err    error
	done   chan struct{}
	cancel func()
}

func (w *waitingWriter) Write(dt []byte) (int, error) {
@@ -850,6 +1108,48 @@ func (w *waitingWriter) Write(dt []byte) (int, error) {

func (w *waitingWriter) Close() error {
	err := w.PipeWriter.Close()
	<-w.started
	<-w.done
	if err == nil {
		w.mu.Lock()
		defer w.mu.Unlock()
		return w.err
	}
	return err
}

// handle https://github.com/moby/moby/pull/10858
func handleLowercaseDockerfile(dir, p string) string {
	if filepath.Base(p) != "Dockerfile" {
		return p
	}

	f, err := os.Open(filepath.Dir(filepath.Join(dir, p)))
	if err != nil {
		return p
	}

	names, err := f.Readdirnames(-1)
	if err != nil {
		return p
	}

	foundLowerCase := false
	for _, n := range names {
		if n == "Dockerfile" {
			return p
		}
		if n == "dockerfile" {
			foundLowerCase = true
		}
	}
	if foundLowerCase {
		return filepath.Join(filepath.Dir(p), "dockerfile")
	}
	return p
}

func wrapWriteCloser(wc io.WriteCloser) func(map[string]string) (io.WriteCloser, error) {
	return func(map[string]string) (io.WriteCloser, error) {
		return wc, nil
	}
}
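To make the new gha cache gating concrete, a hypothetical input to the filtering loops in toSolveOpt (values invented for illustration); the "gha" entry survives only when bopts.LLBCaps reports the "cache.gha" capability:

package build

import "github.com/moby/buildkit/client"

// exampleCacheEntries sketches a --cache-to list as it reaches toSolveOpt.
var exampleCacheEntries = []client.CacheOptionsEntry{
	{Type: "gha", Attrs: map[string]string{"scope": "buildx"}},                            // dropped on daemons without cache.gha
	{Type: "registry", Attrs: map[string]string{"ref": "docker.io/user/app:cache"}},       // always kept
}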
@@ -1,60 +0,0 @@
package build

import (
	"encoding/csv"
	"strings"

	"github.com/moby/buildkit/client"
	"github.com/pkg/errors"
)

func ParseCacheEntry(in []string) ([]client.CacheOptionsEntry, error) {
	imports := make([]client.CacheOptionsEntry, 0, len(in))
	for _, in := range in {
		csvReader := csv.NewReader(strings.NewReader(in))
		fields, err := csvReader.Read()
		if err != nil {
			return nil, err
		}
		if isRefOnlyFormat(fields) {
			for _, field := range fields {
				imports = append(imports, client.CacheOptionsEntry{
					Type:  "registry",
					Attrs: map[string]string{"ref": field},
				})
			}
			continue
		}
		im := client.CacheOptionsEntry{
			Attrs: map[string]string{},
		}
		for _, field := range fields {
			parts := strings.SplitN(field, "=", 2)
			if len(parts) != 2 {
				return nil, errors.Errorf("invalid value %s", field)
			}
			key := strings.ToLower(parts[0])
			value := parts[1]
			switch key {
			case "type":
				im.Type = value
			default:
				im.Attrs[key] = value
			}
		}
		if im.Type == "" {
			return nil, errors.Errorf("type required for %q", in)
		}
		imports = append(imports, im)
	}
	return imports, nil
}

func isRefOnlyFormat(in []string) bool {
	for _, v := range in {
		if strings.Contains(v, "=") {
			return false
		}
	}
	return true
}
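The helper removed here accepted two forms, which is worth recording since the new util/buildflags import in commands/build.go below suggests where the functionality moved. A sketch of how it was called before this change (values illustrative):

entries, err := build.ParseCacheEntry([]string{
	"docker.io/user/app:cache",         // bare ref shorthand -> type=registry,ref=docker.io/user/app:cache
	"type=local,src=/tmp/buildx-cache", // explicit key=value CSV form
})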
@@ -1,60 +0,0 @@
package build

import (
	"encoding/csv"
	"strings"

	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/session/secrets/secretsprovider"
	"github.com/pkg/errors"
)

func ParseSecretSpecs(sl []string) (session.Attachable, error) {
	fs := make([]secretsprovider.FileSource, 0, len(sl))
	for _, v := range sl {
		s, err := parseSecret(v)
		if err != nil {
			return nil, err
		}
		fs = append(fs, *s)
	}
	store, err := secretsprovider.NewFileStore(fs)
	if err != nil {
		return nil, err
	}
	return secretsprovider.NewSecretProvider(store), nil
}

func parseSecret(value string) (*secretsprovider.FileSource, error) {
	csvReader := csv.NewReader(strings.NewReader(value))
	fields, err := csvReader.Read()
	if err != nil {
		return nil, errors.Wrap(err, "failed to parse csv secret")
	}

	fs := secretsprovider.FileSource{}

	for _, field := range fields {
		parts := strings.SplitN(field, "=", 2)
		key := strings.ToLower(parts[0])

		if len(parts) != 2 {
			return nil, errors.Errorf("invalid field '%s' must be a key=value pair", field)
		}

		value := parts[1]
		switch key {
		case "type":
			if value != "file" {
				return nil, errors.Errorf("unsupported secret type %q", value)
			}
		case "id":
			fs.ID = value
		case "source", "src":
			fs.FilePath = value
		default:
			return nil, errors.Errorf("unexpected key '%s' in '%s'", key, field)
		}
	}
	return &fs, nil
}
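For reference, the CSV grammar parseSecret accepted before this removal, shown with a hypothetical spec; only the "file" type is supported and "src" is an alias for "source":

attachable, err := build.ParseSecretSpecs([]string{
	"id=npmrc,src=/home/user/.npmrc", // hypothetical --secret value
})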
build/ssh.go (31 lines, deleted)
@@ -1,31 +0,0 @@
package build

import (
	"strings"

	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/session/sshforward/sshprovider"
)

func ParseSSHSpecs(sl []string) (session.Attachable, error) {
	configs := make([]sshprovider.AgentConfig, 0, len(sl))
	for _, v := range sl {
		c, err := parseSSH(v)
		if err != nil {
			return nil, err
		}
		configs = append(configs, *c)
	}
	return sshprovider.NewSSHAgentProvider(configs)
}

func parseSSH(value string) (*sshprovider.AgentConfig, error) {
	parts := strings.SplitN(value, "=", 2)
	cfg := sshprovider.AgentConfig{
		ID: parts[0],
	}
	if len(parts) > 1 {
		cfg.Paths = strings.Split(parts[1], ",")
	}
	return &cfg, nil
}
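Likewise for the removed SSH helper: parseSSH splits on the first "=", so a bare ID forwards the default agent socket while ID=path[,path...] forwards specific keys. A sketch of its pre-removal usage (paths hypothetical):

attachable, err := build.ParseSSHSpecs([]string{
	"default",                             // forward the default SSH agent
	"github=/home/user/.ssh/id_ed25519",   // forward one key under the id "github"
})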
build/url.go (new file, 71 lines)
@@ -0,0 +1,71 @@
package build

import (
	"context"
	"io/ioutil"
	"path/filepath"

	"github.com/docker/buildx/driver"
	"github.com/docker/buildx/util/progress"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/client/llb"
	gwclient "github.com/moby/buildkit/frontend/gateway/client"
	"github.com/pkg/errors"
)

func createTempDockerfileFromURL(ctx context.Context, d driver.Driver, url string, pw progress.Writer) (string, error) {
	c, err := driver.Boot(ctx, d, pw)
	if err != nil {
		return "", err
	}
	var out string
	ch, done := progress.NewChannel(pw)
	defer func() { <-done }()
	_, err = c.Build(ctx, client.SolveOpt{}, "buildx", func(ctx context.Context, c gwclient.Client) (*gwclient.Result, error) {
		def, err := llb.HTTP(url, llb.Filename("Dockerfile"), llb.WithCustomNamef("[internal] load %s", url)).Marshal(ctx)
		if err != nil {
			return nil, err
		}

		res, err := c.Solve(ctx, gwclient.SolveRequest{
			Definition: def.ToPB(),
		})
		if err != nil {
			return nil, err
		}
		ref, err := res.SingleRef()
		if err != nil {
			return nil, err
		}
		stat, err := ref.StatFile(ctx, gwclient.StatRequest{
			Path: "Dockerfile",
		})
		if err != nil {
			return nil, err
		}
		if stat.Size() > 512*1024 {
			return nil, errors.Errorf("Dockerfile %s bigger than allowed max size", url)
		}

		dt, err := ref.ReadFile(ctx, gwclient.ReadRequest{
			Filename: "Dockerfile",
		})
		if err != nil {
			return nil, err
		}
		dir, err := ioutil.TempDir("", "buildx")
		if err != nil {
			return nil, err
		}
		if err := ioutil.WriteFile(filepath.Join(dir, "Dockerfile"), dt, 0600); err != nil {
			return nil, err
		}
		out = dir
		return nil, nil
	}, ch)

	if err != nil {
		return "", err
	}
	return out, nil
}
@@ -4,20 +4,44 @@ import (
	"fmt"
	"os"

	"github.com/containerd/containerd/pkg/seed"
	"github.com/docker/buildx/commands"
	"github.com/docker/buildx/version"
	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli-plugins/manager"
	"github.com/docker/cli/cli-plugins/plugin"
	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/debug"
	cliflags "github.com/docker/cli/cli/flags"
	"github.com/spf13/cobra"
	"github.com/moby/buildkit/solver/errdefs"
	"github.com/moby/buildkit/util/stack"
	"github.com/moby/buildkit/util/tracing/detect"
	"go.opentelemetry.io/otel"

	_ "github.com/moby/buildkit/util/tracing/detect/delegated"
	_ "github.com/moby/buildkit/util/tracing/env"

	// FIXME: "k8s.io/client-go/plugin/pkg/client/auth/azure" is excluded because of compilation error
	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
	_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
	_ "k8s.io/client-go/plugin/pkg/client/auth/openstack"

	_ "github.com/docker/buildx/driver/docker"
	_ "github.com/docker/buildx/driver/docker-container"
	_ "github.com/docker/buildx/driver/kubernetes"
)

var experimental string

func init() {
	seed.WithTimeAndRand()
	stack.SetVersionInfo(version.Version, version.Revision)

	detect.ServiceName = "buildx"
	// do not log tracing errors to stdio
	otel.SetErrorHandler(skipErrors{})
}

func main() {
	if os.Getenv("DOCKER_CLI_PLUGIN_ORIGINAL_CLI_COMMAND") == "" {
		if len(os.Args) < 2 || os.Args[1] != manager.MetadataSubcommandName {
@@ -36,13 +60,46 @@ func main() {
		}
	}

	plugin.Run(func(dockerCli command.Cli) *cobra.Command {
		return commands.NewRootCmd("buildx", true, dockerCli)
	},
		manager.Metadata{
			SchemaVersion: "0.1.0",
			Vendor:        "Docker Inc.",
			Version:       version.Version,
			Experimental:  experimental != "",
		})
	dockerCli, err := command.NewDockerCli()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	p := commands.NewRootCmd("buildx", true, dockerCli)
	meta := manager.Metadata{
		SchemaVersion: "0.1.0",
		Vendor:        "Docker Inc.",
		Version:       version.Version,
		Experimental:  experimental != "",
	}

	if err := plugin.RunPlugin(dockerCli, p, meta); err != nil {
		if sterr, ok := err.(cli.StatusError); ok {
			if sterr.Status != "" {
				fmt.Fprintln(dockerCli.Err(), sterr.Status)
			}
			// StatusError should only be used for errors, and all errors should
			// have a non-zero exit status, so never exit with 0
			if sterr.StatusCode == 0 {
				os.Exit(1)
			}
			os.Exit(sterr.StatusCode)
		}
		for _, s := range errdefs.Sources(err) {
			s.Print(dockerCli.Err())
		}

		if debug.IsEnabled() {
			fmt.Fprintf(dockerCli.Err(), "error: %+v", stack.Formatter(err))
		} else {
			fmt.Fprintf(dockerCli.Err(), "error: %v\n", err)
		}

		os.Exit(1)
	}
}

type skipErrors struct{}

func (skipErrors) Handle(err error) {}
codecov.yml (new file, 1 line)
@@ -0,0 +1 @@
comment: false
commands/bake.go (153 lines changed)
@@ -1,12 +1,17 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/docker/buildx/bake"
|
||||
"github.com/docker/buildx/build"
|
||||
"github.com/docker/buildx/util/progress"
|
||||
"github.com/docker/buildx/util/tracing"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/moby/buildkit/util/appcontext"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
@@ -19,31 +24,105 @@ type bakeOptions struct {
|
||||
commonOptions
|
||||
}
|
||||
|
||||
func runBake(dockerCli command.Cli, targets []string, in bakeOptions) error {
|
||||
func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error) {
|
||||
ctx := appcontext.Context()
|
||||
|
||||
if len(in.files) == 0 {
|
||||
files, err := defaultFiles()
|
||||
if err != nil {
|
||||
return err
|
||||
ctx, end, err := tracing.TraceCurrentCommand(ctx, "bake")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
end(err)
|
||||
}()
|
||||
|
||||
var url string
|
||||
cmdContext := "cwd://"
|
||||
|
||||
if len(targets) > 0 {
|
||||
if bake.IsRemoteURL(targets[0]) {
|
||||
url = targets[0]
|
||||
targets = targets[1:]
|
||||
if len(targets) > 0 {
|
||||
if bake.IsRemoteURL(targets[0]) {
|
||||
cmdContext = targets[0]
|
||||
targets = targets[1:]
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(files) == 0 {
|
||||
return errors.Errorf("no docker-compose.yml or docker-bake.hcl found, specify build file with -f/--file")
|
||||
}
|
||||
in.files = files
|
||||
}
|
||||
|
||||
if len(targets) == 0 {
|
||||
targets = []string{"default"}
|
||||
}
|
||||
|
||||
m, err := bake.ReadTargets(ctx, in.files, targets, in.overrides)
|
||||
overrides := in.overrides
|
||||
if in.exportPush {
|
||||
if in.exportLoad {
|
||||
return errors.Errorf("push and load may not be set together at the moment")
|
||||
}
|
||||
overrides = append(overrides, "*.output=type=registry")
|
||||
} else if in.exportLoad {
|
||||
overrides = append(overrides, "*.output=type=docker")
|
||||
}
|
||||
if in.noCache != nil {
|
||||
overrides = append(overrides, fmt.Sprintf("*.no-cache=%t", *in.noCache))
|
||||
}
|
||||
if in.pull != nil {
|
||||
overrides = append(overrides, fmt.Sprintf("*.pull=%t", *in.pull))
|
||||
}
|
||||
contextPathHash, _ := os.Getwd()
|
||||
|
||||
ctx2, cancel := context.WithCancel(context.TODO())
|
||||
defer cancel()
|
||||
printer := progress.NewPrinter(ctx2, os.Stderr, in.progress)
|
||||
|
||||
defer func() {
|
||||
if printer != nil {
|
||||
err1 := printer.Wait()
|
||||
if err == nil {
|
||||
err = err1
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
dis, err := getInstanceOrDefault(ctx, dockerCli, in.builder, contextPathHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var files []bake.File
|
||||
var inp *bake.Input
|
||||
|
||||
if url != "" {
|
||||
files, inp, err = bake.ReadRemoteFiles(ctx, dis, url, in.files, printer)
|
||||
} else {
|
||||
files, err = bake.ReadLocalFiles(in.files)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m, err := bake.ReadTargets(ctx, files, targets, overrides, map[string]string{
|
||||
"BAKE_CMD_CONTEXT": cmdContext,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// this function can update target context string from the input so call before printOnly check
|
||||
bo, err := bake.TargetsToBuildOpt(m, inp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if in.printOnly {
|
||||
dt, err := json.MarshalIndent(map[string]map[string]bake.Target{"target": m}, "", " ")
|
||||
dt, err := json.MarshalIndent(map[string]map[string]*bake.Target{"target": m}, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = printer.Wait()
|
||||
printer = nil
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -51,37 +130,29 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
bo, err := bake.TargetsToBuildOpt(m)
|
||||
resp, err := build.Build(ctx, dis, bo, dockerAPI(dockerCli), dockerCli.ConfigFile(), printer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return buildTargets(ctx, dockerCli, bo, in.progress)
|
||||
}
|
||||
|
||||
func defaultFiles() ([]string, error) {
|
||||
fns := []string{
|
||||
"docker-compose.yml", // support app
|
||||
"docker-compose.yaml", // support app
|
||||
"docker-bake.json",
|
||||
"docker-bake.override.json",
|
||||
"docker-bake.hcl",
|
||||
"docker-bake.override.hcl",
|
||||
}
|
||||
out := make([]string, 0, len(fns))
|
||||
for _, f := range fns {
|
||||
if _, err := os.Stat(f); err != nil {
|
||||
if os.IsNotExist(errors.Cause(err)) {
|
||||
continue
|
||||
}
|
||||
return nil, err
|
||||
if len(in.metadataFile) > 0 && resp != nil {
|
||||
mdata := map[string]map[string]string{}
|
||||
for k, r := range resp {
|
||||
mdata[k] = r.ExporterResponse
|
||||
}
|
||||
mdatab, err := json.MarshalIndent(mdata, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ioutils.AtomicWriteFile(in.metadataFile, mdatab, 0644); err != nil {
|
||||
return err
|
||||
}
|
||||
out = append(out, f)
|
||||
}
|
||||
return out, nil
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func bakeCmd(dockerCli command.Cli) *cobra.Command {
|
||||
func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
||||
var options bakeOptions
|
||||
|
||||
cmd := &cobra.Command{
|
||||
@@ -89,6 +160,14 @@ func bakeCmd(dockerCli command.Cli) *cobra.Command {
|
||||
Aliases: []string{"f"},
|
||||
Short: "Build from a file",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
// reset to nil to avoid override is unset
|
||||
if !cmd.Flags().Lookup("no-cache").Changed {
|
||||
options.noCache = nil
|
||||
}
|
||||
if !cmd.Flags().Lookup("pull").Changed {
|
||||
options.pull = nil
|
||||
}
|
||||
options.commonOptions.builder = rootOpts.builder
|
||||
return runBake(dockerCli, args, options)
|
||||
},
|
||||
}
|
||||
@@ -97,9 +176,11 @@ func bakeCmd(dockerCli command.Cli) *cobra.Command {
|
||||
|
||||
flags.StringArrayVarP(&options.files, "file", "f", []string{}, "Build definition file")
|
||||
flags.BoolVar(&options.printOnly, "print", false, "Print the options without building")
|
||||
flags.StringArrayVar(&options.overrides, "set", nil, "Override target value (eg: target.key=value)")
|
||||
flags.StringArrayVar(&options.overrides, "set", nil, "Override target value (eg: targetpattern.key=value)")
|
||||
flags.BoolVar(&options.exportPush, "push", false, "Shorthand for --set=*.output=type=registry")
|
||||
flags.BoolVar(&options.exportLoad, "load", false, "Shorthand for --set=*.output=type=docker")
|
||||
|
||||
commonFlags(&options.commonOptions, flags)
|
||||
commonBuildFlags(&options.commonOptions, flags)
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
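A minimal sketch of the positional-argument handling added to runBake above: the first argument may be a remote URL holding the bake definition, and an optional second remote URL becomes BAKE_CMD_CONTEXT. The isRemoteURL stub below is an assumption for illustration only; the real check lives in bake.IsRemoteURL.

package main

import (
	"fmt"
	"strings"
)

// isRemoteURL is a simplified stand-in for bake.IsRemoteURL (assumed prefixes).
func isRemoteURL(s string) bool {
	return strings.HasPrefix(s, "https://") || strings.HasPrefix(s, "git://") ||
		strings.HasPrefix(s, "git@")
}

// splitBakeArgs mirrors the url/cmdContext extraction shown in the diff above.
func splitBakeArgs(targets []string) (url, cmdContext string, rest []string) {
	cmdContext = "cwd://" // default: command context is the working directory
	if len(targets) > 0 && isRemoteURL(targets[0]) {
		url = targets[0]
		targets = targets[1:]
		if len(targets) > 0 && isRemoteURL(targets[0]) {
			cmdContext = targets[0]
			targets = targets[1:]
		}
	}
	return url, cmdContext, targets
}

func main() {
	url, cc, rest := splitBakeArgs([]string{"https://github.com/example/repo.git", "default"})
	fmt.Println(url, cc, rest) // https://github.com/example/repo.git cwd:// [default]
}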
@@ -2,22 +2,30 @@ package commands

import (
	"context"
	"encoding/json"
	"os"
	"path/filepath"
	"strings"

	"github.com/docker/buildx/build"
	"github.com/docker/buildx/util/buildflags"
	"github.com/docker/buildx/util/platformutil"
	"github.com/docker/buildx/util/progress"
	"github.com/docker/buildx/util/tracing"
	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/session/auth/authprovider"
	"github.com/moby/buildkit/util/appcontext"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
)

const defaultTargetName = "default"

type buildOptions struct {
	commonOptions
	contextPath string
@@ -37,13 +45,12 @@ type buildOptions struct {
	extraHosts  []string
	networkMode string

	exportPush bool
	exportLoad bool

	// unimplemented
	squash bool
	quiet  bool

	allow []string

	// hidden
	// untrusted bool
	// ulimits   *opts.UlimitOpt
@@ -62,21 +69,45 @@ type buildOptions struct {
}

type commonOptions struct {
	noCache  bool
	progress string
	pull     bool
	builder      string
	noCache      *bool
	progress     string
	pull         *bool
	metadataFile string
	// golangci-lint#826
	// nolint:structcheck
	exportPush bool
	// nolint:structcheck
	exportLoad bool
}

func runBuild(dockerCli command.Cli, in buildOptions) error {
func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
	if in.squash {
		return errors.Errorf("squash currently not implemented")
	}
	if in.quiet {
		return errors.Errorf("quiet currently not implemented")
		logrus.Warnf("quiet currently not implemented")
	}

	ctx := appcontext.Context()

	ctx, end, err := tracing.TraceCurrentCommand(ctx, "build")
	if err != nil {
		return err
	}
	defer func() {
		end(err)
	}()

	noCache := false
	if in.noCache != nil {
		noCache = *in.noCache
	}
	pull := false
	if in.pull != nil {
		pull = *in.pull
	}

	opts := build.Options{
		Inputs: build.Inputs{
			ContextPath: in.contextPath,
@@ -84,10 +115,10 @@ func runBuild(dockerCli command.Cli, in buildOptions) error {
			InStream:    os.Stdin,
		},
		Tags:      in.tags,
		Labels:    listToMap(in.labels),
		BuildArgs: listToMap(in.buildArgs),
		Pull:      in.pull,
		NoCache:   in.noCache,
		Labels:    listToMap(in.labels, false),
		BuildArgs: listToMap(in.buildArgs, true),
		Pull:      pull,
		NoCache:   noCache,
		Target:      in.target,
		ImageIDFile: in.imageIDFile,
		ExtraHosts:  in.extraHosts,
@@ -102,19 +133,23 @@ func runBuild(dockerCli command.Cli, in buildOptions) error {

	opts.Session = append(opts.Session, authprovider.NewDockerAuthProvider(os.Stderr))

	secrets, err := build.ParseSecretSpecs(in.secrets)
	secrets, err := buildflags.ParseSecretSpecs(in.secrets)
	if err != nil {
		return err
	}
	opts.Session = append(opts.Session, secrets)

	ssh, err := build.ParseSSHSpecs(in.ssh)
	sshSpecs := in.ssh
	if len(sshSpecs) == 0 && buildflags.IsGitSSH(in.contextPath) {
		sshSpecs = []string{"default"}
	}
	ssh, err := buildflags.ParseSSHSpecs(sshSpecs)
	if err != nil {
		return err
	}
	opts.Session = append(opts.Session, ssh)

	outputs, err := build.ParseOutputs(in.outputs)
	outputs, err := buildflags.ParseOutputs(in.outputs)
	if err != nil {
		return err
	}
@@ -155,36 +190,66 @@ func runBuild(dockerCli command.Cli, in buildOptions) error {

	opts.Exports = outputs

	cacheImports, err := build.ParseCacheEntry(in.cacheFrom)
	cacheImports, err := buildflags.ParseCacheEntry(in.cacheFrom)
	if err != nil {
		return err
	}
	opts.CacheFrom = cacheImports

	cacheExports, err := build.ParseCacheEntry(in.cacheTo)
	cacheExports, err := buildflags.ParseCacheEntry(in.cacheTo)
	if err != nil {
		return err
	}
	opts.CacheTo = cacheExports

	return buildTargets(ctx, dockerCli, map[string]build.Options{"default": opts}, in.progress)
	allow, err := buildflags.ParseEntitlements(in.allow)
	if err != nil {
		return err
	}
	opts.Allow = allow

	// key string used for kubernetes "sticky" mode
	contextPathHash, err := filepath.Abs(in.contextPath)
	if err != nil {
		contextPathHash = in.contextPath
	}

	return buildTargets(ctx, dockerCli, map[string]build.Options{defaultTargetName: opts}, in.progress, contextPathHash, in.builder, in.metadataFile)
}

func buildTargets(ctx context.Context, dockerCli command.Cli, opts map[string]build.Options, progressMode string) error {
	dis, err := getDefaultDrivers(ctx, dockerCli)
func buildTargets(ctx context.Context, dockerCli command.Cli, opts map[string]build.Options, progressMode, contextPathHash, instance string, metadataFile string) error {
	dis, err := getInstanceOrDefault(ctx, dockerCli, instance, contextPathHash)
	if err != nil {
		return err
	}

	ctx2, cancel := context.WithCancel(context.TODO())
	defer cancel()
	pw := progress.NewPrinter(ctx2, os.Stderr, progressMode)
	printer := progress.NewPrinter(ctx2, os.Stderr, progressMode)

	resp, err := build.Build(ctx, dis, opts, dockerAPI(dockerCli), dockerCli.ConfigFile(), printer)
	err1 := printer.Wait()
	if err == nil {
		err = err1
	}
	if err != nil {
		return err
	}

	if len(metadataFile) > 0 && resp != nil {
		mdatab, err := json.MarshalIndent(resp[defaultTargetName].ExporterResponse, "", "  ")
		if err != nil {
			return err
		}
		if err := ioutils.AtomicWriteFile(metadataFile, mdatab, 0644); err != nil {
			return err
		}
	}

	_, err = build.Build(ctx, dis, opts, dockerAPI(dockerCli), dockerCli.ConfigFile(), pw)
	return err
}

func buildCmd(dockerCli command.Cli) *cobra.Command {
func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
	var options buildOptions

	cmd := &cobra.Command{
@@ -194,6 +259,7 @@ func buildCmd(dockerCli command.Cli) *cobra.Command {
		Args:  cli.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			options.contextPath = args[0]
			options.builder = rootOpts.builder
			return runBuild(dockerCli, options)
		},
	}
@@ -204,8 +270,12 @@ func buildCmd(dockerCli command.Cli) *cobra.Command {
	flags.BoolVar(&options.exportLoad, "load", false, "Shorthand for --output=type=docker")

	flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, "Name and optionally a tag in the 'name:tag' format")
	flags.SetAnnotation("tag", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t"})
	flags.StringArrayVar(&options.buildArgs, "build-arg", []string{}, "Set build-time variables")
	flags.SetAnnotation("build-arg", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables---build-arg"})

	flags.StringVarP(&options.dockerfileName, "file", "f", "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')")
	flags.SetAnnotation("file", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f"})

	flags.StringArrayVar(&options.labels, "label", []string{}, "Set metadata for an image")

@@ -213,11 +283,15 @@ func buildCmd(dockerCli command.Cli) *cobra.Command {
	flags.StringArrayVar(&options.cacheTo, "cache-to", []string{}, "Cache export destinations (eg. user/app:cache, type=local,dest=path/to/dir)")

	flags.StringVar(&options.target, "target", "", "Set the target build stage to build.")
	flags.SetAnnotation("target", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#specifying-target-build-stage---target"})

	flags.StringSliceVar(&options.allow, "allow", []string{}, "Allow extra privileged entitlement, e.g. network.host, security.insecure")

	// not implemented
	flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success")
	flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build")
	flags.StringSliceVar(&options.extraHosts, "add-host", []string{}, "Add a custom host-to-IP mapping (host:ip)")
	flags.SetAnnotation("add-host", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#add-entries-to-container-hosts-file---add-host"})
	flags.StringVar(&options.imageIDFile, "iidfile", "", "Write the image ID to the file")
	flags.BoolVar(&options.squash, "squash", false, "Squash newly built layers into a single new layer")
	flags.MarkHidden("quiet")
@@ -271,23 +345,37 @@ func buildCmd(dockerCli command.Cli) *cobra.Command {

	flags.StringArrayVarP(&options.outputs, "output", "o", []string{}, "Output destination (format: type=local,dest=path)")

	commonFlags(&options.commonOptions, flags)
	commonBuildFlags(&options.commonOptions, flags)

	return cmd
}

func commonFlags(options *commonOptions, flags *pflag.FlagSet) {
	flags.BoolVar(&options.noCache, "no-cache", false, "Do not use cache when building the image")
	flags.StringVar(&options.progress, "progress", "auto", "Set type of progress output (auto, plain, tty). Use plain to show container output")
	flags.BoolVar(&options.pull, "pull", false, "Always attempt to pull a newer version of the image")
func commonBuildFlags(options *commonOptions, flags *pflag.FlagSet) {
	options.noCache = flags.Bool("no-cache", false, "Do not use cache when building the image")

	defaultProgress, ok := os.LookupEnv("BUILDX_PROGRESS_DEFAULT")
	if !ok {
		defaultProgress = "auto"
	}
	flags.StringVar(&options.progress, "progress", defaultProgress, "Set type of progress output (auto, plain, tty). Use plain to show container output")

	options.pull = flags.Bool("pull", false, "Always attempt to pull a newer version of the image")
	flags.StringVar(&options.metadataFile, "metadata-file", "", "Write build result metadata to the file")
}

func listToMap(values []string) map[string]string {
func listToMap(values []string, defaultEnv bool) map[string]string {
	result := make(map[string]string, len(values))
	for _, value := range values {
		kv := strings.SplitN(value, "=", 2)
		if len(kv) == 1 {
			result[kv[0]] = ""
			if defaultEnv {
				v, ok := os.LookupEnv(kv[0])
				if ok {
					result[kv[0]] = v
				}
			} else {
				result[kv[0]] = ""
			}
		} else {
			result[kv[0]] = kv[1]
		}
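The defaultEnv flag added to listToMap above changes how a bare `--build-arg FOO` behaves: with defaultEnv, the value is inherited from the process environment when the variable is set, and the argument is omitted entirely when it is not. A small self-contained sketch of that logic (listToMapSketch is an illustrative name):

package main

import (
	"fmt"
	"os"
	"strings"
)

// listToMapSketch mirrors the listToMap shown in the diff above.
func listToMapSketch(values []string, defaultEnv bool) map[string]string {
	result := make(map[string]string, len(values))
	for _, value := range values {
		kv := strings.SplitN(value, "=", 2)
		if len(kv) == 1 {
			if defaultEnv {
				if v, ok := os.LookupEnv(kv[0]); ok {
					result[kv[0]] = v // bare key inherits the environment value
				}
				// when the variable is unset, the key is dropped entirely
			} else {
				result[kv[0]] = ""
			}
		} else {
			result[kv[0]] = kv[1]
		}
	}
	return result
}

func main() {
	os.Setenv("HTTP_PROXY", "http://proxy:3128")
	fmt.Println(listToMapSketch([]string{"HTTP_PROXY", "FOO=bar"}, true))
	// map[FOO:bar HTTP_PROXY:http://proxy:3128]
}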
@@ -1,13 +1,17 @@
package commands

import (
	"encoding/csv"
	"fmt"
	"net/url"
	"os"
	"strings"

	"github.com/docker/buildx/driver"
	"github.com/docker/buildx/store"
	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/google/shlex"
	"github.com/moby/buildkit/util/appcontext"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
@@ -22,6 +26,9 @@ type createOptions struct {
	actionAppend bool
	actionLeave  bool
	use          bool
	flags        string
	configFile   string
	driverOpts   []string
	// upgrade bool // perform upgrade of the driver
}

@@ -107,6 +114,14 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
		ng.Driver = driverName
	}

	var flags []string
	if in.flags != "" {
		flags, err = shlex.Split(in.flags)
		if err != nil {
			return errors.Wrap(err, "failed to parse buildkit flags")
		}
	}

	var ep string
	if in.actionLeave {
		if err := ng.Leave(in.nodeName); err != nil {
@@ -128,7 +143,24 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
			return err
		}
	}
	if err := ng.Update(in.nodeName, ep, in.platform, len(args) > 0, in.actionAppend); err != nil {

	if in.driver == "kubernetes" {
		// naming the endpoint so that --append works
		ep = (&url.URL{
			Scheme: in.driver,
			Path:   "/" + in.name,
			RawQuery: (&url.Values{
				"deployment": {in.nodeName},
				"kubeconfig": {os.Getenv("KUBECONFIG")},
			}).Encode(),
		}).String()
	}

	m, err := csvToMap(in.driverOpts)
	if err != nil {
		return err
	}
	if err := ng.Update(in.nodeName, ep, in.platform, len(args) > 0, in.actionAppend, flags, in.configFile, m); err != nil {
		return err
	}
}
@@ -154,6 +186,11 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
func createCmd(dockerCli command.Cli) *cobra.Command {
	var options createOptions

	var drivers []string
	for s := range driver.GetFactories() {
		drivers = append(drivers, s)
	}

	cmd := &cobra.Command{
		Use:   "create [OPTIONS] [CONTEXT|ENDPOINT]",
		Short: "Create a new builder instance",
@@ -166,9 +203,12 @@ func createCmd(dockerCli command.Cli) *cobra.Command {
	flags := cmd.Flags()

	flags.StringVar(&options.name, "name", "", "Builder instance name")
	flags.StringVar(&options.driver, "driver", "", "Driver to use (eg. docker-container)")
	flags.StringVar(&options.driver, "driver", "", fmt.Sprintf("Driver to use (available: %v)", drivers))
	flags.StringVar(&options.nodeName, "node", "", "Create/modify node with given name")
	flags.StringVar(&options.flags, "buildkitd-flags", "", "Flags for buildkitd daemon")
	flags.StringVar(&options.configFile, "config", "", "BuildKit config file")
	flags.StringArrayVar(&options.platform, "platform", []string{}, "Fixed platforms for current node")
	flags.StringArrayVar(&options.driverOpts, "driver-opt", []string{}, "Options for the driver")

	flags.BoolVar(&options.actionAppend, "append", false, "Append a node to builder instead of changing it")
	flags.BoolVar(&options.actionLeave, "leave", false, "Remove a node from builder instead of changing it")
@@ -178,3 +218,22 @@ func createCmd(dockerCli command.Cli) *cobra.Command {

	return cmd
}

func csvToMap(in []string) (map[string]string, error) {
	m := make(map[string]string, len(in))
	for _, s := range in {
		csvReader := csv.NewReader(strings.NewReader(s))
		fields, err := csvReader.Read()
		if err != nil {
			return nil, err
		}
		for _, v := range fields {
			p := strings.SplitN(v, "=", 2)
			if len(p) != 2 {
				return nil, errors.Errorf("invalid value %q, expecting k=v", v)
			}
			m[p[0]] = p[1]
		}
	}
	return m, nil
}
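A minimal, self-contained sketch of the --driver-opt parsing shown in csvToMap above: each flag value is read as one CSV record, then each field is split into a k=v pair. parseDriverOpts is an illustrative name; the real helper is csvToMap.

package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

// parseDriverOpts mirrors csvToMap from the diff above.
func parseDriverOpts(in []string) (map[string]string, error) {
	m := make(map[string]string, len(in))
	for _, s := range in {
		// one flag value may carry several options, e.g. "image=x,namespace=y"
		fields, err := csv.NewReader(strings.NewReader(s)).Read()
		if err != nil {
			return nil, err
		}
		for _, v := range fields {
			p := strings.SplitN(v, "=", 2)
			if len(p) != 2 {
				return nil, fmt.Errorf("invalid value %q, expecting k=v", v)
			}
			m[p[0]] = p[1]
		}
	}
	return m, nil
}

func main() {
	m, err := parseDriverOpts([]string{"image=moby/buildkit:master,namespace=dev"})
	fmt.Println(m, err) // map[image:moby/buildkit:master namespace:dev] <nil>
}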
commands/diskusage.go (new file, 196 lines)
@@ -0,0 +1,196 @@
package commands

import (
	"fmt"
	"io"
	"os"
	"text/tabwriter"

	"github.com/docker/buildx/build"
	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/opts"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/util/appcontext"
	"github.com/spf13/cobra"
	"github.com/tonistiigi/units"
	"golang.org/x/sync/errgroup"
)

type duOptions struct {
	builder string
	filter  opts.FilterOpt
	verbose bool
}

func runDiskUsage(dockerCli command.Cli, opts duOptions) error {
	ctx := appcontext.Context()

	pi, err := toBuildkitPruneInfo(opts.filter.Value())
	if err != nil {
		return err
	}

	dis, err := getInstanceOrDefault(ctx, dockerCli, opts.builder, "")
	if err != nil {
		return err
	}

	for _, di := range dis {
		if di.Err != nil {
			return di.Err
		}
	}

	out := make([][]*client.UsageInfo, len(dis))

	eg, ctx := errgroup.WithContext(ctx)
	for i, di := range dis {
		func(i int, di build.DriverInfo) {
			eg.Go(func() error {
				if di.Driver != nil {
					c, err := di.Driver.Client(ctx)
					if err != nil {
						return err
					}
					du, err := c.DiskUsage(ctx, client.WithFilter(pi.Filter))
					if err != nil {
						return err
					}
					out[i] = du
					return nil
				}
				return nil
			})
		}(i, di)
	}

	if err := eg.Wait(); err != nil {
		return err
	}

	tw := tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0)
	first := true
	for _, du := range out {
		if du == nil {
			continue
		}
		if opts.verbose {
			printVerbose(tw, du)
		} else {
			if first {
				printTableHeader(tw)
				first = false
			}
			for _, di := range du {
				printTableRow(tw, di)
			}

			tw.Flush()
		}
	}

	if opts.filter.Value().Len() == 0 {
		printSummary(tw, out)
	}

	tw.Flush()
	return nil
}

func duCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
	options := duOptions{filter: opts.NewFilterOpt()}

	cmd := &cobra.Command{
		Use:   "du",
		Short: "Disk usage",
		Args:  cli.NoArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			options.builder = rootOpts.builder
			return runDiskUsage(dockerCli, options)
		},
	}

	flags := cmd.Flags()
	flags.Var(&options.filter, "filter", "Provide filter values")
	flags.BoolVar(&options.verbose, "verbose", false, "Provide a more verbose output")

	return cmd
}

func printKV(w io.Writer, k string, v interface{}) {
	fmt.Fprintf(w, "%s:\t%v\n", k, v)
}

func printVerbose(tw *tabwriter.Writer, du []*client.UsageInfo) {
	for _, di := range du {
		printKV(tw, "ID", di.ID)
		if di.Parent != "" {
			printKV(tw, "Parent", di.Parent)
		}
		printKV(tw, "Created at", di.CreatedAt)
		printKV(tw, "Mutable", di.Mutable)
		printKV(tw, "Reclaimable", !di.InUse)
		printKV(tw, "Shared", di.Shared)
		printKV(tw, "Size", fmt.Sprintf("%.2f", units.Bytes(di.Size)))
		if di.Description != "" {
			printKV(tw, "Description", di.Description)
		}
		printKV(tw, "Usage count", di.UsageCount)
		if di.LastUsedAt != nil {
			printKV(tw, "Last used", di.LastUsedAt)
		}
		if di.RecordType != "" {
			printKV(tw, "Type", di.RecordType)
		}

		fmt.Fprintf(tw, "\n")
	}

	tw.Flush()
}

func printTableHeader(tw *tabwriter.Writer) {
	fmt.Fprintln(tw, "ID\tRECLAIMABLE\tSIZE\tLAST ACCESSED")
}

func printTableRow(tw *tabwriter.Writer, di *client.UsageInfo) {
	id := di.ID
	if di.Mutable {
		id += "*"
	}
	size := fmt.Sprintf("%.2f", units.Bytes(di.Size))
	if di.Shared {
		size += "*"
	}
	fmt.Fprintf(tw, "%-71s\t%-11v\t%s\t\n", id, !di.InUse, size)
}

func printSummary(tw *tabwriter.Writer, dus [][]*client.UsageInfo) {
	total := int64(0)
	reclaimable := int64(0)
	shared := int64(0)

	for _, du := range dus {
		for _, di := range du {
			if di.Size > 0 {
				total += di.Size
				if !di.InUse {
					reclaimable += di.Size
				}
			}
			if di.Shared {
				shared += di.Size
			}
		}
	}

	if shared > 0 {
		fmt.Fprintf(tw, "Shared:\t%.2f\n", units.Bytes(shared))
		fmt.Fprintf(tw, "Private:\t%.2f\n", units.Bytes(total-shared))
	}

	fmt.Fprintf(tw, "Reclaimable:\t%.2f\n", units.Bytes(reclaimable))
	fmt.Fprintf(tw, "Total:\t%.2f\n", units.Bytes(total))
	tw.Flush()
}
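A sketch of the table rendering used by the du command above: tabwriter aligns the columns, and "*" marks mutable IDs and shared sizes, matching printTableRow. The sample IDs and sizes are hypothetical.

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

type usage struct {
	ID      string
	InUse   bool
	Mutable bool
	Shared  bool
	Size    int64
}

func main() {
	rows := []usage{
		{ID: "pu0kfvztkvy4", Mutable: true, Size: 17 << 20},
		{ID: "sh9h8z0yrkhe", InUse: true, Shared: true, Size: 120 << 20},
	}
	tw := tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0)
	fmt.Fprintln(tw, "ID\tRECLAIMABLE\tSIZE")
	for _, r := range rows {
		id, size := r.ID, fmt.Sprintf("%dMB", r.Size>>20)
		if r.Mutable {
			id += "*" // mutable cache record
		}
		if r.Shared {
			size += "*" // size shared with other records
		}
		fmt.Fprintf(tw, "%s\t%v\t%s\n", id, !r.InUse, size)
	}
	tw.Flush()
}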
@@ -118,7 +118,15 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
				return err
			}
			srcs[i].Ref = nil
			srcs[i].Desc = desc
			if srcs[i].Desc.Digest == "" {
				srcs[i].Desc = desc
			} else {
				var err error
				srcs[i].Desc, err = mergeDesc(desc, srcs[i].Desc)
				if err != nil {
					return err
				}
			}
			return nil
		})
	}(i)
@@ -168,7 +176,7 @@ func parseSources(in []string) ([]*src, error) {
	for i, in := range in {
		s, err := parseSource(in)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to parse source %q, valid sources are digests, refereces and descriptors", in)
			return nil, errors.Wrapf(err, "failed to parse source %q, valid sources are digests, references and descriptors", in)
		}
		out[i] = s
	}
@@ -238,3 +246,19 @@ func createCmd(dockerCli command.Cli) *cobra.Command {

	return cmd
}

func mergeDesc(d1, d2 ocispec.Descriptor) (ocispec.Descriptor, error) {
	if d2.Size != 0 && d1.Size != d2.Size {
		return ocispec.Descriptor{}, errors.Errorf("invalid size mismatch for %s, %d != %d", d1.Digest, d2.Size, d1.Size)
	}
	if d2.MediaType != "" {
		d1.MediaType = d2.MediaType
	}
	if len(d2.Annotations) != 0 {
		d1.Annotations = d2.Annotations // no merge, so removals are supported
	}
	if d2.Platform != nil {
		d1.Platform = d2.Platform // missing items filled in later from image config
	}
	return d1, nil
}

@@ -30,7 +30,7 @@ func runInspect(dockerCli command.Cli, in inspectOptions, name string) error {
	}

	if in.raw {
		fmt.Printf("%s\n", dt)
		fmt.Printf("%s", dt) // avoid newline to keep digest
		return nil
	}

@@ -38,7 +38,7 @@ func runInspect(dockerCli command.Cli, in inspectOptions, name string) error {
	// case images.MediaTypeDockerSchema2Manifest, specs.MediaTypeImageManifest:
	// TODO: handle distribution manifest and schema1
	case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
		imagetools.PrintManifestList(dt, desc, name, os.Stdout)
		return imagetools.PrintManifestList(dt, desc, name, os.Stdout)
	default:
		fmt.Printf("%s\n", dt)
	}

@@ -23,6 +23,7 @@ import (

type inspectOptions struct {
	bootstrap bool
	builder   string
}

type dinfo struct {
@@ -38,7 +39,7 @@ type nginfo struct {
	err error
}

func runInspect(dockerCli command.Cli, in inspectOptions, args []string) error {
func runInspect(dockerCli command.Cli, in inspectOptions) error {
	ctx := appcontext.Context()

	txn, release, err := getStore(dockerCli)
@@ -49,8 +50,8 @@ func runInspect(dockerCli command.Cli, in inspectOptions, args []string) error {

	var ng *store.NodeGroup

	if len(args) > 0 {
		ng, err = getNodeGroup(txn, dockerCli, args[0])
	if in.builder != "" {
		ng, err = getNodeGroup(txn, dockerCli, in.builder)
		if err != nil {
			return err
		}
@@ -73,17 +74,19 @@ func runInspect(dockerCli command.Cli, in inspectOptions, args []string) error {

	ngi := &nginfo{ng: ng}

	timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
	defer cancel()

	err = loadNodeGroupData(timeoutCtx, dockerCli, ngi)

	var bootNgi *nginfo
	if in.bootstrap {
		var ok bool
		ok, err = boot(ctx, ngi)
		ok, err = boot(ctx, ngi, dockerCli)
		if err != nil {
			return err
		}
		bootNgi = ngi
		if ok {
			ngi = &nginfo{ng: ng}
			err = loadNodeGroupData(ctx, dockerCli, ngi)
@@ -112,9 +115,14 @@ func runInspect(dockerCli command.Cli, in inspectOptions, args []string) error {
				fmt.Fprintf(w, "Error:\t%s\n", err.Error())
			} else if err := ngi.drivers[i].err; err != nil {
				fmt.Fprintf(w, "Error:\t%s\n", err.Error())
			} else if bootNgi != nil && len(bootNgi.drivers) > i && bootNgi.drivers[i].err != nil {
				fmt.Fprintf(w, "Error:\t%s\n", bootNgi.drivers[i].err.Error())
			} else {
				fmt.Fprintf(w, "Status:\t%s\n", ngi.drivers[i].info.Status)
				fmt.Fprintf(w, "Platforms:\t%s\n", strings.Join(platformutil.Format(platformutil.Dedupe(append(n.Platforms, ngi.drivers[i].platforms...))), ", "))
				if len(n.Flags) > 0 {
					fmt.Fprintf(w, "Flags:\t%s\n", strings.Join(n.Flags, " "))
				}
				fmt.Fprintf(w, "Platforms:\t%s\n", strings.Join(platformutil.FormatInGroups(n.Platforms, ngi.drivers[i].platforms), ", "))
			}
		}
	}
@@ -124,7 +132,7 @@ func runInspect(dockerCli command.Cli, in inspectOptions, args []string) error {
	return nil
}

func inspectCmd(dockerCli command.Cli) *cobra.Command {
func inspectCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
	var options inspectOptions

	cmd := &cobra.Command{
@@ -132,7 +140,11 @@ func inspectCmd(dockerCli command.Cli) *cobra.Command {
		Short: "Inspect current builder instance",
		Args:  cli.RequiresMaxArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			return runInspect(dockerCli, options, args)
			options.builder = rootOpts.builder
			if len(args) > 0 {
				options.builder = args[0]
			}
			return runInspect(dockerCli, options)
		},
	}

@@ -145,7 +157,7 @@ func inspectCmd(dockerCli command.Cli) *cobra.Command {
	return cmd
}

func boot(ctx context.Context, ngi *nginfo) (bool, error) {
func boot(ctx context.Context, ngi *nginfo, dockerCli command.Cli) (bool, error) {
	toBoot := make([]int, 0, len(ngi.drivers))
	for i, d := range ngi.drivers {
		if d.err != nil || d.di.Err != nil || d.di.Driver == nil || d.info == nil {
@@ -159,25 +171,27 @@ func boot(ctx context.Context, ngi *nginfo) (bool, error) {
		return false, nil
	}

	pw := progress.NewPrinter(context.TODO(), os.Stderr, "auto")

	mw := progress.NewMultiWriter(pw)
	printer := progress.NewPrinter(context.TODO(), os.Stderr, "auto")

	eg, _ := errgroup.WithContext(ctx)
	for _, idx := range toBoot {
		func(idx int) {
			eg.Go(func() error {
				pw := mw.WithPrefix(ngi.ng.Nodes[idx].Name, len(toBoot) > 1)
				pw := progress.WithPrefix(printer, ngi.ng.Nodes[idx].Name, len(toBoot) > 1)
				_, err := driver.Boot(ctx, ngi.drivers[idx].di.Driver, pw)
				if err != nil {
					ngi.drivers[idx].err = err
				}
				close(pw.Status())
				<-pw.Done()
				return nil
			})
		}(idx)
	}

	return true, eg.Wait()
	err := eg.Wait()
	err1 := printer.Wait()
	if err == nil {
		err = err1
	}

	return true, err
}

@@ -30,7 +30,7 @@ func runLs(dockerCli command.Cli, in lsOptions) error {
	}
	defer release()

	ctx, cancel := context.WithTimeout(ctx, 7*time.Second)
	ctx, cancel := context.WithTimeout(ctx, 20*time.Second)
	defer cancel()

	ll, err := txn.List()
@@ -126,11 +126,10 @@ func printngi(w io.Writer, ngi *nginfo) {
		if d.info != nil {
			status = d.info.Status.String()
		}
		p := append(n.Platforms, d.platforms...)
		if err != "" {
			fmt.Fprintf(w, "  %s\t%s\t%s\n", n.Name, n.Endpoint, err)
		} else {
			fmt.Fprintf(w, "  %s\t%s\t%s\t%s\n", n.Name, n.Endpoint, status, strings.Join(platformutil.Format(p), ", "))
			fmt.Fprintf(w, "  %s\t%s\t%s\t%s\n", n.Name, n.Endpoint, status, strings.Join(platformutil.FormatInGroups(n.Platforms, d.platforms), ", "))
		}
	}
}

commands/prune.go (new file, 197 lines)
@@ -0,0 +1,197 @@
package commands

import (
	"fmt"
	"os"
	"strings"
	"text/tabwriter"
	"time"

	"github.com/docker/buildx/build"
	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/opts"
	"github.com/docker/docker/api/types/filters"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/util/appcontext"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"github.com/tonistiigi/units"
	"golang.org/x/sync/errgroup"
)

type pruneOptions struct {
	builder     string
	all         bool
	filter      opts.FilterOpt
	keepStorage opts.MemBytes
	force       bool
	verbose     bool
}

const (
	normalWarning   = `WARNING! This will remove all dangling build cache. Are you sure you want to continue?`
	allCacheWarning = `WARNING! This will remove all build cache. Are you sure you want to continue?`
)

func runPrune(dockerCli command.Cli, opts pruneOptions) error {
	ctx := appcontext.Context()

	pruneFilters := opts.filter.Value()
	pruneFilters = command.PruneFilters(dockerCli, pruneFilters)

	pi, err := toBuildkitPruneInfo(pruneFilters)
	if err != nil {
		return err
	}

	warning := normalWarning
	if opts.all {
		warning = allCacheWarning
	}

	if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) {
		return nil
	}

	dis, err := getInstanceOrDefault(ctx, dockerCli, opts.builder, "")
	if err != nil {
		return err
	}

	for _, di := range dis {
		if di.Err != nil {
			return di.Err
		}
	}

	ch := make(chan client.UsageInfo)
	printed := make(chan struct{})

	tw := tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0)
	first := true
	total := int64(0)

	go func() {
		defer close(printed)
		for du := range ch {
			total += du.Size
			if opts.verbose {
				printVerbose(tw, []*client.UsageInfo{&du})
			} else {
				if first {
					printTableHeader(tw)
					first = false
				}
				printTableRow(tw, &du)
				tw.Flush()
			}
		}
	}()

	eg, ctx := errgroup.WithContext(ctx)
	for _, di := range dis {
		func(di build.DriverInfo) {
			eg.Go(func() error {
				if di.Driver != nil {
					c, err := di.Driver.Client(ctx)
					if err != nil {
						return err
					}
					popts := []client.PruneOption{
						client.WithKeepOpt(pi.KeepDuration, opts.keepStorage.Value()),
						client.WithFilter(pi.Filter),
					}
					if opts.all {
						popts = append(popts, client.PruneAll)
					}
					return c.Prune(ctx, ch, popts...)
				}
				return nil
			})
		}(di)
	}

	if err := eg.Wait(); err != nil {
		return err
	}
	close(ch)
	<-printed

	tw = tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0)
	fmt.Fprintf(tw, "Total:\t%.2f\n", units.Bytes(total))
	tw.Flush()
	return nil
}

func pruneCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
	options := pruneOptions{filter: opts.NewFilterOpt()}

	cmd := &cobra.Command{
		Use:   "prune",
		Short: "Remove build cache",
		Args:  cli.NoArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			options.builder = rootOpts.builder
			return runPrune(dockerCli, options)
		},
	}

	flags := cmd.Flags()
	flags.BoolVarP(&options.all, "all", "a", false, "Remove all unused images, not just dangling ones")
	flags.Var(&options.filter, "filter", "Provide filter values (e.g. 'until=24h')")
	flags.Var(&options.keepStorage, "keep-storage", "Amount of disk space to keep for cache")
	flags.BoolVar(&options.verbose, "verbose", false, "Provide a more verbose output")
	flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation")

	return cmd
}

func toBuildkitPruneInfo(f filters.Args) (*client.PruneInfo, error) {
	var until time.Duration
	untilValues := f.Get("until")          // canonical
	unusedForValues := f.Get("unused-for") // deprecated synonym for "until" filter

	if len(untilValues) > 0 && len(unusedForValues) > 0 {
		return nil, errors.Errorf("conflicting filters %q and %q", "until", "unused-for")
	}
	filterKey := "until"
	if len(unusedForValues) > 0 {
		filterKey = "unused-for"
	}
	untilValues = append(untilValues, unusedForValues...)

	switch len(untilValues) {
	case 0:
		// nothing to do
	case 1:
		var err error
		until, err = time.ParseDuration(untilValues[0])
		if err != nil {
			return nil, errors.Wrapf(err, "%q filter expects a duration (e.g., '24h')", filterKey)
		}
	default:
		return nil, errors.Errorf("filters expect only one value")
	}

	bkFilter := make([]string, 0, f.Len())
	for _, field := range f.Keys() {
		values := f.Get(field)
		switch len(values) {
		case 0:
			bkFilter = append(bkFilter, field)
		case 1:
			if field == "id" {
				bkFilter = append(bkFilter, field+"~="+values[0])
			} else {
				bkFilter = append(bkFilter, field+"=="+values[0])
			}
		default:
			return nil, errors.Errorf("filters expect only one value")
		}
	}
	return &client.PruneInfo{
		KeepDuration: until,
		Filter:       []string{strings.Join(bkFilter, ",")},
	}, nil
}

@@ -11,9 +11,11 @@ import (
)

type rmOptions struct {
	builder   string
	keepState bool
}

func runRm(dockerCli command.Cli, in rmOptions, args []string) error {
func runRm(dockerCli command.Cli, in rmOptions) error {
	ctx := appcontext.Context()

	txn, release, err := getStore(dockerCli)
@@ -22,12 +24,12 @@ func runRm(dockerCli command.Cli, in rmOptions, args []string) error {
	}
	defer release()

	if len(args) > 0 {
		ng, err := getNodeGroup(txn, dockerCli, args[0])
	if in.builder != "" {
		ng, err := getNodeGroup(txn, dockerCli, in.builder)
		if err != nil {
			return err
		}
		err1 := stop(ctx, dockerCli, ng, true)
		err1 := rm(ctx, dockerCli, ng, in.keepState)
		if err := txn.Remove(ng.Name); err != nil {
			return err
		}
@@ -39,7 +41,7 @@ func runRm(dockerCli command.Cli, in rmOptions, args []string) error {
		return err
	}
	if ng != nil {
		err1 := stop(ctx, dockerCli, ng, true)
		err1 := rm(ctx, dockerCli, ng, in.keepState)
		if err := txn.Remove(ng.Name); err != nil {
			return err
		}
@@ -49,7 +51,7 @@ func runRm(dockerCli command.Cli, in rmOptions, args []string) error {
	return nil
}

func rmCmd(dockerCli command.Cli) *cobra.Command {
func rmCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
	var options rmOptions

	cmd := &cobra.Command{
@@ -57,15 +59,22 @@ func rmCmd(dockerCli command.Cli) *cobra.Command {
		Short: "Remove a builder instance",
		Args:  cli.RequiresMaxArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			return runRm(dockerCli, options, args)
			options.builder = rootOpts.builder
			if len(args) > 0 {
				options.builder = args[0]
			}
			return runRm(dockerCli, options)
		},
	}

	flags := cmd.Flags()
	flags.BoolVar(&options.keepState, "keep-state", false, "Keep BuildKit state")

	return cmd
}

func stop(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, rm bool) error {
	dis, err := driversForNodeGroup(ctx, dockerCli, ng)
func rm(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, keepState bool) error {
	dis, err := driversForNodeGroup(ctx, dockerCli, ng, "")
	if err != nil {
		return err
	}
@@ -74,34 +83,9 @@ func stop(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, rm bo
			if err := di.Driver.Stop(ctx, true); err != nil {
				return err
			}
			if rm {
				if err := di.Driver.Rm(ctx, true); err != nil {
					return err
				}
			}
		}
		if di.Err != nil {
			err = di.Err
		}
	}
	return err
}

func stopCurrent(ctx context.Context, dockerCli command.Cli, rm bool) error {
	dis, err := getDefaultDrivers(ctx, dockerCli)
	if err != nil {
		return err
	}
	for _, di := range dis {
		if di.Driver != nil {
			if err := di.Driver.Stop(ctx, true); err != nil {
			if err := di.Driver.Rm(ctx, true, !keepState); err != nil {
				return err
			}
			if rm {
				if err := di.Driver.Rm(ctx, true); err != nil {
					return err
				}
			}
		}
		if di.Err != nil {
			err = di.Err

@@ -1,10 +1,13 @@
package commands

import (
	"os"

	imagetoolscmd "github.com/docker/buildx/commands/imagetools"
	"github.com/docker/cli/cli-plugins/plugin"
	"github.com/docker/cli/cli/command"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
)

func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Command {
@@ -22,19 +25,32 @@ func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Comman
	return cmd
}

type rootOptions struct {
	builder string
}

func addCommands(cmd *cobra.Command, dockerCli command.Cli) {
	opts := &rootOptions{}
	rootFlags(opts, cmd.PersistentFlags())

	cmd.AddCommand(
		buildCmd(dockerCli),
		bakeCmd(dockerCli),
		buildCmd(dockerCli, opts),
		bakeCmd(dockerCli, opts),
		createCmd(dockerCli),
		rmCmd(dockerCli),
		rmCmd(dockerCli, opts),
		lsCmd(dockerCli),
		useCmd(dockerCli),
		inspectCmd(dockerCli),
		stopCmd(dockerCli),
		useCmd(dockerCli, opts),
		inspectCmd(dockerCli, opts),
		stopCmd(dockerCli, opts),
		installCmd(dockerCli),
		uninstallCmd(dockerCli),
		versionCmd(dockerCli),
		pruneCmd(dockerCli, opts),
		duCmd(dockerCli, opts),
		imagetoolscmd.RootCmd(dockerCli),
	)
}

func rootFlags(options *rootOptions, flags *pflag.FlagSet) {
	flags.StringVar(&options.builder, "builder", os.Getenv("BUILDX_BUILDER"), "Override the configured builder instance")
}

@@ -1,6 +1,9 @@
package commands

import (
	"context"

	"github.com/docker/buildx/store"
	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/moby/buildkit/util/appcontext"
@@ -8,9 +11,10 @@ import (
)

type stopOptions struct {
	builder string
}

func runStop(dockerCli command.Cli, in stopOptions, args []string) error {
func runStop(dockerCli command.Cli, in stopOptions) error {
	ctx := appcontext.Context()

	txn, release, err := getStore(dockerCli)
@@ -19,12 +23,12 @@ func runStop(dockerCli command.Cli, in stopOptions, args []string) error {
	}
	defer release()

	if len(args) > 0 {
		ng, err := getNodeGroup(txn, dockerCli, args[0])
	if in.builder != "" {
		ng, err := getNodeGroup(txn, dockerCli, in.builder)
		if err != nil {
			return err
		}
		if err := stop(ctx, dockerCli, ng, false); err != nil {
		if err := stop(ctx, dockerCli, ng); err != nil {
			return err
		}
		return nil
@@ -35,13 +39,13 @@ func runStop(dockerCli command.Cli, in stopOptions, args []string) error {
		return err
	}
	if ng != nil {
		return stop(ctx, dockerCli, ng, false)
		return stop(ctx, dockerCli, ng)
	}

	return stopCurrent(ctx, dockerCli, false)
	return stopCurrent(ctx, dockerCli)
}

func stopCmd(dockerCli command.Cli) *cobra.Command {
func stopCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
	var options stopOptions

	cmd := &cobra.Command{
@@ -49,7 +53,11 @@ func stopCmd(dockerCli command.Cli) *cobra.Command {
		Short: "Stop builder instance",
		Args:  cli.RequiresMaxArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			return runStop(dockerCli, options, args)
			options.builder = rootOpts.builder
			if len(args) > 0 {
				options.builder = args[0]
			}
			return runStop(dockerCli, options)
		},
	}

@@ -61,3 +69,39 @@ func stopCmd(dockerCli command.Cli) *cobra.Command {

	return cmd
}

func stop(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup) error {
	dis, err := driversForNodeGroup(ctx, dockerCli, ng, "")
	if err != nil {
		return err
	}
	for _, di := range dis {
		if di.Driver != nil {
			if err := di.Driver.Stop(ctx, true); err != nil {
				return err
			}
		}
		if di.Err != nil {
			err = di.Err
		}
	}
	return err
}

func stopCurrent(ctx context.Context, dockerCli command.Cli) error {
	dis, err := getDefaultDrivers(ctx, dockerCli, false, "")
	if err != nil {
		return err
	}
	for _, di := range dis {
		if di.Driver != nil {
			if err := di.Driver.Stop(ctx, true); err != nil {
				return err
			}
		}
		if di.Err != nil {
			err = di.Err
		}
	}
	return err
}

@@ -12,21 +12,22 @@ import (
)

type useOptions struct {
	isGlobal  bool
	isDefault bool
	builder   string
}

func runUse(dockerCli command.Cli, in useOptions, name string) error {
func runUse(dockerCli command.Cli, in useOptions) error {
	txn, release, err := getStore(dockerCli)
	if err != nil {
		return err
	}
	defer release()

	if _, err := txn.NodeGroupByName(name); err != nil {
	if _, err := txn.NodeGroupByName(in.builder); err != nil {
		if os.IsNotExist(errors.Cause(err)) {
			if name == "default" && name != dockerCli.CurrentContext() {
			if in.builder == "default" && in.builder != dockerCli.CurrentContext() {
				return errors.Errorf("run `docker context use default` to switch to default context")
			}
			if name == "default" || name == dockerCli.CurrentContext() {
			if in.builder == "default" || in.builder == dockerCli.CurrentContext() {
				ep, err := getCurrentEndpoint(dockerCli)
				if err != nil {
					return err
@@ -41,35 +42,39 @@ func runUse(dockerCli command.Cli, in useOptions, name string) error {
				return err
			}
			for _, l := range list {
				if l.Name == name {
					return errors.Errorf("run `docker context use %s` to switch to context %s", name, name)
				if l.Name == in.builder {
					return errors.Errorf("run `docker context use %s` to switch to context %s", in.builder, in.builder)
				}
			}

		}
		return errors.Wrapf(err, "failed to find instance %q", name)
		return errors.Wrapf(err, "failed to find instance %q", in.builder)
	}

	ep, err := getCurrentEndpoint(dockerCli)
	if err != nil {
		return err
	}
	if err := txn.SetCurrent(ep, name, in.isGlobal, in.isDefault); err != nil {
	if err := txn.SetCurrent(ep, in.builder, in.isGlobal, in.isDefault); err != nil {
		return err
	}

	return nil
}

func useCmd(dockerCli command.Cli) *cobra.Command {
func useCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
	var options useOptions

	cmd := &cobra.Command{
		Use:   "use [OPTIONS] NAME",
		Short: "Set the current builder instance",
		Args:  cli.ExactArgs(1),
		Args:  cli.RequiresMaxArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			return runUse(dockerCli, options, args[0])
			options.builder = rootOpts.builder
			if len(args) > 0 {
				options.builder = args[0]
			}
			return runUse(dockerCli, options)
		},
	}
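A sketch of the builder-selection pattern the commands above now share: a persistent --builder flag (defaulting to $BUILDX_BUILDER) that an optional positional argument overrides. The command and flag wiring below is illustrative only, not the full buildx command tree.

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	var builder string
	root := &cobra.Command{Use: "tool"}
	// persistent flag so every subcommand inherits it, like rootFlags above
	root.PersistentFlags().StringVar(&builder, "builder", os.Getenv("BUILDX_BUILDER"), "Override the configured builder instance")

	use := &cobra.Command{
		Use:  "use [NAME]",
		Args: cobra.MaximumNArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) > 0 {
				builder = args[0] // positional argument wins over flag/env
			}
			fmt.Println("selected builder:", builder)
			return nil
		},
	}
	root.AddCommand(use)
	_ = root.Execute()
}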
170
commands/util.go
170
commands/util.go
@@ -2,8 +2,10 @@ package commands
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/buildx/build"
|
||||
"github.com/docker/buildx/driver"
|
||||
@@ -11,22 +13,39 @@ import (
|
||||
"github.com/docker/buildx/util/platformutil"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/context/docker"
|
||||
"github.com/docker/cli/cli/context/kubernetes"
|
||||
ctxstore "github.com/docker/cli/cli/context/store"
|
||||
dopts "github.com/docker/cli/opts"
|
||||
dockerclient "github.com/docker/docker/client"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
)
|
||||
|
||||
// getStore returns current builder instance store
|
||||
func getStore(dockerCli command.Cli) (*store.Txn, func(), error) {
|
||||
dir := filepath.Dir(dockerCli.ConfigFile().Filename)
|
||||
s, err := store.New(dir)
|
||||
s, err := store.New(getConfigStorePath(dockerCli))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return s.Txn()
|
||||
}
|
||||
|
||||
// getConfigStorePath will look for correct configuration store path;
|
||||
// if `$BUILDX_CONFIG` is set - use it, otherwise use parent directory
|
||||
// of Docker config file (i.e. `${DOCKER_CONFIG}/buildx`)
|
||||
func getConfigStorePath(dockerCli command.Cli) string {
|
||||
if buildxConfig := os.Getenv("BUILDX_CONFIG"); buildxConfig != "" {
|
||||
logrus.Debugf("using config store %q based in \"$BUILDX_CONFIG\" environment variable", buildxConfig)
|
||||
return buildxConfig
|
||||
}
|
||||
|
||||
buildxConfig := filepath.Join(filepath.Dir(dockerCli.ConfigFile().Filename), "buildx")
|
||||
logrus.Debugf("using default config store %q", buildxConfig)
|
||||
return buildxConfig
|
||||
}
|
||||
|
||||
// getCurrentEndpoint returns the current default endpoint value
|
||||
func getCurrentEndpoint(dockerCli command.Cli) (string, error) {
|
||||
name := dockerCli.CurrentContext()
|
||||
@@ -133,7 +152,7 @@ func getNodeGroup(txn *store.Txn, dockerCli command.Cli, name string) (*store.No
|
||||
}
|
||||
|
||||
// driversForNodeGroup returns drivers for a nodegroup instance
|
||||
func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup) ([]build.DriverInfo, error) {
|
||||
func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, contextPathHash string) ([]build.DriverInfo, error) {
|
||||
eg, _ := errgroup.WithContext(ctx)
|
||||
|
||||
dis := make([]build.DriverInfo, len(ng.Nodes))
|
||||
@@ -174,7 +193,37 @@ func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.N
|
||||
// TODO: replace the following line with dockerclient.WithAPIVersionNegotiation option in clientForEndpoint
|
||||
dockerapi.NegotiateAPIVersion(ctx)
|
||||
|
||||
d, err := driver.GetDriver(ctx, "buildx_buildkit_"+n.Name, f, dockerapi)
|
||||
contextStore := dockerCli.ContextStore()
|
||||
|
||||
var kcc driver.KubeClientConfig
|
||||
kcc, err = configFromContext(n.Endpoint, contextStore)
|
||||
if err != nil {
|
||||
// err is returned if n.Endpoint is non-context name like "unix:///var/run/docker.sock".
|
||||
// try again with name="default".
|
||||
// FIXME: n should retain real context name.
|
||||
kcc, err = configFromContext("default", contextStore)
|
||||
if err != nil {
|
||||
logrus.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
tryToUseKubeConfigInCluster := false
|
||||
if kcc == nil {
|
||||
tryToUseKubeConfigInCluster = true
|
||||
} else {
|
||||
if _, err := kcc.ClientConfig(); err != nil {
|
||||
tryToUseKubeConfigInCluster = true
|
||||
}
|
||||
}
|
||||
if tryToUseKubeConfigInCluster {
|
||||
kccInCluster := driver.KubeClientConfigInCluster{}
|
||||
if _, err := kccInCluster.ClientConfig(); err == nil {
|
||||
logrus.Debug("using kube config in cluster")
|
||||
kcc = kccInCluster
|
||||
}
|
||||
}
|
||||
|
||||
d, err := driver.GetDriver(ctx, "buildx_buildkit_"+n.Name, f, dockerapi, dockerCli.ConfigFile(), kcc, n.Flags, n.ConfigFile, n.DriverOpts, n.Platforms, contextPathHash)
|
||||
if err != nil {
|
||||
di.Err = err
|
||||
return nil
|
||||
@@ -192,6 +241,21 @@ func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.N
|
||||
return dis, nil
|
||||
}
|
||||
|
||||
func configFromContext(endpointName string, s ctxstore.Reader) (clientcmd.ClientConfig, error) {
|
||||
if strings.HasPrefix(endpointName, "kubernetes://") {
|
||||
u, _ := url.Parse(endpointName)
|
||||
|
||||
if kubeconfig := u.Query().Get("kubeconfig"); kubeconfig != "" {
|
||||
clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
|
||||
&clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig},
|
||||
&clientcmd.ConfigOverrides{},
|
||||
)
|
||||
return clientConfig, nil
|
||||
}
|
||||
}
|
||||
return kubernetes.ConfigFromContext(endpointName, s)
|
||||
}
|
||||
|
||||
// clientForEndpoint returns a docker client for an endpoint
|
||||
func clientForEndpoint(dockerCli command.Cli, name string) (dockerclient.APIClient, error) {
|
||||
list, err := dockerCli.ContextStore().List()
|
||||
@@ -234,24 +298,66 @@ func clientForEndpoint(dockerCli command.Cli, name string) (dockerclient.APIClie
|
||||
return dockerclient.NewClientWithOpts(clientOpts...)
|
||||
}
|
||||
|
||||
// getDefaultDrivers returns drivers based on current cli config
|
||||
func getDefaultDrivers(ctx context.Context, dockerCli command.Cli) ([]build.DriverInfo, error) {
|
||||
func getInstanceOrDefault(ctx context.Context, dockerCli command.Cli, instance, contextPathHash string) ([]build.DriverInfo, error) {
|
||||
var defaultOnly bool
|
||||
|
||||
if instance == "default" && instance != dockerCli.CurrentContext() {
|
||||
return nil, errors.Errorf("use `docker --context=default buildx` to switch to default context")
|
||||
}
|
||||
if instance == "default" || instance == dockerCli.CurrentContext() {
|
||||
instance = ""
|
||||
defaultOnly = true
|
||||
}
|
||||
list, err := dockerCli.ContextStore().List()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, l := range list {
|
||||
if l.Name == instance {
|
||||
return nil, errors.Errorf("use `docker --context=%s buildx` to switch to context %s", instance, instance)
|
||||
}
|
||||
}
|
||||
|
||||
if instance != "" {
|
||||
return getInstanceByName(ctx, dockerCli, instance, contextPathHash)
|
||||
}
|
||||
return getDefaultDrivers(ctx, dockerCli, defaultOnly, contextPathHash)
|
||||
}
|
||||
|
||||
func getInstanceByName(ctx context.Context, dockerCli command.Cli, instance, contextPathHash string) ([]build.DriverInfo, error) {
|
||||
txn, release, err := getStore(dockerCli)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer release()
|
||||
|
||||
ng, err := getCurrentInstance(txn, dockerCli)
|
||||
ng, err := txn.NodeGroupByName(instance)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return driversForNodeGroup(ctx, dockerCli, ng, contextPathHash)
|
||||
}

-	if ng != nil {
-		return driversForNodeGroup(ctx, dockerCli, ng)
+// getDefaultDrivers returns drivers based on current cli config
+func getDefaultDrivers(ctx context.Context, dockerCli command.Cli, defaultOnly bool, contextPathHash string) ([]build.DriverInfo, error) {
+	txn, release, err := getStore(dockerCli)
+	if err != nil {
+		return nil, err
+	}
+	defer release()
+
+	if !defaultOnly {
+		ng, err := getCurrentInstance(txn, dockerCli)
+		if err != nil {
+			return nil, err
+		}
+
+		if ng != nil {
+			return driversForNodeGroup(ctx, dockerCli, ng, contextPathHash)
+		}
+	}

-	d, err := driver.GetDriver(ctx, "buildx_buildkit_default", nil, dockerCli.Client())
+	d, err := driver.GetDriver(ctx, "buildx_buildkit_default", nil, dockerCli.Client(), dockerCli.ConfigFile(), nil, nil, "", nil, nil, contextPathHash)
	if err != nil {
		return nil, err
	}
@@ -294,7 +400,7 @@ func loadInfoData(ctx context.Context, d *dinfo) error {
func loadNodeGroupData(ctx context.Context, dockerCli command.Cli, ngi *nginfo) error {
	eg, _ := errgroup.WithContext(ctx)

-	dis, err := driversForNodeGroup(ctx, dockerCli, ngi.ng)
+	dis, err := driversForNodeGroup(ctx, dockerCli, ngi.ng, "")
	if err != nil {
		return err
	}
@@ -312,7 +418,47 @@ func loadNodeGroupData(ctx context.Context, dockerCli command.Cli, ngi *nginfo)
		}(&ngi.drivers[i])
	}

-	return eg.Wait()
+	if eg.Wait(); err != nil {
+		return err
+	}
+
+	kubernetesDriverCount := 0
+
+	for _, di := range ngi.drivers {
+		if di.info != nil && len(di.info.DynamicNodes) > 0 {
+			kubernetesDriverCount++
+		}
+	}
+
+	isAllKubernetesDrivers := len(ngi.drivers) == kubernetesDriverCount
+
+	if isAllKubernetesDrivers {
+		var drivers []dinfo
+		var dynamicNodes []store.Node
+
+		for _, di := range ngi.drivers {
+			// dynamic nodes are used in Kubernetes driver.
+			// Kubernetes pods are dynamically mapped to BuildKit Nodes.
+			if di.info != nil && len(di.info.DynamicNodes) > 0 {
+				for i := 0; i < len(di.info.DynamicNodes); i++ {
+					// all []dinfo share *build.DriverInfo and *driver.Info
+					diClone := di
+					if pl := di.info.DynamicNodes[i].Platforms; len(pl) > 0 {
+						diClone.platforms = pl
+					}
+					drivers = append(drivers, di)
+				}
+				dynamicNodes = append(dynamicNodes, di.info.DynamicNodes...)
+			}
+		}
+
+		// not append (remove the static nodes in the store)
+		ngi.ng.Nodes = dynamicNodes
+		ngi.drivers = drivers
+		ngi.ng.Dynamic = true
+	}
+
+	return nil
}

func dockerAPI(dockerCli command.Cli) *api {
@@ -17,7 +17,7 @@ func runVersion(dockerCli command.Cli) error {
func versionCmd(dockerCli command.Cli) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "version",
-		Short: "Show buildx version information ",
+		Short: "Show buildx version information",
		Args:  cli.ExactArgs(0),
		RunE: func(cmd *cobra.Command, args []string) error {
			return runVersion(dockerCli)

docs/docsgen/generate.go (new file, 198 lines)
@@ -0,0 +1,198 @@
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
	"strings"

	"github.com/docker/buildx/commands"
	"github.com/docker/cli/cli/command"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
)

const descriptionSourcePath = "docs/reference/"

func generateDocs(opts *options) error {
	dockerCLI, err := command.NewDockerCli()
	if err != nil {
		return err
	}
	cmd := &cobra.Command{
		Use:   "docker [OPTIONS] COMMAND [ARG...]",
		Short: "The base command for the Docker CLI.",
	}
	cmd.AddCommand(commands.NewRootCmd("buildx", true, dockerCLI))
	return genCmd(cmd, opts.target)
}

func getMDFilename(cmd *cobra.Command) string {
	name := cmd.CommandPath()
	if i := strings.Index(name, " "); i >= 0 {
		name = name[i+1:]
	}
	return strings.ReplaceAll(name, " ", "_") + ".md"
}

func genCmd(cmd *cobra.Command, dir string) error {
	for _, c := range cmd.Commands() {
		if err := genCmd(c, dir); err != nil {
			return err
		}
	}
	if !cmd.HasParent() {
		return nil
	}

	mdFile := getMDFilename(cmd)
	fullPath := filepath.Join(dir, mdFile)

	content, err := ioutil.ReadFile(fullPath)
	if err != nil {
		if errors.Is(err, os.ErrNotExist) {
			return errors.Wrapf(err, "%s does not exist", mdFile)
		}
	}

	cs := string(content)

	markerStart := "<!---MARKER_GEN_START-->"
	markerEnd := "<!---MARKER_GEN_END-->"

	start := strings.Index(cs, markerStart)
	end := strings.Index(cs, markerEnd)

	if start == -1 {
		return errors.Errorf("no start marker in %s", mdFile)
	}
	if end == -1 {
		return errors.Errorf("no end marker in %s", mdFile)
	}

	out, err := cmdOutput(cmd, cs)
	if err != nil {
		return err
	}
	cont := cs[:start] + markerStart + "\n" + out + "\n" + cs[end:]

	fi, err := os.Stat(fullPath)
	if err != nil {
		return err
	}
	if err := ioutil.WriteFile(fullPath, []byte(cont), fi.Mode()); err != nil {
		return errors.Wrapf(err, "failed to write %s", fullPath)
	}
	log.Printf("updated %s", fullPath)
	return nil
}

func makeLink(txt, link string, f *pflag.Flag, isAnchor bool) string {
	link = "#" + link
	annotations, ok := f.Annotations["docs.external.url"]
	if ok && len(annotations) > 0 {
		link = annotations[0]
	} else {
		if !isAnchor {
			return txt
		}
	}

	return "[" + txt + "](" + link + ")"
}

func cmdOutput(cmd *cobra.Command, old string) (string, error) {
	b := &strings.Builder{}

	desc := cmd.Short
	if cmd.Long != "" {
		desc = cmd.Long
	}
	if desc != "" {
		fmt.Fprintf(b, "%s\n\n", desc)
	}

	if len(cmd.Aliases) != 0 {
		fmt.Fprintf(b, "### Aliases\n\n`%s`", cmd.Name())
		for _, a := range cmd.Aliases {
			fmt.Fprintf(b, ", `%s`", a)
		}
		fmt.Fprint(b, "\n\n")
	}

	if len(cmd.Commands()) != 0 {
		fmt.Fprint(b, "### Subcommands\n\n")
		fmt.Fprint(b, "| Name | Description |\n")
		fmt.Fprint(b, "| --- | --- |\n")
		for _, c := range cmd.Commands() {
			fmt.Fprintf(b, "| [`%s`](%s) | %s |\n", c.Name(), getMDFilename(c), c.Short)
		}
		fmt.Fprint(b, "\n\n")
	}

	hasFlags := cmd.Flags().HasAvailableFlags()

	cmd.Flags().AddFlagSet(cmd.InheritedFlags())

	if hasFlags {
		fmt.Fprint(b, "### Options\n\n")
		fmt.Fprint(b, "| Name | Description |\n")
		fmt.Fprint(b, "| --- | --- |\n")

		cmd.Flags().VisitAll(func(f *pflag.Flag) {
			if f.Hidden {
				return
			}
			isLink := strings.Contains(old, "<a name=\""+f.Name+"\"></a>")
			fmt.Fprint(b, "| ")
			if f.Shorthand != "" {
				name := "`-" + f.Shorthand + "`"
				name = makeLink(name, f.Name, f, isLink)
				fmt.Fprintf(b, "%s, ", name)
			}
			name := "`--" + f.Name
			if f.Value.Type() != "bool" {
				name += " " + f.Value.Type()
			}
			name += "`"
			name = makeLink(name, f.Name, f, isLink)
			fmt.Fprintf(b, "%s | %s |\n", name, f.Usage)
		})
		fmt.Fprintln(b, "")
	}

	return b.String(), nil
}

type options struct {
	target string
}

func parseArgs() (*options, error) {
	opts := &options{}
	flags := pflag.NewFlagSet(os.Args[0], pflag.ContinueOnError)
	flags.StringVar(&opts.target, "target", descriptionSourcePath, "Docs directory")
	err := flags.Parse(os.Args[1:])
	return opts, err
}

func main() {
	if err := run(); err != nil {
		log.Printf("error: %+v", err)
		os.Exit(1)
	}
}

func run() error {
	opts, err := parseArgs()
	if err != nil {
		return err
	}
	if err := generateDocs(opts); err != nil {
		return err
	}
	return nil
}
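
The generator rewrites everything between the `<!---MARKER_GEN_START-->` and `<!---MARKER_GEN_END-->` markers in each reference page, so hand-written prose outside the markers survives regeneration. A plausible invocation from the repository root (assuming the `main` package lives under `docs/docsgen`, as above):

```console
$ go run ./docs/docsgen --target docs/reference/
```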

docs/reference/buildx.md (new file, 31 lines)
@@ -0,0 +1,31 @@
# buildx

```
docker buildx [OPTIONS] COMMAND
```

<!---MARKER_GEN_START-->
Build with BuildKit

### Subcommands

| Name | Description |
| --- | --- |
| [`bake`](buildx_bake.md) | Build from a file |
| [`build`](buildx_build.md) | Start a build |
| [`create`](buildx_create.md) | Create a new builder instance |
| [`du`](buildx_du.md) | Disk usage |
| [`imagetools`](buildx_imagetools.md) | Commands to work on images in registry |
| [`inspect`](buildx_inspect.md) | Inspect current builder instance |
| [`install`](buildx_install.md) | Install buildx as a 'docker builder' alias |
| [`ls`](buildx_ls.md) | List builder instances |
| [`prune`](buildx_prune.md) | Remove build cache |
| [`rm`](buildx_rm.md) | Remove a builder instance |
| [`stop`](buildx_stop.md) | Stop builder instance |
| [`uninstall`](buildx_uninstall.md) | Uninstall the 'docker builder' alias |
| [`use`](buildx_use.md) | Set the current builder instance |
| [`version`](buildx_version.md) | Show buildx version information |


<!---MARKER_GEN_END-->

docs/reference/buildx_bake.md (new file, 374 lines)
@@ -0,0 +1,374 @@
# buildx bake

```
docker buildx bake [OPTIONS] [TARGET...]
```

<!---MARKER_GEN_START-->
Build from a file

### Aliases

`bake`, `f`

### Options

| Name | Description |
| --- | --- |
| `--builder string` | Override the configured builder instance |
| [`-f`](#file), [`--file stringArray`](#file) | Build definition file |
| `--load` | Shorthand for --set=*.output=type=docker |
| `--metadata-file string` | Write build result metadata to the file |
| [`--no-cache`](#no-cache) | Do not use cache when building the image |
| [`--print`](#print) | Print the options without building |
| [`--progress string`](#progress) | Set type of progress output (auto, plain, tty). Use plain to show container output |
| [`--pull`](#pull) | Always attempt to pull a newer version of the image |
| `--push` | Shorthand for --set=*.output=type=registry |
| [`--set stringArray`](#set) | Override target value (eg: targetpattern.key=value) |


<!---MARKER_GEN_END-->

## Description

Bake is a high-level build command. Each specified target will run in parallel
as part of the build.

Read [High-level build options](https://github.com/docker/buildx#high-level-build-options) for an introduction.

Please note that the `buildx bake` command may receive backwards-incompatible
features in the future if needed. We are looking for feedback on improving the
command and extending the functionality further.

## Examples

### <a name="file"></a> Specify a build definition file (-f, --file)

By default, `buildx bake` looks for build definition files in the current
directory; the following are parsed:

- `docker-compose.yml`
- `docker-compose.yaml`
- `docker-bake.json`
- `docker-bake.override.json`
- `docker-bake.hcl`
- `docker-bake.override.hcl`

Use the `-f` / `--file` option to specify the build definition file to use. The
file can be a Docker Compose, JSON or HCL file. If multiple files are specified,
they are all read and the configurations are combined.
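
For example, a hypothetical invocation that merges a base file with an override
file (both file names illustrative):

```console
$ docker buildx bake -f docker-bake.hcl -f docker-bake.override.hcl webapp
```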

The following example uses a Docker Compose file named `docker-compose.dev.yaml`
as the build definition file, and builds all targets in the file:

```console
$ docker buildx bake -f docker-compose.dev.yaml

[+] Building 66.3s (30/30) FINISHED
 => [frontend internal] load build definition from Dockerfile    0.1s
 => => transferring dockerfile: 36B                              0.0s
 => [backend internal] load build definition from Dockerfile     0.2s
 => => transferring dockerfile: 3.73kB                           0.0s
 => [database internal] load build definition from Dockerfile    0.1s
 => => transferring dockerfile: 5.77kB                           0.0s
...
```

Pass the names of the targets to build, to build only specific target(s). The
following example builds the `backend` and `database` targets that are defined
in the `docker-compose.dev.yaml` file, skipping the build for the `frontend`
target:

```console
$ docker buildx bake -f docker-compose.dev.yaml backend database

[+] Building 2.4s (13/13) FINISHED
 => [backend internal] load build definition from Dockerfile     0.1s
 => => transferring dockerfile: 81B                              0.0s
 => [database internal] load build definition from Dockerfile    0.2s
 => => transferring dockerfile: 36B                              0.0s
 => [backend internal] load .dockerignore                        0.3s
...
```

### <a name="no-cache"></a> Do not use cache when building the image (--no-cache)

Same as `build --no-cache`. Do not use cache when building the image.

### <a name="print"></a> Print the options without building (--print)

Prints the resulting options of the targets desired to be built, in JSON format,
without starting a build.

```console
$ docker buildx bake -f docker-bake.hcl --print db
{
  "target": {
    "db": {
      "context": "./",
      "dockerfile": "Dockerfile",
      "tags": [
        "docker.io/tiborvass/db"
      ]
    }
  }
}
```

### <a name="progress"></a> Set type of progress output (--progress)

Same as `build --progress`. Set type of progress output (auto, plain, tty). Use
plain to show container output (default "auto").

The following example uses `plain` output during the build:

```console
$ docker buildx bake --progress=plain

#2 [backend internal] load build definition from Dockerfile.test
#2 sha256:de70cb0bb6ed8044f7b9b1b53b67f624e2ccfb93d96bb48b70c1fba562489618
#2 ...

#1 [database internal] load build definition from Dockerfile.test
#1 sha256:453cb50abd941762900a1212657a35fc4aad107f5d180b0ee9d93d6b74481bce
#1 transferring dockerfile: 36B done
#1 DONE 0.1s
...
```

### <a name="pull"></a> Always attempt to pull a newer version of the image (--pull)

Same as `build --pull`.

### <a name="set"></a> Override target configurations from command line (--set)

```
--set targetpattern.key[.subkey]=value
```

Override target configurations from the command line. The pattern matching
syntax is defined in https://golang.org/pkg/path/#Match.

**Examples**

```console
$ docker buildx bake --set target.args.mybuildarg=value
$ docker buildx bake --set target.platform=linux/arm64
$ docker buildx bake --set foo*.args.mybuildarg=value  # overrides build arg for all targets starting with 'foo'
$ docker buildx bake --set *.platform=linux/arm64      # overrides platform for all targets
$ docker buildx bake --set foo*.no-cache               # bypass caching only for targets starting with 'foo'
```

Complete list of overridable fields:
`args`, `cache-from`, `cache-to`, `context`, `dockerfile`, `labels`, `no-cache`,
`output`, `platform`, `pull`, `secrets`, `ssh`, `tags`, `target`

### File definition

In addition to compose files, bake supports a JSON and an equivalent HCL file
format for defining build groups and targets.

A target reflects a single docker build invocation with the same options that
you would specify for `docker build`. A group is a grouping of targets.

Multiple files can include the same target, and the final build options will be
determined by merging them together.

In the case of compose files, each service corresponds to a target.

A group can specify its list of targets with the `targets` option. A target can
inherit build options by setting the `inherits` option to the list of targets or
groups to inherit from.

Note: Design of the bake command is work in progress, the user experience may
change based on feedback.

**Example HCL definition**

```hcl
group "default" {
  targets = ["db", "webapp-dev"]
}

target "webapp-dev" {
  dockerfile = "Dockerfile.webapp"
  tags = ["docker.io/username/webapp"]
}

target "webapp-release" {
  inherits = ["webapp-dev"]
  platforms = ["linux/amd64", "linux/arm64"]
}

target "db" {
  dockerfile = "Dockerfile.db"
  tags = ["docker.io/username/db"]
}
```

Complete list of valid target fields:

`args`, `cache-from`, `cache-to`, `context`, `dockerfile`, `inherits`, `labels`,
`no-cache`, `output`, `platform`, `pull`, `secrets`, `ssh`, `tags`, `target`

### HCL variables and functions

Similar to how Terraform provides a way to [define variables](https://www.terraform.io/docs/configuration/variables.html#declaring-an-input-variable),
the HCL file format also supports variable block definitions. These can be used
to define variables with values provided by the current environment, or a default
value when unset.


Example of using interpolation to tag an image with the git sha:

```console
$ cat <<'EOF' > docker-bake.hcl
variable "TAG" {
  default = "latest"
}

group "default" {
  targets = ["webapp"]
}

target "webapp" {
  tags = ["docker.io/username/webapp:${TAG}"]
}
EOF

$ docker buildx bake --print webapp
{
  "target": {
    "webapp": {
      "context": ".",
      "dockerfile": "Dockerfile",
      "tags": [
        "docker.io/username/webapp:latest"
      ]
    }
  }
}

$ TAG=$(git rev-parse --short HEAD) docker buildx bake --print webapp
{
  "target": {
    "webapp": {
      "context": ".",
      "dockerfile": "Dockerfile",
      "tags": [
        "docker.io/username/webapp:985e9e9"
      ]
    }
  }
}
```


A [set of generally useful functions](https://github.com/docker/buildx/blob/master/bake/hcl.go#L19-L65)
provided by [go-cty](https://github.com/zclconf/go-cty/tree/main/cty/function/stdlib)
are available for use in HCL files. In addition, [user defined functions](https://github.com/hashicorp/hcl/tree/main/ext/userfunc)
are also supported.

Example of using the `add` function:

```console
$ cat <<'EOF' > docker-bake.hcl
variable "TAG" {
  default = "latest"
}

group "default" {
  targets = ["webapp"]
}

target "webapp" {
  args = {
    buildno = "${add(123, 1)}"
  }
}
EOF

$ docker buildx bake --print webapp
{
  "target": {
    "webapp": {
      "context": ".",
      "dockerfile": "Dockerfile",
      "args": {
        "buildno": "124"
      }
    }
  }
}
```

Example of defining an `increment` function:

```console
$ cat <<'EOF' > docker-bake.hcl
function "increment" {
  params = [number]
  result = number + 1
}

group "default" {
  targets = ["webapp"]
}

target "webapp" {
  args = {
    buildno = "${increment(123)}"
  }
}
EOF

$ docker buildx bake --print webapp
{
  "target": {
    "webapp": {
      "context": ".",
      "dockerfile": "Dockerfile",
      "args": {
        "buildno": "124"
      }
    }
  }
}
```

Example of only adding tags if a variable is not empty, using a `notequal`
function:

```console
$ cat <<'EOF' > docker-bake.hcl
variable "TAG" { default = "" }

group "default" {
  targets = [
    "webapp",
  ]
}

target "webapp" {
  context = "."
  dockerfile = "Dockerfile"
  tags = [
    "my-image:latest",
    notequal("", TAG) ? "my-image:${TAG}" : "",
  ]
}
EOF

$ docker buildx bake --print webapp
{
  "target": {
    "webapp": {
      "context": ".",
      "dockerfile": "Dockerfile",
      "tags": [
        "my-image:latest"
      ]
    }
  }
}
```

docs/reference/buildx_build.md (new file, 272 lines)
@@ -0,0 +1,272 @@
# buildx build

```
docker buildx build [OPTIONS] PATH | URL | -
```

<!---MARKER_GEN_START-->
Start a build

### Aliases

`build`, `b`

### Options

| Name | Description |
| --- | --- |
| [`--add-host stringSlice`](https://docs.docker.com/engine/reference/commandline/build/#add-entries-to-container-hosts-file---add-host) | Add a custom host-to-IP mapping (host:ip) |
| [`--allow stringSlice`](#allow) | Allow extra privileged entitlement, e.g. network.host, security.insecure |
| [`--build-arg stringArray`](https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables---build-arg) | Set build-time variables |
| `--builder string` | Override the configured builder instance |
| [`--cache-from stringArray`](#cache-from) | External cache sources (eg. user/app:cache, type=local,src=path/to/dir) |
| [`--cache-to stringArray`](#cache-to) | Cache export destinations (eg. user/app:cache, type=local,dest=path/to/dir) |
| [`-f`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f), [`--file string`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f) | Name of the Dockerfile (Default is 'PATH/Dockerfile') |
| `--iidfile string` | Write the image ID to the file |
| `--label stringArray` | Set metadata for an image |
| [`--load`](#load) | Shorthand for --output=type=docker |
| `--metadata-file string` | Write build result metadata to the file |
| `--network string` | Set the networking mode for the RUN instructions during build |
| `--no-cache` | Do not use cache when building the image |
| [`-o`](#output), [`--output stringArray`](#output) | Output destination (format: type=local,dest=path) |
| [`--platform stringArray`](#platform) | Set target platform for build |
| `--progress string` | Set type of progress output (auto, plain, tty). Use plain to show container output |
| `--pull` | Always attempt to pull a newer version of the image |
| [`--push`](#push) | Shorthand for --output=type=registry |
| `--secret stringArray` | Secret file to expose to the build: id=mysecret,src=/local/secret |
| `--ssh stringArray` | SSH agent socket or keys to expose to the build (format: default|<id>[=<socket>|<key>[,<key>]]) |
| [`-t`](https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t), [`--tag stringArray`](https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t) | Name and optionally a tag in the 'name:tag' format |
| [`--target string`](https://docs.docker.com/engine/reference/commandline/build/#specifying-target-build-stage---target) | Set the target build stage to build. |


<!---MARKER_GEN_END-->

## Description

The `buildx build` command starts a build using BuildKit. This command is similar
to the UI of the `docker build` command and takes the same flags and arguments.

For documentation on most of these flags, refer to the [`docker build`
documentation](https://docs.docker.com/engine/reference/commandline/build/). Here
we'll document a subset of the new flags.

## Examples

### <a name="platform"></a> Set the target platforms for the build (--platform)

```
--platform=value[,value]
```

Set the target platform for the build. All `FROM` commands inside the Dockerfile
without their own `--platform` flag will pull base images for this platform, and
this value will also be the platform of the resulting image. The default value
is the current platform of the BuildKit daemon.

When using the `docker-container` driver with `buildx`, this flag can accept multiple
values as an input separated by a comma. With multiple values the result will be
built for all of the specified platforms and joined together into a single manifest
list.

If the `Dockerfile` needs to invoke the `RUN` command, the builder needs runtime
support for the specified platform. In a clean setup, you can only execute `RUN`
commands for your system architecture.
If your kernel supports [`binfmt_misc`](https://en.wikipedia.org/wiki/Binfmt_misc)
launchers for secondary architectures, buildx will pick them up automatically.
Docker Desktop releases come with `binfmt_misc` automatically configured for `arm64`
and `arm` architectures. You can see what runtime platforms your current builder
instance supports by running `docker buildx inspect --bootstrap`.

Inside a `Dockerfile`, you can access the current platform value through the
`TARGETPLATFORM` build argument. Please refer to the [`docker build`
documentation](https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope)
for the full description of the automatic platform argument variants.

The formatting for the platform specifier is defined in the [containerd source
code](https://github.com/containerd/containerd/blob/v1.4.3/platforms/platforms.go#L63).

**Examples**

```console
$ docker buildx build --platform=linux/arm64 .
$ docker buildx build --platform=linux/amd64,linux/arm64,linux/arm/v7 .
$ docker buildx build --platform=darwin .
```

### <a name="output"></a> Set the export action for the build result (-o, --output)

```
-o, --output=[PATH,-,type=TYPE[,KEY=VALUE]]
```

Sets the export action for the build result. In `docker build` all builds finish
by creating a container image and exporting it to `docker images`. `buildx` makes
this step configurable, allowing results to be exported directly to the client,
OCI image tarballs, a registry, etc.

Buildx with the `docker` driver currently only supports the local, tarball, and
image exporters. The `docker-container` driver supports all the exporters.

If just the path is specified as a value, `buildx` will use the local exporter
with this path as the destination. If the value is "-", `buildx` will use the
`tar` exporter and write to `stdout`.

**Examples**

```console
$ docker buildx build -o . .
$ docker buildx build -o outdir .
$ docker buildx build -o - - > out.tar
$ docker buildx build -o type=docker .
$ docker buildx build -o type=docker,dest=- . > myimage.tar
$ docker buildx build -t tonistiigi/foo -o type=registry .
```

Supported exported types are:

#### `local`

The `local` export type writes all result files to a directory on the client. The
new files will be owned by the current user. On multi-platform builds, all results
will be put in subdirectories by their platform.

Attribute key:

- `dest` - destination directory where files will be written

#### `tar`

The `tar` export type writes all result files as a single tarball on the client.
On multi-platform builds all results will be put in subdirectories by their platform.

Attribute key:

- `dest` - destination path where tarball will be written. “-” writes to stdout.

#### `oci`

The `oci` export type writes the result image or manifest list as an [OCI image
layout](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-layout.md)
tarball on the client.

Attribute key:

- `dest` - destination path where tarball will be written. “-” writes to stdout.

#### `docker`

The `docker` export type writes the single-platform result image as a [Docker image
specification](https://github.com/docker/docker/blob/v20.10.2/image/spec/v1.2.md)
tarball on the client. Tarballs created by this exporter are also OCI compatible.

Currently, multi-platform images cannot be exported with the `docker` export type.
The most common use case for multi-platform images is to push directly to a registry
(see [`registry`](#registry)).

Attribute keys:

- `dest` - destination path where tarball will be written. If not specified, the
  tar will be loaded automatically to the current docker instance.
- `context` - name for the docker context where to import the result

#### `image`

The `image` exporter writes the build result as an image or a manifest list. When
using the `docker` driver the image will appear in `docker images`. Optionally, the
image can be automatically pushed to a registry by specifying attributes.

Attribute keys:

- `name` - name (references) for the new image.
- `push` - boolean to automatically push the image.
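
For example, a hypothetical build that names the image and pushes it in one step
(repository name illustrative):

```console
$ docker buildx build -o type=image,name=docker.io/username/webapp,push=true .
```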

#### `registry`

The `registry` exporter is a shortcut for `type=image,push=true`.


### <a name="push"></a> Push the build result to a registry (--push)

Shorthand for [`--output=type=registry`](#registry). Will automatically push the
build result to the registry.

### <a name="load"></a> Load the single-platform build result to `docker images` (--load)

Shorthand for [`--output=type=docker`](#docker). Will automatically load the
single-platform build result to `docker images`.

### <a name="cache-from"></a> Use an external cache source for a build (--cache-from)

```
--cache-from=[NAME|type=TYPE[,KEY=VALUE]]
```

Use an external cache source for a build. Supported types are `registry` and `local`.
The `registry` source can import cache from a cache manifest or (special) image
configuration on the registry. The `local` source can import cache from local
files previously exported with `--cache-to`.

If no type is specified, the `registry` exporter is used with the specified reference.

The `docker` driver currently only supports importing build cache from the registry.

**Examples**

```console
$ docker buildx build --cache-from=user/app:cache .
$ docker buildx build --cache-from=user/app .
$ docker buildx build --cache-from=type=registry,ref=user/app .
$ docker buildx build --cache-from=type=local,src=path/to/cache .
```

### <a name="cache-to"></a> Export build cache to an external cache destination (--cache-to)

```
--cache-to=[NAME|type=TYPE[,KEY=VALUE]]
```

Export build cache to an external cache destination. Supported types are `registry`,
`local` and `inline`. Registry exports build cache to a cache manifest in the
registry, local exports cache to a local directory on the client, and inline writes
the cache metadata into the image configuration.

The `docker` driver currently only supports exporting inline cache metadata to the
image configuration. Alternatively, `--build-arg BUILDKIT_INLINE_CACHE=1` can be
used to trigger the inline cache exporter.

Attribute key:

- `mode` - Specifies how many layers are exported with the cache. “min” only
  exports layers already in the final build stage, “max” exports layers for
  all stages. Metadata is always exported for the whole build.

**Examples**

```console
$ docker buildx build --cache-to=user/app:cache .
$ docker buildx build --cache-to=type=inline .
$ docker buildx build --cache-to=type=registry,ref=user/app .
$ docker buildx build --cache-to=type=local,dest=path/to/cache .
```

### <a name="allow"></a> Allow extra privileged entitlement (--allow)

```
--allow=ENTITLEMENT
```

Allow extra privileged entitlement. List of entitlements:

- `network.host` - Allows executions with host networking.
- `security.insecure` - Allows executions without sandbox. See
  [related Dockerfile extensions](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/experimental.md#run---securityinsecuresandbox).

For entitlements to be enabled, the `buildkitd` daemon also needs to allow them
with `--allow-insecure-entitlement` (see [`create --buildkitd-flags`](buildx_create.md#buildkitd-flags)).

**Examples**

```console
$ docker buildx create --use --name insecure-builder --buildkitd-flags '--allow-insecure-entitlement security.insecure'
$ docker buildx build --allow security.insecure .
```

docs/reference/buildx_create.md (new file, 189 lines)
@@ -0,0 +1,189 @@
# buildx create

```
docker buildx create [OPTIONS] [CONTEXT|ENDPOINT]
```

<!---MARKER_GEN_START-->
Create a new builder instance

### Options

| Name | Description |
| --- | --- |
| [`--append`](#append) | Append a node to builder instead of changing it |
| `--builder string` | Override the configured builder instance |
| [`--buildkitd-flags string`](#buildkitd-flags) | Flags for buildkitd daemon |
| [`--config string`](#config) | BuildKit config file |
| [`--driver string`](#driver) | Driver to use (available: []) |
| [`--driver-opt stringArray`](#driver-opt) | Options for the driver |
| [`--leave`](#leave) | Remove a node from builder instead of changing it |
| [`--name string`](#name) | Builder instance name |
| [`--node string`](#node) | Create/modify node with given name |
| [`--platform stringArray`](#platform) | Fixed platforms for current node |
| [`--use`](#use) | Set the current builder instance |


<!---MARKER_GEN_END-->


## Description

Create makes a new builder instance pointing to a docker context or endpoint,
where context is the name of a context from `docker context ls` and endpoint is
the address for the docker socket (eg. the `DOCKER_HOST` value).

By default, the current Docker configuration is used for determining the
context/endpoint value.

Builder instances are isolated environments where builds can be invoked. All
Docker contexts also get the default builder instance.

## Examples

### <a name="append"></a> Append a new node to an existing builder (--append)

The `--append` flag changes the action of the command to append a new node to an
existing builder specified by `--name`. Buildx will choose an appropriate node
for a build based on the platforms it supports.

**Examples**

```console
$ docker buildx create mycontext1
eager_beaver

$ docker buildx create --name eager_beaver --append mycontext2
eager_beaver
```

### <a name="buildkitd-flags"></a> Specify options for the buildkitd daemon (--buildkitd-flags)

```
--buildkitd-flags FLAGS
```

Adds flags when starting the buildkitd daemon. They take precedence over the
configuration file specified by [`--config`](#config). See `buildkitd --help`
for the available flags.

**Example**

```
--buildkitd-flags '--debug --debugaddr 0.0.0.0:6666'
```

### <a name="config"></a> Specify a configuration file for the buildkitd daemon (--config)

```
--config FILE
```

Specifies the configuration file for the buildkitd daemon to use. The configuration
can be overridden by [`--buildkitd-flags`](#buildkitd-flags).
See an [example buildkitd configuration file](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md).
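
A sketch of creating a builder from a local configuration file (builder name and
path illustrative):

```console
$ docker buildx create --name mybuilder --config ./buildkitd.toml
```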

### <a name="driver"></a> Set the builder driver to use (--driver)

```
--driver DRIVER
```

Sets the builder driver to be used. There are three available drivers, each with
its own specificities.

- `docker` - Uses the builder that is built into the docker daemon. With this
  driver, the [`--load`](buildx_build.md#load) flag is implied by default on
  `buildx build`. However, building multi-platform images or exporting cache is
  not currently supported.
- `docker-container` - Uses a BuildKit container that will be spawned via docker.
  With this driver, both building multi-platform images and exporting cache are
  supported. However, images built will not automatically appear in `docker images`
  (see [`build --load`](buildx_build.md#load)).
- `kubernetes` - Uses Kubernetes pods. With this driver, you can spin up pods
  with a defined BuildKit container image to build your images.


### <a name="driver-opt"></a> Set additional driver-specific options (--driver-opt)

```
--driver-opt OPTIONS
```

Passes additional driver-specific options. Details for each driver:

- `docker` - No driver options
- `docker-container`
  - `image=IMAGE` - Sets the container image to be used for running buildkit.
  - `network=NETMODE` - Sets the network mode for running the buildkit container.
  - Example:

    ```console
    --driver docker-container --driver-opt image=moby/buildkit:master,network=host
    ```
- `kubernetes`
  - `image=IMAGE` - Sets the container image to be used for running buildkit.
  - `namespace=NS` - Sets the Kubernetes namespace. Defaults to the current namespace.
  - `replicas=N` - Sets the number of `Pod` replicas. Defaults to 1.
  - `requests.cpu` - Sets the request CPU value specified in units of Kubernetes CPU. Example `requests.cpu=100m`, `requests.cpu=2`
  - `requests.memory` - Sets the request memory value specified in bytes or with a valid suffix. Example `requests.memory=500Mi`, `requests.memory=4G`
  - `limits.cpu` - Sets the limit CPU value specified in units of Kubernetes CPU. Example `limits.cpu=100m`, `limits.cpu=2`
  - `limits.memory` - Sets the limit memory value specified in bytes or with a valid suffix. Example `limits.memory=500Mi`, `limits.memory=4G`
  - `nodeselector="label1=value1,label2=value2"` - Sets the kv of `Pod` nodeSelector. No defaults. Example `nodeselector=kubernetes.io/arch=arm64`
  - `rootless=(true|false)` - Run the container as a non-root user without `securityContext.privileged`. [Using an Ubuntu host kernel is recommended](https://github.com/moby/buildkit/blob/master/docs/rootless.md). Defaults to false.
  - `loadbalance=(sticky|random)` - Load-balancing strategy. If set to "sticky", the pod is chosen using the hash of the context path. Defaults to "sticky"

### <a name="leave"></a> Remove a node from a builder (--leave)

The `--leave` flag changes the action of the command to remove a node from a
builder. The builder needs to be specified with `--name`, and the node that is
removed is set with `--node`.

**Examples**

```console
$ docker buildx create --name mybuilder --node mybuilder0 --leave
```

### <a name="name"></a> Specify the name of the builder (--name)

```
--name NAME
```

The `--name` flag specifies the name of the builder to be created or modified.
If none is specified, one will be automatically generated.

### <a name="node"></a> Specify the name of the node (--node)

```
--node NODE
```

The `--node` flag specifies the name of the node to be created or modified. If
none is specified, it is the name of the builder it belongs to, with an index
number suffix.
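
For example, a hypothetical builder where the node name is set explicitly (all
names illustrative):

```console
$ docker buildx create --name mybuilder --node mybuilder-node1 mycontext
```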

### <a name="platform"></a> Set the platforms supported by the node

```
--platform PLATFORMS
```

The `--platform` flag sets the platforms supported by the node. It expects a
comma-separated list of platforms of the form OS/architecture/variant. The node
will also automatically detect the platforms it supports, but manual values take
priority over the detected ones and can be used when multiple nodes support
building for the same platform.

**Examples**

```console
$ docker buildx create --platform linux/amd64
$ docker buildx create --platform linux/arm64,linux/arm/v8
```

### <a name="use"></a> Automatically switch to the newly created builder

The `--use` flag automatically switches the current builder to the newly created
one. Equivalent to running `docker buildx use $(docker buildx create ...)`.

docs/reference/buildx_du.md (new file, 19 lines)
@@ -0,0 +1,19 @@
# buildx du

```
docker buildx du
```

<!---MARKER_GEN_START-->
Disk usage

### Options

| Name | Description |
| --- | --- |
| `--builder string` | Override the configured builder instance |
| `--filter filter` | Provide filter values |
| `--verbose` | Provide a more verbose output |


<!---MARKER_GEN_END-->

docs/reference/buildx_imagetools.md (new file, 24 lines)
@@ -0,0 +1,24 @@
# buildx imagetools

```
docker buildx imagetools [OPTIONS] COMMAND
```

<!---MARKER_GEN_START-->
Commands to work on images in registry

### Subcommands

| Name | Description |
| --- | --- |
| [`create`](buildx_imagetools_create.md) | Create a new image based on source images |
| [`inspect`](buildx_imagetools_inspect.md) | Show details of image in the registry |


<!---MARKER_GEN_END-->

## Description

Imagetools contains commands for working with manifest lists in the registry.
These commands are useful for inspecting multi-platform build results.

docs/reference/buildx_imagetools_create.md (new file, 80 lines)
@@ -0,0 +1,80 @@
# buildx imagetools create

```
docker buildx imagetools create [OPTIONS] [SOURCE] [SOURCE...]
```

<!---MARKER_GEN_START-->
Create a new image based on source images

### Options

| Name | Description |
| --- | --- |
| [`--append`](#append) | Append to existing manifest |
| `--builder string` | Override the configured builder instance |
| [`--dry-run`](#dry-run) | Show final image instead of pushing |
| [`-f`](#file), [`--file stringArray`](#file) | Read source descriptor from file |
| [`-t`](#tag), [`--tag stringArray`](#tag) | Set reference for new image |


<!---MARKER_GEN_END-->

## Description

Imagetools contains commands for working with manifest lists in the registry.
These commands are useful for inspecting multi-platform build results.

Create a new manifest list based on source manifests. The source manifests can
be manifest lists or single-platform distribution manifests, and must already
exist in the registry where the new manifest is created. If only one source is
specified, create performs a carbon copy.

## Examples

### <a name="append"></a> Append new sources to an existing manifest list (--append)

Use the `--append` flag to append the new sources to an existing manifest list
in the destination.
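
For example, a hypothetical append that adds one of the alpine manifests shown
later in this page to an existing `myuser/image` list (references illustrative):

```console
$ docker buildx imagetools create --append -t myuser/image docker.io/library/alpine@sha256:c4ba6347b0e4258ce6a6de2401619316f982b7bcc529f73d2a410d0097730204
```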

### <a name="dry-run"></a> Show final image instead of pushing (--dry-run)

Use the `--dry-run` flag to not push the image, just show it.

### <a name="file"></a> Read source descriptor from a file (-f, --file)

```
-f FILE or --file FILE
```

Reads sources from files. A source can be a manifest digest, a manifest reference,
or a JSON OCI descriptor object.

In order to define annotations or additional platform properties like `os.version`
and `os.features`, you need to add them to the OCI descriptor object encoded in JSON.

```console
$ docker buildx imagetools inspect --raw alpine | jq '.manifests[0] | .platform."os.version"="10.1"' > descr.json
$ docker buildx imagetools create -f descr.json myuser/image
```

The descriptor in the file is merged with the existing descriptor in the registry,
if it exists.

The supported fields for the descriptor are defined in the [OCI spec](https://github.com/opencontainers/image-spec/blob/master/descriptor.md#properties).


### <a name="tag"></a> Set reference for new image (-t, --tag)

```
-t IMAGE or --tag IMAGE
```

Use the `-t` or `--tag` flag to set the name of the image to be created.

**Examples**

```console
$ docker buildx imagetools create --dry-run alpine@sha256:5c40b3c27b9f13c873fefb2139765c56ce97fd50230f1f2d5c91e55dec171907 sha256:c4ba6347b0e4258ce6a6de2401619316f982b7bcc529f73d2a410d0097730204

$ docker buildx imagetools create -t tonistiigi/myapp -f image1 -f image2
```

docs/reference/buildx_imagetools_inspect.md (new file, 47 lines)
@@ -0,0 +1,47 @@
# buildx imagetools inspect

```
docker buildx imagetools inspect [OPTIONS] NAME
```

<!---MARKER_GEN_START-->
Show details of image in the registry

### Options

| Name | Description |
| --- | --- |
| `--builder string` | Override the configured builder instance |
| [`--raw`](#raw) | Show original JSON manifest |


<!---MARKER_GEN_END-->

## Description

Show details of image in the registry.

Example:

```console
$ docker buildx imagetools inspect alpine

Name:      docker.io/library/alpine:latest
MediaType: application/vnd.docker.distribution.manifest.list.v2+json
Digest:    sha256:28ef97b8686a0b5399129e9b763d5b7e5ff03576aa5580d6f4182a49c5fe1913

Manifests:
  Name:      docker.io/library/alpine:latest@sha256:5c40b3c27b9f13c873fefb2139765c56ce97fd50230f1f2d5c91e55dec171907
  MediaType: application/vnd.docker.distribution.manifest.v2+json
  Platform:  linux/amd64

  Name:      docker.io/library/alpine:latest@sha256:c4ba6347b0e4258ce6a6de2401619316f982b7bcc529f73d2a410d0097730204
  MediaType: application/vnd.docker.distribution.manifest.v2+json
  Platform:  linux/arm/v6
...
```

### <a name="raw"></a> Show original, unformatted JSON manifest (--raw)

Use the `--raw` option to print the original JSON bytes instead of the formatted
output.

docs/reference/buildx_inspect.md (new file, 58 lines)
@@ -0,0 +1,58 @@
# buildx inspect

```
docker buildx inspect [NAME]
```

<!---MARKER_GEN_START-->
Inspect current builder instance

### Options

| Name | Description |
| --- | --- |
| [`--bootstrap`](#bootstrap) | Ensure builder has booted before inspecting |
| `--builder string` | Override the configured builder instance |


<!---MARKER_GEN_END-->

## Description

Shows information about the current or specified builder.

## Examples

### Get information about a builder instance

By default, `inspect` shows information about the current builder. Specify the
name of the builder to inspect to get information about that builder.
The following example shows information about a builder instance named
`elated_tesla`:

```console
$ docker buildx inspect elated_tesla

Name:   elated_tesla
Driver: docker-container

Nodes:
Name:      elated_tesla0
Endpoint:  unix:///var/run/docker.sock
Status:    running
Platforms: linux/amd64

Name:      elated_tesla1
Endpoint:  ssh://ubuntu@1.2.3.4
Status:    running
Platforms: linux/arm64, linux/arm/v7, linux/arm/v6
```

### <a name="bootstrap"></a> Ensure that the builder is running before inspecting (--bootstrap)

Use the `--bootstrap` option to ensure that the builder is running before
inspecting it. If the driver is `docker-container`, then `--bootstrap` starts
the buildkit container and waits until it is operational. Bootstrapping is
automatically done during build, and therefore not necessary. The same BuildKit
container is used during the lifetime of the associated builder node (as
displayed in `buildx ls`).

docs/reference/buildx_install.md (new file, 11 lines)
@@ -0,0 +1,11 @@
# buildx install

```
docker buildx install
```

<!---MARKER_GEN_START-->
Install buildx as a 'docker builder' alias


<!---MARKER_GEN_END-->

docs/reference/buildx_ls.md (new file, 31 lines)
@@ -0,0 +1,31 @@
# buildx ls

```
docker buildx ls
```

<!---MARKER_GEN_START-->
List builder instances


<!---MARKER_GEN_END-->

## Description

Lists all builder instances and the nodes for each instance.

**Example**

```console
$ docker buildx ls

NAME/NODE       DRIVER/ENDPOINT             STATUS   PLATFORMS
elated_tesla *  docker-container
  elated_tesla0 unix:///var/run/docker.sock running  linux/amd64
  elated_tesla1 ssh://ubuntu@1.2.3.4        running  linux/arm64, linux/arm/v7, linux/arm/v6
default         docker
  default       default                     running  linux/amd64
```

Each builder has one or more nodes associated with it. The current builder's
name is marked with a `*`.

docs/reference/buildx_prune.md (new file, 23 lines)
@@ -0,0 +1,23 @@
# buildx prune

```
docker buildx prune
```

<!---MARKER_GEN_START-->
Remove build cache

### Options

| Name | Description |
| --- | --- |
| `-a`, `--all` | Remove all unused images, not just dangling ones |
| `--builder string` | Override the configured builder instance |
| `--filter filter` | Provide filter values (e.g. 'until=24h') |
| `-f`, `--force` | Do not prompt for confirmation |
| `--keep-storage bytes` | Amount of disk space to keep for cache |
| `--verbose` | Provide a more verbose output |


<!---MARKER_GEN_END-->

docs/reference/buildx_rm.md (new file, 30 lines)
@@ -0,0 +1,30 @@
# buildx rm

```
docker buildx rm [NAME]
```

<!---MARKER_GEN_START-->
Remove a builder instance

### Options

| Name | Description |
| --- | --- |
| `--builder string` | Override the configured builder instance |
| [`--keep-state`](#keep-state) | Keep BuildKit state |


<!---MARKER_GEN_END-->

## Description

Removes the specified or current builder. Attempting to remove the default
builder is a no-op.

## Examples

### <a name="keep-state"></a> Keep BuildKit state (--keep-state)

Keep BuildKit state, so it can be reused by a new builder with the same name.
Currently, only supported by the [`docker-container` driver](buildx_create.md#driver).
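
For example, a hypothetical remove-and-recreate cycle that preserves the build
cache (builder name illustrative):

```console
$ docker buildx rm --keep-state mybuilder
$ docker buildx create --name mybuilder --driver docker-container
```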

docs/reference/buildx_stop.md (new file, 16 lines)
@@ -0,0 +1,16 @@
# buildx stop

```
docker buildx stop [NAME]
```

<!---MARKER_GEN_START-->
Stop builder instance


<!---MARKER_GEN_END-->

## Description

Stops the specified or current builder. This will not prevent `buildx build` from
restarting the builder. The implementation of stop depends on the driver.
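
For example (builder name illustrative):

```console
$ docker buildx stop mybuilder
```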

docs/reference/buildx_uninstall.md (new file, 11 lines)
@@ -0,0 +1,11 @@
# buildx uninstall

```
docker buildx uninstall
```

<!---MARKER_GEN_START-->
Uninstall the 'docker builder' alias


<!---MARKER_GEN_END-->

docs/reference/buildx_use.md (new file, 25 lines)
@@ -0,0 +1,25 @@
# buildx use

```
docker buildx use [OPTIONS] NAME
```

<!---MARKER_GEN_START-->
Set the current builder instance

### Options

| Name | Description |
| --- | --- |
| `--builder string` | Override the configured builder instance |
| `--default` | Set builder as default for current context |
| `--global` | Builder persists context changes |


<!---MARKER_GEN_END-->

## Description

Switches the current builder instance. Build commands invoked after this command
will run on the specified builder. Alternatively, a context name can be used to
switch to the default builder of that context.
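
For example, switching to a hypothetical builder named `mybuilder`, and then back
to the default builder of the `default` context:

```console
$ docker buildx use mybuilder
$ docker buildx use default
```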

docs/reference/buildx_version.md (new file, 21 lines)
@@ -0,0 +1,21 @@
# buildx version

```
docker buildx version
```

<!---MARKER_GEN_START-->
Show buildx version information


<!---MARKER_GEN_END-->

## Examples

### View version information

```console
$ docker buildx version
github.com/docker/buildx v0.5.1-docker 11057da37336192bfc57d81e02359ba7ba848e4a
```
6 driver/bkimage/bkimage.go Normal file
@@ -0,0 +1,6 @@
package bkimage

const (
    DefaultImage         = "moby/buildkit:buildx-stable-1" // TODO: make this verified
    DefaultRootlessImage = DefaultImage + "-rootless"
)
@@ -1,6 +1,8 @@
package docker

import (
    "archive/tar"
    "bytes"
    "context"
    "io"
    "io/ioutil"
@@ -9,22 +11,38 @@ import (
    "time"

    "github.com/docker/buildx/driver"
    "github.com/docker/buildx/driver/bkimage"
    "github.com/docker/buildx/util/imagetools"
    "github.com/docker/buildx/util/progress"
    "github.com/docker/docker/api/types"
    dockertypes "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/container"
    "github.com/docker/docker/api/types/mount"
    "github.com/docker/docker/api/types/network"
    dockerclient "github.com/docker/docker/client"
    "github.com/docker/docker/pkg/stdcopy"
    "github.com/moby/buildkit/client"
    "github.com/moby/buildkit/util/appdefaults"
    "github.com/moby/buildkit/util/tracing/detect"
    "github.com/pkg/errors"
)

var buildkitImage = "moby/buildkit:master" // TODO: make this verified and configuratble
const volumeStateSuffix = "_state"

type Driver struct {
    driver.InitConfig
    factory driver.Factory
    netMode string
    image   string
    env     []string
}

func (d *Driver) IsMobyDriver() bool {
    return false
}

func (d *Driver) Config() driver.InitConfig {
    return d.InitConfig
}

func (d *Driver) Bootstrap(ctx context.Context, l progress.Logger) error {
@@ -40,7 +58,7 @@ func (d *Driver) Bootstrap(ctx context.Context, l progress.Logger) error {
        if err := d.start(ctx, sub); err != nil {
            return err
        }
        if err := d.wait(ctx); err != nil {
        if err := d.wait(ctx, sub); err != nil {
            return err
        }
        return nil
@@ -49,29 +67,74 @@ func (d *Driver) Bootstrap(ctx context.Context, l progress.Logger) error {
}

func (d *Driver) create(ctx context.Context, l progress.SubLogger) error {
    if err := l.Wrap("pulling image "+buildkitImage, func() error {
        rc, err := d.DockerAPI.ImageCreate(ctx, buildkitImage, types.ImageCreateOptions{})
    imageName := bkimage.DefaultImage
    if d.image != "" {
        imageName = d.image
    }

    if err := l.Wrap("pulling image "+imageName, func() error {
        ra, err := imagetools.RegistryAuthForRef(imageName, d.Auth)
        if err != nil {
            return err
        }
        rc, err := d.DockerAPI.ImageCreate(ctx, imageName, types.ImageCreateOptions{
            RegistryAuth: ra,
        })
        if err != nil {
            return err
        }
        _, err = io.Copy(ioutil.Discard, rc)
        return err
    }); err != nil {
        return err
        // image pulling failed, check if it exists in local image store.
        // if not, return pulling error. otherwise log it.
        _, _, errInspect := d.DockerAPI.ImageInspectWithRaw(ctx, imageName)
        if errInspect != nil {
            return err
        }
        l.Wrap("pulling failed, using local image "+imageName, func() error { return nil })
    }

    cfg := &container.Config{
        Image: imageName,
        Env:   d.env,
    }
    if d.InitConfig.BuildkitFlags != nil {
        cfg.Cmd = d.InitConfig.BuildkitFlags
    }

    if err := l.Wrap("creating container "+d.Name, func() error {
        _, err := d.DockerAPI.ContainerCreate(ctx, &container.Config{
            Image: buildkitImage,
        }, &container.HostConfig{
        hc := &container.HostConfig{
            Privileged: true,
        }, &network.NetworkingConfig{}, d.Name)
            UsernsMode: "host",
            Mounts: []mount.Mount{
                {
                    Type:   mount.TypeVolume,
                    Source: d.Name + volumeStateSuffix,
                    Target: appdefaults.Root,
                },
            },
        }
        if d.netMode != "" {
            hc.NetworkMode = container.NetworkMode(d.netMode)
        }
        _, err := d.DockerAPI.ContainerCreate(ctx, cfg, hc, &network.NetworkingConfig{}, nil, d.Name)
        if err != nil {
            return err
        }
        if f := d.InitConfig.ConfigFile; f != "" {
            buf, err := readFileToTar(f)
            if err != nil {
                return err
            }
            if err := d.DockerAPI.CopyToContainer(ctx, d.Name, "/", buf, dockertypes.CopyToContainerOptions{}); err != nil {
                return err
            }
        }
        if err := d.start(ctx, l); err != nil {
            return err
        }
        if err := d.wait(ctx); err != nil {
        if err := d.wait(ctx, l); err != nil {
            return err
        }
        return nil
@@ -81,17 +144,28 @@ func (d *Driver) create(ctx context.Context, l progress.SubLogger) error {
    return nil
}

func (d *Driver) wait(ctx context.Context) error {
    try := 0
func (d *Driver) wait(ctx context.Context, l progress.SubLogger) error {
    try := 1
    for {
        if err := d.run(ctx, []string{"buildctl", "debug", "workers"}); err != nil {
            if try > 10 {
        bufStdout := &bytes.Buffer{}
        bufStderr := &bytes.Buffer{}
        if err := d.run(ctx, []string{"buildctl", "debug", "workers"}, bufStdout, bufStderr); err != nil {
            if try > 15 {
                if err != nil {
                    d.copyLogs(context.TODO(), l)
                    if bufStdout.Len() != 0 {
                        l.Log(1, bufStdout.Bytes())
                    }
                    if bufStderr.Len() != 0 {
                        l.Log(2, bufStderr.Bytes())
                    }
                }
                return err
            }
            select {
            case <-ctx.Done():
                return ctx.Err()
            case <-time.After(time.Duration(100+try*20) * time.Millisecond):
            case <-time.After(time.Duration(try*120) * time.Millisecond):
                try++
                continue
            }
@@ -100,6 +174,21 @@ func (d *Driver) wait(ctx context.Context) error {
    }
}

func (d *Driver) copyLogs(ctx context.Context, l progress.SubLogger) error {
    rc, err := d.DockerAPI.ContainerLogs(ctx, d.Name, types.ContainerLogsOptions{
        ShowStdout: true, ShowStderr: true,
    })
    if err != nil {
        return err
    }
    stdout := &logWriter{logger: l, stream: 1}
    stderr := &logWriter{logger: l, stream: 2}
    if _, err := stdcopy.StdCopy(stdout, stderr, rc); err != nil {
        return err
    }
    return rc.Close()
}

func (d *Driver) exec(ctx context.Context, cmd []string) (string, net.Conn, error) {
    execConfig := types.ExecConfig{
        Cmd: cmd,
@@ -124,12 +213,12 @@ func (d *Driver) exec(ctx context.Context, cmd []string) (string, net.Conn, error) {
    return execID, resp.Conn, nil
}

func (d *Driver) run(ctx context.Context, cmd []string) error {
func (d *Driver) run(ctx context.Context, cmd []string, stdout, stderr io.Writer) (err error) {
    id, conn, err := d.exec(ctx, cmd)
    if err != nil {
        return err
    }
    if _, err := io.Copy(ioutil.Discard, conn); err != nil {
    if _, err := stdcopy.StdCopy(stdout, stderr, conn); err != nil {
        return err
    }
    conn.Close()
@@ -148,7 +237,7 @@ func (d *Driver) start(ctx context.Context, l progress.SubLogger) error {
}

func (d *Driver) Info(ctx context.Context) (*driver.Info, error) {
    container, err := d.DockerAPI.ContainerInspect(ctx, d.Name)
    ctn, err := d.DockerAPI.ContainerInspect(ctx, d.Name)
    if err != nil {
        if dockerclient.IsErrNotFound(err) {
            return &driver.Info{
@@ -158,7 +247,7 @@ func (d *Driver) Info(ctx context.Context) (*driver.Info, error) {
        return nil, err
    }

    if container.State.Running {
    if ctn.State.Running {
        return &driver.Info{
            Status: driver.Running,
        }, nil
@@ -180,16 +269,21 @@ func (d *Driver) Stop(ctx context.Context, force bool) error {
    return nil
}

func (d *Driver) Rm(ctx context.Context, force bool) error {
func (d *Driver) Rm(ctx context.Context, force bool, rmVolume bool) error {
    info, err := d.Info(ctx)
    if err != nil {
        return err
    }
    if info.Status != driver.Inactive {
        return d.DockerAPI.ContainerRemove(ctx, d.Name, dockertypes.ContainerRemoveOptions{
        if err := d.DockerAPI.ContainerRemove(ctx, d.Name, dockertypes.ContainerRemoveOptions{
            RemoveVolumes: true,
            Force:         true,
        })
            Force: force,
        }); err != nil {
            return err
        }
        if rmVolume {
            return d.DockerAPI.VolumeRemove(ctx, d.Name+volumeStateSuffix, false)
        }
    }
    return nil
}
@@ -202,9 +296,16 @@ func (d *Driver) Client(ctx context.Context) (*client.Client, error) {

    conn = demuxConn(conn)

    return client.New(ctx, "", client.WithDialer(func(string, time.Duration) (net.Conn, error) {
    exp, err := detect.Exporter()
    if err != nil {
        return nil, err
    }

    td, _ := exp.(client.TracerDelegate)

    return client.New(ctx, "", client.WithContextDialer(func(context.Context, string) (net.Conn, error) {
        return conn, nil
    }))
    }), client.WithTracerDelegate(td))
}

func (d *Driver) Factory() driver.Factory {
@@ -224,7 +325,7 @@ func (d *Driver) Features() map[driver.Feature]bool {
func demuxConn(c net.Conn) net.Conn {
    pr, pw := io.Pipe()
    // TODO: rewrite parser with Reader() to avoid goroutine switch
    go stdcopy.StdCopy(pw, os.Stdout, c)
    go stdcopy.StdCopy(pw, os.Stderr, c)
    return &demux{
        Conn:   c,
        Reader: pr,
@@ -239,3 +340,36 @@ type demux struct {
func (d *demux) Read(dt []byte) (int, error) {
    return d.Reader.Read(dt)
}

func readFileToTar(fn string) (*bytes.Buffer, error) {
    buf := bytes.NewBuffer(nil)
    tw := tar.NewWriter(buf)
    dt, err := ioutil.ReadFile(fn)
    if err != nil {
        return nil, err
    }
    if err := tw.WriteHeader(&tar.Header{
        Name: "/etc/buildkit/buildkitd.toml",
        Size: int64(len(dt)),
        Mode: 0644,
    }); err != nil {
        return nil, err
    }
    if _, err := tw.Write(dt); err != nil {
        return nil, err
    }
    if err := tw.Close(); err != nil {
        return nil, err
    }
    return buf, nil
}

type logWriter struct {
    logger progress.SubLogger
    stream int
}

func (l *logWriter) Write(dt []byte) (int, error) {
    l.logger.Log(l.stream, dt)
    return len(dt), nil
}
@@ -2,6 +2,8 @@ package docker

import (
    "context"
    "fmt"
    "strings"

    "github.com/docker/buildx/driver"
    dockerclient "github.com/docker/docker/client"
@@ -37,8 +39,28 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver, error) {
    if cfg.DockerAPI == nil {
        return nil, errors.Errorf("%s driver requires docker API access", f.Name())
    }
    d := &Driver{factory: f, InitConfig: cfg}
    for k, v := range cfg.DriverOpts {
        switch {
        case k == "network":
            d.netMode = v
            if v == "host" {
                d.InitConfig.BuildkitFlags = append(d.InitConfig.BuildkitFlags, "--allow-insecure-entitlement=network.host")
            }
        case k == "image":
            d.image = v
        case strings.HasPrefix(k, "env."):
            envName := strings.TrimPrefix(k, "env.")
            if envName == "" {
                return nil, errors.Errorf("invalid env option %q, expecting env.FOO=bar", k)
            }
            d.env = append(d.env, fmt.Sprintf("%s=%s", envName, v))
        default:
            return nil, errors.Errorf("invalid driver option %s for docker-container driver", k)
        }
    }

    return &Driver{factory: f, InitConfig: cfg}, nil
    return d, nil
}

func (f *factory) AllowsInstances() bool {
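The driver options parsed above are supplied through `--driver-opt` on
`docker buildx create`; a sketch of an invocation exercising the `network`,
`image`, and `env.*` options (the builder name and option values are
assumptions for illustration):

```console
$ docker buildx create --driver docker-container \
    --driver-opt network=host \
    --driver-opt image=moby/buildkit:buildx-stable-1 \
    --driver-opt env.BUILDKIT_STEP_LOG_MAX_SIZE=10485760 \
    --name container-builder
```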
@@ -3,7 +3,6 @@ package docker

import (
    "context"
    "net"
    "time"

    "github.com/docker/buildx/driver"
    "github.com/docker/buildx/util/progress"
@@ -34,12 +33,12 @@ func (d *Driver) Stop(ctx context.Context, force bool) error {
    return nil
}

func (d *Driver) Rm(ctx context.Context, force bool) error {
func (d *Driver) Rm(ctx context.Context, force bool, rmVolume bool) error {
    return nil
}

func (d *Driver) Client(ctx context.Context) (*client.Client, error) {
    return client.New(ctx, "", client.WithDialer(func(string, time.Duration) (net.Conn, error) {
    return client.New(ctx, "", client.WithContextDialer(func(context.Context, string) (net.Conn, error) {
        return d.DockerAPI.DialHijack(ctx, "/grpc", "h2c", nil)
    }))
}
@@ -48,9 +47,8 @@ func (d *Driver) Features() map[driver.Feature]bool {
    return map[driver.Feature]bool{
        driver.OCIExporter:    false,
        driver.DockerExporter: false,

        driver.CacheExport: false,
        driver.MultiPlatform: false,
        driver.CacheExport:    false,
        driver.MultiPlatform:  false,
    }
}

@@ -58,4 +56,10 @@ func (d *Driver) Factory() driver.Factory {
    return d.factory
}

func (d *Driver) IsDefaultMobyDriver() {}
func (d *Driver) IsMobyDriver() bool {
    return true
}

func (d *Driver) Config() driver.InitConfig {
    return d.InitConfig
}
@@ -44,6 +44,9 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver, error) {
    if cfg.DockerAPI == nil {
        return nil, errors.Errorf("docker driver requires docker API access")
    }
    if cfg.ConfigFile != "" {
        return nil, errors.Errorf("setting config file is not supported for docker driver, use dockerd configuration file")
    }

    return &Driver{factory: f, InitConfig: cfg}, nil
}
@@ -3,7 +3,9 @@ package driver

import (
    "context"

    "github.com/docker/buildx/store"
    "github.com/docker/buildx/util/progress"
    clitypes "github.com/docker/cli/cli/config/types"
    "github.com/moby/buildkit/client"
    "github.com/pkg/errors"
)
@@ -39,6 +41,12 @@ func (s Status) String() string {

type Info struct {
    Status Status
    // DynamicNodes must be empty if the actual nodes are statically listed in the store
    DynamicNodes []store.Node
}

type Auth interface {
    GetAuthConfig(registryHostname string) (clitypes.AuthConfig, error)
}

type Driver interface {
@@ -46,9 +54,11 @@ type Driver interface {
    Bootstrap(context.Context, progress.Logger) error
    Info(context.Context) (*Info, error)
    Stop(ctx context.Context, force bool) error
    Rm(ctx context.Context, force bool) error
    Rm(ctx context.Context, force bool, rmVolume bool) error
    Client(ctx context.Context) (*client.Client, error)
    Features() map[Feature]bool
    IsMobyDriver() bool
    Config() InitConfig
}

func Boot(ctx context.Context, d Driver, pw progress.Writer) (*client.Client, error) {
@@ -63,16 +73,12 @@ func Boot(ctx context.Context, d Driver, pw progress.Writer) (*client.Client, error) {
        if try > 2 {
            return nil, errors.Errorf("failed to bootstrap %T driver in attempts", d)
        }
        if err := d.Bootstrap(ctx, func(s *client.SolveStatus) {
            if pw != nil {
                pw.Status() <- s
            }
        }); err != nil {
        if err := d.Bootstrap(ctx, pw.Write); err != nil {
            return nil, err
        }
    }

    c, err := d.Client(context.TODO())
    c, err := d.Client(ctx)
    if err != nil {
        if errors.Cause(err) == ErrNotRunning && try <= 2 {
            continue
198 driver/kubernetes/driver.go Normal file
@@ -0,0 +1,198 @@
package kubernetes

import (
    "context"
    "fmt"
    "net"
    "strings"
    "time"

    "github.com/docker/buildx/driver"
    "github.com/docker/buildx/driver/kubernetes/execconn"
    "github.com/docker/buildx/driver/kubernetes/manifest"
    "github.com/docker/buildx/driver/kubernetes/podchooser"
    "github.com/docker/buildx/store"
    "github.com/docker/buildx/util/platformutil"
    "github.com/docker/buildx/util/progress"
    "github.com/moby/buildkit/client"
    "github.com/moby/buildkit/util/tracing/detect"
    "github.com/pkg/errors"
    appsv1 "k8s.io/api/apps/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    clientappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
    clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
)

const (
    DriverName = "kubernetes"
)

const (
    // valid values for driver-opt loadbalance
    LoadbalanceRandom = "random"
    LoadbalanceSticky = "sticky"
)

type Driver struct {
    driver.InitConfig
    factory          driver.Factory
    minReplicas      int
    deployment       *appsv1.Deployment
    clientset        *kubernetes.Clientset
    deploymentClient clientappsv1.DeploymentInterface
    podClient        clientcorev1.PodInterface
    podChooser       podchooser.PodChooser
}

func (d *Driver) IsMobyDriver() bool {
    return false
}

func (d *Driver) Config() driver.InitConfig {
    return d.InitConfig
}

func (d *Driver) Bootstrap(ctx context.Context, l progress.Logger) error {
    return progress.Wrap("[internal] booting buildkit", l, func(sub progress.SubLogger) error {
        _, err := d.deploymentClient.Get(ctx, d.deployment.Name, metav1.GetOptions{})
        if err != nil {
            // TODO: return err if err != ErrNotFound
            _, err = d.deploymentClient.Create(ctx, d.deployment, metav1.CreateOptions{})
            if err != nil {
                return errors.Wrapf(err, "error while calling deploymentClient.Create for %q", d.deployment.Name)
            }
        }
        return sub.Wrap(
            fmt.Sprintf("waiting for %d pods to be ready", d.minReplicas),
            func() error {
                if err := d.wait(ctx); err != nil {
                    return err
                }
                return nil
            })
    })
}

func (d *Driver) wait(ctx context.Context) error {
    // TODO: use watch API
    var (
        err  error
        depl *appsv1.Deployment
    )
    for try := 0; try < 100; try++ {
        depl, err = d.deploymentClient.Get(ctx, d.deployment.Name, metav1.GetOptions{})
        if err == nil {
            if depl.Status.ReadyReplicas >= int32(d.minReplicas) {
                return nil
            }
            err = errors.Errorf("expected %d replicas to be ready, got %d",
                d.minReplicas, depl.Status.ReadyReplicas)
        }
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-time.After(time.Duration(100+try*20) * time.Millisecond):
        }
    }
    return err
}

func (d *Driver) Info(ctx context.Context) (*driver.Info, error) {
    depl, err := d.deploymentClient.Get(ctx, d.deployment.Name, metav1.GetOptions{})
    if err != nil {
        // TODO: return err if err != ErrNotFound
        return &driver.Info{
            Status: driver.Inactive,
        }, nil
    }
    if depl.Status.ReadyReplicas <= 0 {
        return &driver.Info{
            Status: driver.Stopped,
        }, nil
    }
    pods, err := podchooser.ListRunningPods(ctx, d.podClient, depl)
    if err != nil {
        return nil, err
    }
    var dynNodes []store.Node
    for _, p := range pods {
        node := store.Node{
            Name: p.Name,
            // Other fields are unset (TODO: detect real platforms)
        }

        if p.Annotations != nil {
            if p, ok := p.Annotations[manifest.AnnotationPlatform]; ok {
                ps, err := platformutil.Parse(strings.Split(p, ","))
                if err == nil {
                    node.Platforms = ps
                }
            }
        }

        dynNodes = append(dynNodes, node)
    }
    return &driver.Info{
        Status:       driver.Running,
        DynamicNodes: dynNodes,
    }, nil
}

func (d *Driver) Stop(ctx context.Context, force bool) error {
    // future version may scale the replicas to zero here
    return nil
}

func (d *Driver) Rm(ctx context.Context, force bool, rmVolume bool) error {
    if err := d.deploymentClient.Delete(ctx, d.deployment.Name, metav1.DeleteOptions{}); err != nil {
        return errors.Wrapf(err, "error while calling deploymentClient.Delete for %q", d.deployment.Name)
    }
    return nil
}

func (d *Driver) Client(ctx context.Context) (*client.Client, error) {
    restClient := d.clientset.CoreV1().RESTClient()
    restClientConfig, err := d.KubeClientConfig.ClientConfig()
    if err != nil {
        return nil, err
    }
    pod, err := d.podChooser.ChoosePod(ctx)
    if err != nil {
        return nil, err
    }
    if len(pod.Spec.Containers) == 0 {
        return nil, errors.Errorf("pod %s does not have any container", pod.Name)
    }
    containerName := pod.Spec.Containers[0].Name
    cmd := []string{"buildctl", "dial-stdio"}
    conn, err := execconn.ExecConn(restClient, restClientConfig,
        pod.Namespace, pod.Name, containerName, cmd)
    if err != nil {
        return nil, err
    }

    exp, err := detect.Exporter()
    if err != nil {
        return nil, err
    }

    td, _ := exp.(client.TracerDelegate)

    return client.New(ctx, "", client.WithContextDialer(func(context.Context, string) (net.Conn, error) {
        return conn, nil
    }), client.WithTracerDelegate(td))
}

func (d *Driver) Factory() driver.Factory {
    return d.factory
}

func (d *Driver) Features() map[driver.Feature]bool {
    return map[driver.Feature]bool{
        driver.OCIExporter:    true,
        driver.DockerExporter: d.DockerAPI != nil,

        driver.CacheExport:   true,
        driver.MultiPlatform: true, // Untested (needs multiple Driver instances)
    }
}
135 driver/kubernetes/execconn/execconn.go Normal file
@@ -0,0 +1,135 @@
package execconn

import (
    "io"
    "net"
    "os"
    "sync"
    "time"

    "github.com/sirupsen/logrus"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/remotecommand"
)

func ExecConn(restClient rest.Interface, restConfig *rest.Config, namespace, pod, container string, cmd []string) (net.Conn, error) {
    req := restClient.
        Post().
        Namespace(namespace).
        Resource("pods").
        Name(pod).
        SubResource("exec").
        VersionedParams(&corev1.PodExecOptions{
            Container: container,
            Command:   cmd,
            Stdin:     true,
            Stdout:    true,
            Stderr:    true,
            TTY:       false,
        }, scheme.ParameterCodec)
    exec, err := remotecommand.NewSPDYExecutor(restConfig, "POST", req.URL())
    if err != nil {
        return nil, err
    }
    stdinR, stdinW := io.Pipe()
    stdoutR, stdoutW := io.Pipe()
    kc := &kubeConn{
        stdin:      stdinW,
        stdout:     stdoutR,
        localAddr:  dummyAddr{network: "dummy", s: "dummy-0"},
        remoteAddr: dummyAddr{network: "dummy", s: "dummy-1"},
    }
    go func() {
        serr := exec.Stream(remotecommand.StreamOptions{
            Stdin:  stdinR,
            Stdout: stdoutW,
            Stderr: os.Stderr,
            Tty:    false,
        })
        if serr != nil {
            logrus.Error(serr)
        }
    }()
    return kc, nil
}

type kubeConn struct {
    stdin         io.WriteCloser
    stdout        io.ReadCloser
    stdioClosedMu sync.Mutex // for stdinClosed and stdoutClosed
    stdinClosed   bool
    stdoutClosed  bool
    localAddr     net.Addr
    remoteAddr    net.Addr
}

func (c *kubeConn) Write(p []byte) (int, error) {
    return c.stdin.Write(p)
}

func (c *kubeConn) Read(p []byte) (int, error) {
    return c.stdout.Read(p)
}

func (c *kubeConn) CloseWrite() error {
    err := c.stdin.Close()
    c.stdioClosedMu.Lock()
    c.stdinClosed = true
    c.stdioClosedMu.Unlock()
    return err
}
func (c *kubeConn) CloseRead() error {
    err := c.stdout.Close()
    c.stdioClosedMu.Lock()
    c.stdoutClosed = true
    c.stdioClosedMu.Unlock()
    return err
}

func (c *kubeConn) Close() error {
    var err error
    c.stdioClosedMu.Lock()
    stdinClosed := c.stdinClosed
    c.stdioClosedMu.Unlock()
    if !stdinClosed {
        err = c.CloseWrite()
    }
    c.stdioClosedMu.Lock()
    stdoutClosed := c.stdoutClosed
    c.stdioClosedMu.Unlock()
    if !stdoutClosed {
        err = c.CloseRead()
    }
    return err
}

func (c *kubeConn) LocalAddr() net.Addr {
    return c.localAddr
}
func (c *kubeConn) RemoteAddr() net.Addr {
    return c.remoteAddr
}
func (c *kubeConn) SetDeadline(t time.Time) error {
    return nil
}
func (c *kubeConn) SetReadDeadline(t time.Time) error {
    return nil
}
func (c *kubeConn) SetWriteDeadline(t time.Time) error {
    return nil
}

type dummyAddr struct {
    network string
    s       string
}

func (d dummyAddr) Network() string {
    return d.network
}

func (d dummyAddr) String() string {
    return d.s
}
165 driver/kubernetes/factory.go Normal file
@@ -0,0 +1,165 @@
package kubernetes

import (
    "context"
    "strconv"
    "strings"

    "github.com/docker/buildx/driver"
    "github.com/docker/buildx/driver/bkimage"
    "github.com/docker/buildx/driver/kubernetes/manifest"
    "github.com/docker/buildx/driver/kubernetes/podchooser"
    dockerclient "github.com/docker/docker/client"
    "github.com/pkg/errors"
    "k8s.io/client-go/kubernetes"
)

const prioritySupported = 40
const priorityUnsupported = 80

func init() {
    driver.Register(&factory{})
}

type factory struct {
}

func (*factory) Name() string {
    return DriverName
}

func (*factory) Usage() string {
    return DriverName
}

func (*factory) Priority(ctx context.Context, api dockerclient.APIClient) int {
    if api == nil {
        return priorityUnsupported
    }
    return prioritySupported
}

func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver, error) {
    if cfg.KubeClientConfig == nil {
        return nil, errors.Errorf("%s driver requires kubernetes API access", DriverName)
    }
    deploymentName, err := buildxNameToDeploymentName(cfg.Name)
    if err != nil {
        return nil, err
    }
    namespace, _, err := cfg.KubeClientConfig.Namespace()
    if err != nil {
        return nil, errors.Wrap(err, "cannot determine Kubernetes namespace, specify manually")
    }
    restClientConfig, err := cfg.KubeClientConfig.ClientConfig()
    if err != nil {
        return nil, err
    }
    clientset, err := kubernetes.NewForConfig(restClientConfig)
    if err != nil {
        return nil, err
    }
    d := &Driver{
        factory:    f,
        InitConfig: cfg,
        clientset:  clientset,
    }
    deploymentOpt := &manifest.DeploymentOpt{
        Name:          deploymentName,
        Image:         bkimage.DefaultImage,
        Replicas:      1,
        BuildkitFlags: cfg.BuildkitFlags,
        Rootless:      false,
        Platforms:     cfg.Platforms,
    }
    loadbalance := LoadbalanceSticky
    imageOverride := ""
    for k, v := range cfg.DriverOpts {
        switch k {
        case "image":
            imageOverride = v
        case "namespace":
            namespace = v
        case "replicas":
            deploymentOpt.Replicas, err = strconv.Atoi(v)
            if err != nil {
                return nil, err
            }
        case "requests.cpu":
            deploymentOpt.RequestsCPU = v
        case "requests.memory":
            deploymentOpt.RequestsMemory = v
        case "limits.cpu":
            deploymentOpt.LimitsCPU = v
        case "limits.memory":
            deploymentOpt.LimitsMemory = v
        case "rootless":
            deploymentOpt.Rootless, err = strconv.ParseBool(v)
            if err != nil {
                return nil, err
            }
            deploymentOpt.Image = bkimage.DefaultRootlessImage
        case "nodeselector":
            kvs := strings.Split(strings.Trim(v, `"`), ",")
            s := map[string]string{}
            for i := range kvs {
                kv := strings.Split(kvs[i], "=")
                if len(kv) == 2 {
                    s[kv[0]] = kv[1]
                }
            }
            deploymentOpt.NodeSelector = s
        case "loadbalance":
            switch v {
            case LoadbalanceSticky:
            case LoadbalanceRandom:
            default:
                return nil, errors.Errorf("invalid loadbalance %q", v)
            }
            loadbalance = v
        default:
            return nil, errors.Errorf("invalid driver option %s for driver %s", k, DriverName)
        }
    }
    if imageOverride != "" {
        deploymentOpt.Image = imageOverride
    }
    d.deployment, err = manifest.NewDeployment(deploymentOpt)
    if err != nil {
        return nil, err
    }
    d.minReplicas = deploymentOpt.Replicas
    d.deploymentClient = clientset.AppsV1().Deployments(namespace)
    d.podClient = clientset.CoreV1().Pods(namespace)
    switch loadbalance {
    case LoadbalanceSticky:
        d.podChooser = &podchooser.StickyPodChooser{
            Key:        cfg.ContextPathHash,
            PodClient:  d.podClient,
            Deployment: d.deployment,
        }
    case LoadbalanceRandom:
        d.podChooser = &podchooser.RandomPodChooser{
            PodClient:  d.podClient,
            Deployment: d.deployment,
        }
    }
    return d, nil
}

func (f *factory) AllowsInstances() bool {
    return true
}

// buildxNameToDeploymentName converts buildx name to Kubernetes Deployment name.
//
// eg. "buildx_buildkit_loving_mendeleev0" -> "loving-mendeleev0"
func buildxNameToDeploymentName(bx string) (string, error) {
    // TODO: commands.util.go should not pass "buildx_buildkit_" prefix to drivers
    if !strings.HasPrefix(bx, "buildx_buildkit_") {
        return "", errors.Errorf("expected a string with \"buildx_buildkit_\", got %q", bx)
    }
    s := strings.TrimPrefix(bx, "buildx_buildkit_")
    s = strings.ReplaceAll(s, "_", "-")
    return s, nil
}
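The Kubernetes driver options handled above map to `--driver-opt` flags on
`docker buildx create` in the same way as the docker-container driver; a
sketch of an invocation (the builder name and option values are assumptions
for illustration):

```console
$ docker buildx create --driver kubernetes \
    --driver-opt replicas=2 \
    --driver-opt loadbalance=random \
    --driver-opt rootless=true \
    --name k8s-builder
```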
151 driver/kubernetes/manifest/manifest.go Normal file
@@ -0,0 +1,151 @@
package manifest

import (
    "strings"

    "github.com/docker/buildx/util/platformutil"
    v1 "github.com/opencontainers/image-spec/specs-go/v1"
    appsv1 "k8s.io/api/apps/v1"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type DeploymentOpt struct {
    Namespace      string
    Name           string
    Image          string
    Replicas       int
    BuildkitFlags  []string
    Rootless       bool
    NodeSelector   map[string]string
    RequestsCPU    string
    RequestsMemory string
    LimitsCPU      string
    LimitsMemory   string
    Platforms      []v1.Platform
}

const (
    containerName      = "buildkitd"
    AnnotationPlatform = "buildx.docker.com/platform"
)

func NewDeployment(opt *DeploymentOpt) (*appsv1.Deployment, error) {
    labels := map[string]string{
        "app": opt.Name,
    }
    annotations := map[string]string{}
    replicas := int32(opt.Replicas)
    privileged := true
    args := opt.BuildkitFlags

    if len(opt.Platforms) > 0 {
        annotations[AnnotationPlatform] = strings.Join(platformutil.Format(opt.Platforms), ",")
    }

    d := &appsv1.Deployment{
        TypeMeta: metav1.TypeMeta{
            APIVersion: appsv1.SchemeGroupVersion.String(),
            Kind:       "Deployment",
        },
        ObjectMeta: metav1.ObjectMeta{
            Namespace:   opt.Namespace,
            Name:        opt.Name,
            Labels:      labels,
            Annotations: annotations,
        },
        Spec: appsv1.DeploymentSpec{
            Replicas: &replicas,
            Selector: &metav1.LabelSelector{
                MatchLabels: labels,
            },
            Template: corev1.PodTemplateSpec{
                ObjectMeta: metav1.ObjectMeta{
                    Labels:      labels,
                    Annotations: annotations,
                },
                Spec: corev1.PodSpec{
                    Containers: []corev1.Container{
                        {
                            Name:  containerName,
                            Image: opt.Image,
                            Args:  args,
                            SecurityContext: &corev1.SecurityContext{
                                Privileged: &privileged,
                            },
                            ReadinessProbe: &corev1.Probe{
                                Handler: corev1.Handler{
                                    Exec: &corev1.ExecAction{
                                        Command: []string{"buildctl", "debug", "workers"},
                                    },
                                },
                            },
                            Resources: corev1.ResourceRequirements{
                                Requests: corev1.ResourceList{},
                                Limits:   corev1.ResourceList{},
                            },
                        },
                    },
                },
            },
        },
    }
    if opt.Rootless {
        if err := toRootless(d); err != nil {
            return nil, err
        }
    }

    if len(opt.NodeSelector) > 0 {
        d.Spec.Template.Spec.NodeSelector = opt.NodeSelector
    }

    if opt.RequestsCPU != "" {
        reqCPU, err := resource.ParseQuantity(opt.RequestsCPU)
        if err != nil {
            return nil, err
        }
        d.Spec.Template.Spec.Containers[0].Resources.Requests[corev1.ResourceCPU] = reqCPU
    }

    if opt.RequestsMemory != "" {
        reqMemory, err := resource.ParseQuantity(opt.RequestsMemory)
        if err != nil {
            return nil, err
        }
        d.Spec.Template.Spec.Containers[0].Resources.Requests[corev1.ResourceMemory] = reqMemory
    }

    if opt.LimitsCPU != "" {
        limCPU, err := resource.ParseQuantity(opt.LimitsCPU)
        if err != nil {
            return nil, err
        }
        d.Spec.Template.Spec.Containers[0].Resources.Limits[corev1.ResourceCPU] = limCPU
    }

    if opt.LimitsMemory != "" {
        limMemory, err := resource.ParseQuantity(opt.LimitsMemory)
        if err != nil {
            return nil, err
        }
        d.Spec.Template.Spec.Containers[0].Resources.Limits[corev1.ResourceMemory] = limMemory
    }

    return d, nil
}

func toRootless(d *appsv1.Deployment) error {
    d.Spec.Template.Spec.Containers[0].Args = append(
        d.Spec.Template.Spec.Containers[0].Args,
        "--oci-worker-no-process-sandbox",
    )
    d.Spec.Template.Spec.Containers[0].SecurityContext = nil
    if d.Spec.Template.ObjectMeta.Annotations == nil {
        d.Spec.Template.ObjectMeta.Annotations = make(map[string]string, 2)
    }
    d.Spec.Template.ObjectMeta.Annotations["container.apparmor.security.beta.kubernetes.io/"+containerName] = "unconfined"
    d.Spec.Template.ObjectMeta.Annotations["container.seccomp.security.alpha.kubernetes.io/"+containerName] = "unconfined"
    return nil
}
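A minimal sketch (not part of the diff) of how the manifest package above
could be driven directly; the option values are assumptions for illustration:

```go
package main

import (
    "fmt"

    "github.com/docker/buildx/driver/kubernetes/manifest"
)

func main() {
    // Build a Deployment spec for a hypothetical two-replica builder.
    d, err := manifest.NewDeployment(&manifest.DeploymentOpt{
        Name:     "example-builder", // assumed name
        Image:    "moby/buildkit:buildx-stable-1",
        Replicas: 2,
    })
    if err != nil {
        panic(err)
    }
    // The result is a plain appsv1.Deployment ready for the Kubernetes API.
    fmt.Println(d.Name, *d.Spec.Replicas)
}
```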
97 driver/kubernetes/podchooser/podchooser.go Normal file
@@ -0,0 +1,97 @@
package podchooser

import (
    "context"
    "math/rand"
    "sort"
    "time"

    "github.com/serialx/hashring"
    "github.com/sirupsen/logrus"
    appsv1 "k8s.io/api/apps/v1"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
)

type PodChooser interface {
    ChoosePod(ctx context.Context) (*corev1.Pod, error)
}

type RandomPodChooser struct {
    RandSource rand.Source
    PodClient  clientcorev1.PodInterface
    Deployment *appsv1.Deployment
}

func (pc *RandomPodChooser) ChoosePod(ctx context.Context) (*corev1.Pod, error) {
    pods, err := ListRunningPods(ctx, pc.PodClient, pc.Deployment)
    if err != nil {
        return nil, err
    }
    randSource := pc.RandSource
    if randSource == nil {
        randSource = rand.NewSource(time.Now().Unix())
    }
    rnd := rand.New(randSource)
    n := rnd.Int() % len(pods)
    logrus.Debugf("RandomPodChooser.ChoosePod(): len(pods)=%d, n=%d", len(pods), n)
    return pods[n], nil
}

type StickyPodChooser struct {
    Key        string
    PodClient  clientcorev1.PodInterface
    Deployment *appsv1.Deployment
}

func (pc *StickyPodChooser) ChoosePod(ctx context.Context) (*corev1.Pod, error) {
    pods, err := ListRunningPods(ctx, pc.PodClient, pc.Deployment)
    if err != nil {
        return nil, err
    }
    var podNames []string
    podMap := make(map[string]*corev1.Pod, len(pods))
    for _, pod := range pods {
        podNames = append(podNames, pod.Name)
        podMap[pod.Name] = pod
    }
    ring := hashring.New(podNames)
    chosen, ok := ring.GetNode(pc.Key)
    if !ok {
        // NOTREACHED
        logrus.Errorf("no pod found for key %q", pc.Key)
        rpc := &RandomPodChooser{
            PodClient:  pc.PodClient,
            Deployment: pc.Deployment,
        }
        return rpc.ChoosePod(ctx)
    }
    return podMap[chosen], nil
}

func ListRunningPods(ctx context.Context, client clientcorev1.PodInterface, depl *appsv1.Deployment) ([]*corev1.Pod, error) {
    selector, err := metav1.LabelSelectorAsSelector(depl.Spec.Selector)
    if err != nil {
        return nil, err
    }
    listOpts := metav1.ListOptions{
        LabelSelector: selector.String(),
    }
    podList, err := client.List(ctx, listOpts)
    if err != nil {
        return nil, err
    }
    var runningPods []*corev1.Pod
    for i := range podList.Items {
        pod := &podList.Items[i]
        if pod.Status.Phase == corev1.PodRunning {
            logrus.Debugf("pod running: %q", pod.Name)
            runningPods = append(runningPods, pod)
        }
    }
    sort.Slice(runningPods, func(i, j int) bool {
        return runningPods[i].Name < runningPods[j].Name
    })
    return runningPods, nil
}
@@ -2,9 +2,16 @@ package driver

import (
    "context"
    "io/ioutil"
    "sort"
    "strings"
    "sync"

    "k8s.io/client-go/rest"

    dockerclient "github.com/docker/docker/client"
    "github.com/moby/buildkit/client"
    specs "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
)

@@ -21,12 +28,37 @@ type BuildkitConfig struct {
    // Rootless bool
}

type KubeClientConfig interface {
    ClientConfig() (*rest.Config, error)
    Namespace() (string, bool, error)
}

type KubeClientConfigInCluster struct{}

func (k KubeClientConfigInCluster) ClientConfig() (*rest.Config, error) {
    return rest.InClusterConfig()
}

func (k KubeClientConfigInCluster) Namespace() (string, bool, error) {
    namespace, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
    if err != nil {
        return "", false, err
    }
    return strings.TrimSpace(string(namespace)), true, nil
}

type InitConfig struct {
    // This object needs updates to be generic for different drivers
    Name string
    DockerAPI dockerclient.APIClient
    BuildkitConfig BuildkitConfig
    Meta map[string]interface{}
    Name             string
    DockerAPI        dockerclient.APIClient
    KubeClientConfig KubeClientConfig
    BuildkitFlags    []string
    ConfigFile       string
    DriverOpts       map[string]string
    Auth             Auth
    Platforms        []specs.Platform
    // ContextPathHash can be used for determining pods in the driver instance
    ContextPathHash string
}

var drivers map[string]Factory

@@ -71,10 +103,17 @@ func GetFactory(name string, instanceRequired bool) Factory {
    return nil
}

func GetDriver(ctx context.Context, name string, f Factory, api dockerclient.APIClient) (Driver, error) {
func GetDriver(ctx context.Context, name string, f Factory, api dockerclient.APIClient, auth Auth, kcc KubeClientConfig, flags []string, config string, do map[string]string, platforms []specs.Platform, contextPathHash string) (Driver, error) {
    ic := InitConfig{
        DockerAPI: api,
        Name:      name,
        DockerAPI:        api,
        KubeClientConfig: kcc,
        Name:             name,
        BuildkitFlags:    flags,
        ConfigFile:       config,
        DriverOpts:       do,
        Auth:             auth,
        Platforms:        platforms,
        ContextPathHash:  contextPathHash,
    }
    if f == nil {
        var err error
@@ -83,5 +122,27 @@ func GetDriver(ctx context.Context, name string, f Factory, api dockerclient.APIClient) (Driver, error) {
            return nil, err
        }
    }
    return f.New(ctx, ic)
    d, err := f.New(ctx, ic)
    if err != nil {
        return nil, err
    }
    return &cachedDriver{Driver: d}, nil
}

func GetFactories() map[string]Factory {
    return drivers
}

type cachedDriver struct {
    Driver
    client *client.Client
    err    error
    once   sync.Once
}

func (d *cachedDriver) Client(ctx context.Context) (*client.Client, error) {
    d.once.Do(func() {
        d.client, d.err = d.Driver.Client(ctx)
    })
    return d.client, d.err
}
98 go.mod
@@ -1,88 +1,66 @@
module github.com/docker/buildx

go 1.13

require (
    github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
    github.com/Microsoft/hcsshim v0.8.6 // indirect
    github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d // indirect
    github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 // indirect
    github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 // indirect
    github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 // indirect
    github.com/bitly/go-simplejson v0.5.0 // indirect
    github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
    github.com/bugsnag/bugsnag-go v1.4.1 // indirect
    github.com/bugsnag/panicwrap v1.2.0 // indirect
    github.com/cenkalti/backoff v2.1.1+incompatible // indirect
    github.com/cloudflare/cfssl v0.0.0-20181213083726-b94e044bb51e // indirect
    github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50
    github.com/containerd/containerd v1.3.0-0.20190426060238-3a3f0aac8819
    github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 // indirect
    github.com/containerd/typeurl v0.0.0-20190228175220-2a93cfde8c20 // indirect
    github.com/compose-spec/compose-go v0.0.0-20210706130854-69459d4976b5
    github.com/containerd/console v1.0.2
    github.com/containerd/containerd v1.5.2
    github.com/denisenkom/go-mssqldb v0.0.0-20190315220205-a8ed825ac853 // indirect
    github.com/docker/cli v1.14.0-0.20190523191156-ab688a9a79a1
    github.com/docker/cli v20.10.7+incompatible
    github.com/docker/compose-on-kubernetes v0.4.19-0.20190128150448-356b2919c496 // indirect
    github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible
    github.com/docker/docker v1.14.0-0.20190410063227-3998dffb806f3887f804b813069f59bc14a7f3c1
    github.com/docker/docker-credential-helpers v0.6.1 // indirect
    github.com/docker/distribution v2.7.1+incompatible
    github.com/docker/docker v20.10.7+incompatible
    github.com/docker/docker-credential-helpers v0.6.4-0.20210125172408-38bea2ce277a // indirect
    github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
    github.com/docker/go-connections v0.4.0 // indirect
    github.com/docker/go-metrics v0.0.0-20170502235133-d466d4f6fd96 // indirect
    github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4 // indirect
    github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c // indirect
    github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484 // indirect
    github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 // indirect
    github.com/ghodss/yaml v1.0.0 // indirect
    github.com/go-sql-driver/mysql v1.4.1 // indirect
    github.com/gofrs/flock v0.7.0
    github.com/gofrs/uuid v3.2.0+incompatible // indirect
    github.com/gogo/protobuf v1.2.1 // indirect
    github.com/fvbommel/sortorder v1.0.1 // indirect
    github.com/gofrs/flock v0.7.3
    github.com/gofrs/uuid v3.3.0+incompatible // indirect
    github.com/google/certificate-transparency-go v1.0.21 // indirect
    github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf // indirect
    github.com/gorilla/mux v1.7.0 // indirect
    github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
    github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
    github.com/hashicorp/go-version v1.1.0 // indirect
    github.com/hashicorp/hcl v1.0.0
    github.com/imdario/mergo v0.3.7 // indirect
    github.com/inconshreveable/mousetrap v1.0.0 // indirect
    github.com/hashicorp/go-cty-funcs v0.0.0-20200930094925-2721b1e36840
    github.com/hashicorp/hcl/v2 v2.8.2
    github.com/jinzhu/gorm v1.9.2 // indirect
    github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a // indirect
    github.com/jinzhu/now v1.0.0 // indirect
    github.com/json-iterator/go v1.1.6 // indirect
    github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
    github.com/lib/pq v1.0.0 // indirect
    github.com/mattn/go-shellwords v1.0.5 // indirect
    github.com/lib/pq v1.10.0 // indirect
    github.com/mattn/go-sqlite3 v1.10.0 // indirect
    github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
    github.com/miekg/pkcs11 v0.0.0-20190322140431-074fd7a1ed19 // indirect
    github.com/moby/buildkit v0.5.2-0.20190513182223-f238f1efb04f
    github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
    github.com/modern-go/reflect2 v1.0.1 // indirect
    github.com/opencontainers/go-digest v1.0.0-rc1
    github.com/moby/buildkit v0.8.2-0.20210702160134-1a7543a10527
    github.com/opencontainers/go-digest v1.0.0
    github.com/opencontainers/image-spec v1.0.1
    github.com/opencontainers/runtime-spec v1.0.1 // indirect
    github.com/pkg/errors v0.8.1
    github.com/prometheus/client_golang v0.8.0 // indirect
    github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612 // indirect
    github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1 // indirect
    github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be // indirect
    github.com/sirupsen/logrus v1.4.0
    github.com/spf13/cobra v0.0.3
    github.com/spf13/pflag v1.0.3
    github.com/spf13/viper v1.3.2 // indirect
    github.com/stretchr/testify v1.3.0
    github.com/pkg/errors v0.9.1
    github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002
    github.com/sirupsen/logrus v1.8.1
    github.com/spf13/cobra v1.1.1
    github.com/spf13/pflag v1.0.5
    github.com/stretchr/testify v1.7.0
    github.com/theupdateframework/notary v0.6.1 // indirect
    github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
    github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
    github.com/xeipuuv/gojsonschema v0.0.0-20160323030313-93e72a773fad // indirect
    github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1 // indirect
    golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f
    golang.org/x/sys v0.0.0-20190322080309-f49334f85ddc // indirect
    golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect
    github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea
    github.com/zclconf/go-cty v1.7.1
    go.opentelemetry.io/otel v1.0.0-RC1
    go.opentelemetry.io/otel/trace v1.0.0-RC1
    golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
    gopkg.in/dancannon/gorethink.v3 v3.0.5 // indirect
    gopkg.in/fatih/pool.v2 v2.0.0 // indirect
    gopkg.in/gorethink/gorethink.v3 v3.0.5 // indirect
    gopkg.in/inf.v0 v0.9.1 // indirect
    k8s.io/api v0.0.0-20180712090710-2d6f90ab1293 // indirect
    k8s.io/apimachinery v0.0.0-20180621070125-103fd098999d // indirect
    k8s.io/client-go v2.0.0-alpha.0.0.20180806134042-1f13a808da65+incompatible // indirect
    vbom.ml/util v0.0.0-20180919145318-efcd4e0f9787 // indirect
    k8s.io/api v0.20.6
    k8s.io/apimachinery v0.20.6
    k8s.io/client-go v0.20.6
)

replace github.com/jaguilar/vt100 => github.com/tonistiigi/vt100 v0.0.0-20190402012908-ad4c4a574305
replace (
    github.com/docker/cli => github.com/docker/cli v20.10.3-0.20210702143511-f782d1355eff+incompatible
    github.com/docker/docker => github.com/docker/docker v20.10.3-0.20210609100121-ef4d47340142+incompatible
)
@@ -1,13 +0,0 @@
{
    "Vendor": true,
    "Deadline": "8m",
    "Exclude": [".*.pb.go"],
    "DisableAll": true,
    "Enable": [
        "gofmt",
        "goimports",
        "ineffassign",
        "vet",
        "deadcode"
    ]
}
@@ -1,56 +1,16 @@
#!/usr/bin/env bash

. $(dirname $0)/util
set -eu

: ${TARGETPLATFORM=$CLI_PLATFORM}
: ${CONTINUOUS_INTEGRATION=}

set -ex
platformFlag=""
if [ -n "$TARGETPLATFORM" ]; then
    platformFlag="--platform $TARGETPLATFORM"
fi

progressFlag=""
if [ "$CONTINUOUS_INTEGRATION" == "true" ]; then progressFlag="--progress=plain"; fi

binariesDocker() {
    mkdir -p bin/tmp
    export DOCKER_BUILDKIT=1
    iidfile=$(mktemp -t docker-iidfile.XXXXXXXXXX)

    platformFlag=""
    if [ -n "$TARGETPLATFORM" ]; then
        platformFlag="--build-arg=TARGETPLATFORM=$TARGETPLATFORM"
    fi

    docker build $platformFlag --target=binaries --iidfile $iidfile --force-rm .
    iid=$(cat $iidfile)
    containerID=$(docker create $iid copy)
    docker cp $containerID:/ bin/tmp
    mv bin/tmp/build* bin/
    rm -rf bin/tmp
    docker rm $containerID
    docker rmi -f $iid
    rm -f $iidfile
}

binaries() {
    platformFlag=""
    if [ ! -z "$TARGETPLATFORM" ]; then
        platformFlag="--frontend-opt=platform=$TARGETPLATFORM"
    fi
    buildctl build $progressFlag --frontend=dockerfile.v0 \
        --local context=. --local dockerfile=. \
        --frontend-opt target=binaries $platformFlag \
        --output type=local,dest=./bin/
}

case $buildmode in
"buildkit")
    binaries
    ;;
"docker-buildkit")
    binariesDocker
    ;;
*)
    echo "buildctl or docker with buildkit support is required"
    exit 1
    ;;
esac
buildxCmd build $platformFlag \
    --target "binaries" \
    --output "type=local,dest=./bin/" \
    .
38 hack/build_ci_first_pass Executable file
@@ -0,0 +1,38 @@
#!/usr/bin/env bash

TYP=$1

. $(dirname $0)/util
set -e

usage() {
    echo "usage: ./hack/build_ci_first_pass <binaries>"
    exit 1
}

if [ -z "$TYP" ]; then
    usage
fi

importCacheFlags=""
exportCacheFlags=""
if [ "$GITHUB_ACTIONS" = "true" ]; then
    if [ -n "$cacheRefFrom" ]; then
        importCacheFlags="--cache-from=type=local,src=$cacheRefFrom"
    fi
    if [ -n "$cacheRefTo" ]; then
        exportCacheFlags="--cache-to=type=local,dest=$cacheRefTo"
    fi
fi

case $TYP in
"binaries")
    buildxCmd build $importCacheFlags $exportCacheFlags \
        --target "binaries" \
        $currentcontext
    ;;
*)
    echo >&2 "Unknown type $TYP"
    exit 1
    ;;
esac
22 hack/cross
@@ -1,20 +1,24 @@
#!/usr/bin/env bash

. $(dirname $0)/util
set -e

: ${TARGETPLATFORM=linux/amd64,linux/arm/v7,linux/arm64,darwin/amd64,windows/amd64,linux/ppc64le,linux/s390x}
: ${CONTINUOUS_INTEGRATION=}
: ${TARGETPLATFORM=linux/amd64,linux/arm/v7,linux/arm64,darwin/amd64,windows/amd64,linux/ppc64le,linux/s390x,linux/riscv64}
: ${EXPORT_LOCAL=}

set -ex
importCacheFlags=""
if [ "$GITHUB_ACTIONS" = "true" ]; then
    if [ -n "$cacheRefFrom" ]; then
        importCacheFlags="--cache-from=type=local,src=$cacheRefFrom"
    fi
fi

exportFlag=""
if [ -n "$EXPORT_LOCAL" ]; then
    exportFlag="--output=type=local,dest=$EXPORT_LOCAL"
    exportFlag="--output=type=local,dest=$EXPORT_LOCAL"
fi

progressFlag=""
if [ "$CONTINUOUS_INTEGRATION" == "true" ]; then progressFlag="--progress=plain";
fi

buildctl build $progressFlag --frontend=dockerfile.v0 --local context=. --local dockerfile=. --opt platform=$TARGETPLATFORM $exportFlag --opt target=binaries
buildxCmd build $importCacheFlags $exportFlag \
    --target "binaries" \
    --platform "$TARGETPLATFORM" \
    $currentcontext
@@ -10,7 +10,7 @@ if [ -n "$TMUX_ENTRYPOINT" ]; then
    tmux new-window
    tmux a -t demo
else
    ( $dockerdCmd 2>/var/log/dockerd.log & )
    ( $dockerdCmd &>/var/log/dockerd.log & )
    exec ash
fi
@@ -7,6 +7,7 @@ services:
    image: docker.io/tonistiigi/db
  webapp:
    build:
      context: .
      dockerfile: Dockerfile.webapp
      args:
        buildno: 1
        buildno: 1
29 hack/dockerfiles/docs.Dockerfile Normal file
@@ -0,0 +1,29 @@
# syntax = docker/dockerfile:1.2

FROM golang:1.16-alpine AS docsgen
WORKDIR /src
RUN --mount=target=. \
    --mount=target=/root/.cache,type=cache \
    go build -mod=vendor -o /out/docsgen ./docs/docsgen

FROM alpine AS gen
RUN apk add --no-cache rsync git
WORKDIR /src
COPY --from=docsgen /out/docsgen /usr/bin
RUN --mount=target=/context \
    --mount=target=.,type=tmpfs,readwrite \
    rsync -a /context/. . && \
    docsgen && \
    mkdir /out && cp -r docs/reference /out

FROM scratch AS update
COPY --from=gen /out /out

FROM gen AS validate
RUN --mount=target=/context \
    --mount=target=.,type=tmpfs,readwrite \
    rsync -a /context/. . && \
    git add -A && \
    rm -rf docs/reference/* && \
    cp -rf /out/* ./docs/ && \
    ./hack/validate-docs check
@@ -1,10 +1,10 @@
# syntax=docker/dockerfile:1.0-experimental
# syntax=docker/dockerfile:1.2

FROM golang:1.12-alpine
RUN apk add --no-cache git
RUN go get -u gopkg.in/alecthomas/gometalinter.v1 \
    && mv /go/bin/gometalinter.v1 /go/bin/gometalinter \
    && gometalinter --install
FROM golang:1.16-alpine
RUN apk add --no-cache gcc musl-dev yamllint
RUN wget -O- -nv https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.36.0
WORKDIR /go/src/github.com/docker/buildx
RUN --mount=target=/go/src/github.com/docker/buildx \
    gometalinter --config=gometalinter.json ./...
RUN --mount=target=/go/src/github.com/docker/buildx --mount=target=/root/.cache,type=cache \
    golangci-lint run
RUN --mount=target=/go/src/github.com/docker/buildx --mount=target=/root/.cache,type=cache \
    yamllint -c .yamllint.yml --strict .
@@ -1,5 +1,6 @@
-# syntax = docker/dockerfile:1.0-experimental
-FROM golang:1.12-alpine AS vendored
+# syntax = docker/dockerfile:1.2
+
+FROM golang:1.16-alpine AS vendored
 RUN apk add --no-cache git rsync
 WORKDIR /src
 RUN --mount=target=/context \
hack/generate-authors (new executable file, 21 lines)
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+
+set -eu -o pipefail -x
+
+if [ -x "$(command -v greadlink)" ]; then
+	# on macOS, GNU readlink is available as greadlink and can be installed through `brew install coreutils`
+	cd "$(dirname "$(greadlink -f "$BASH_SOURCE")")/.."
+else
+	cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.."
+fi
+
+# see also ".mailmap" for how email addresses and names are deduplicated
+
+{
+	cat <<-'EOH'
+	# This file lists all individuals having contributed content to the repository.
+	# For how it is generated, see `scripts/generate-authors.sh`.
+	EOH
+	echo
+	git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf
+} > AUTHORS
hack/lint (35 changed lines)
@@ -1,37 +1,6 @@
 #!/usr/bin/env bash
 
+. $(dirname $0)/util
-set -eu -o pipefail -x
+set -eu
 
-: ${CONTINUOUS_INTEGRATION=}
-
-progressFlag=""
-if [ "$CONTINUOUS_INTEGRATION" == "true" ]; then progressFlag="--progress=plain"; fi
-
-lintDocker() {
-	export DOCKER_BUILDKIT=1
-	iidfile=$(mktemp -t docker-iidfile.XXXXXXXXXX)
-	docker build --iidfile $iidfile -f ./hack/dockerfiles/lint.Dockerfile --force-rm .
-	iid=$(cat $iidfile)
-	docker rmi $iid
-	rm -f $iidfile
-}
-
-lint() {
-	buildctl build $progressFlag --frontend=dockerfile.v0 \
-		--local context=. --local dockerfile=. \
-		--frontend-opt filename=./hack/dockerfiles/lint.Dockerfile
-}
-
-case $buildmode in
-"buildkit")
-	lint
-	;;
-"docker-buildkit")
-	lintDocker
-	;;
-*)
-	echo "buildctl or docker with buildkit support is required"
-	exit 1
-	;;
-esac
+buildxCmd build --file ./hack/dockerfiles/lint.Dockerfile .
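All of the buildctl/docker fallback logic moves into the buildxCmd helper in hack/util, so the lint entry point shrinks to a single build. When the buildx plugin is present, the call above expands to roughly this (a sketch; --progress=plain is only appended when CI=true):

    # The build itself is the lint run: lint.Dockerfile executes
    # golangci-lint and yamllint in RUN steps and keeps no image around.
    docker buildx build --file ./hack/dockerfiles/lint.Dockerfile .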
hack/release (40 changed lines)
@@ -1,32 +1,28 @@
 #!/usr/bin/env bash
 
-TAG=$1
-OUT=$2
+OUT=${1:-release-out}
 
+. $(dirname $0)/util
 set -eu -o pipefail
 
 : ${PLATFORMS=linux/amd64}
-: ${CONTINUOUS_INTEGRATION=}
-
-progressFlag=""
-if [ "$CONTINUOUS_INTEGRATION" == "true" ]; then progressFlag="--progress=plain"; fi
-
-
-usage() {
-	echo "usage: ./hack/release <tag> <out>"
-	exit 1
-}
-
-if [ -z "$TAG" ] || [ -z "$OUT" ]; then
-	usage
+importCacheFlags=""
+if [[ -n "$cacheRefFrom" ]] && [[ "$cacheType" = "local" ]]; then
+	for ref in $cacheRefFrom; do
+		importCacheFlags="$importCacheFlags--cache-from=type=local,src=$ref "
+	done
 fi
 
+buildxCmd build $importCacheFlags \
+	--target "release" \
+	--platform "$PLATFORMS" \
+	--output "type=local,dest=$OUT" \
+	$currentcontext
 
-set -x
-
-buildctl build $progressFlag --frontend=dockerfile.v0 \
-	--local context=. --local dockerfile=. \
-	--opt target=release \
-	--opt platform=$PLATFORMS \
-	--exporter local \
-	--exporter-opt output=$OUT
+# wrap binaries
+{ set +x; } 2>/dev/null
+if [[ $PLATFORMS =~ "," ]]; then
+	mv -f ./$OUT/**/* ./$OUT/
+	find ./$OUT -type d -empty -delete
+fi
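Usage sketch for the rewritten script: tag handling is gone and the only argument is the output directory (values below are illustrative):

    # Build the release target for several platforms; the local exporter
    # writes per-platform subdirectories, which the script then flattens.
    PLATFORMS=linux/amd64,linux/arm64,linux/arm/v7 ./hack/release dist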
hack/test (64 changed lines)
@@ -3,51 +3,45 @@
 . $(dirname $0)/util
 set -eu -o pipefail
 
-: ${CONTINUOUS_INTEGRATION=}
 : ${BUILDX_NOCACHE=}
+: ${TEST_COVERAGE=}
 
-progressFlag=""
-if [ "$CONTINUOUS_INTEGRATION" == "true" ]; then progressFlag="--progress=plain"; fi
+importCacheFlags=""
+if [ -n "$cacheRefFrom" ]; then
+	if [ "$cacheType" = "local" ]; then
+		for ref in $cacheRefFrom; do
+			importCacheFlags="$importCacheFlags--cache-from=type=local,src=$ref "
+		done
+	fi
+fi
 
 iid="buildx-tests"
 iidfile=$(mktemp -t docker-iidfile.XXXXXXXXXX)
 set -x
 
-case $buildmode in
-"buildkit")
-	tmpfile=$(mktemp -t docker-iidfile.XXXXXXXXXX)
-	buildctl build $progressFlag --frontend=dockerfile.v0 \
-		--local context=. --local dockerfile=. \
-		--frontend-opt target=integration-tests \
-		--output type=docker,name=$iid,dest=$tmpfile
-	docker load -i $tmpfile
-	rm $tmpfile
-	;;
-"docker-buildkit")
-	export DOCKER_BUILDKIT=1
-	docker build --iidfile $iidfile --target integration-tests --force-rm .
-	iid=$(cat $iidfile)
-	;;
-*)
-	echo "docker with buildkit support is required"
-	exit 1
-	;;
-esac
-
-cacheVolume="buildx-cache"
-if ! docker inspect "$cacheVolume" 2>&1 >/dev/null ; then
-	cacheVolume=$(docker create --name=buildx-cache -v /root/.cache -v /go/pkg/mod alpine)
+coverageVol=""
+coverageFlags=""
+if [ "$TEST_COVERAGE" = "1" ]; then
+	covdir="$(pwd)/coverage"
+	mkdir -p "$covdir"
+	coverageVol="-v $covdir:/coverage"
+	coverageFlags="-coverprofile=/coverage/coverage.txt -covermode=atomic"
 fi
 
-docker run --rm -v /tmp --volumes-from=$cacheVolume --privileged $iid go test ${TESTFLAGS:--v} ${TESTPKGS:-./...}
+buildxCmd build $importCacheFlags \
+	--target "integration-tests" \
+	--output "type=docker,name=$iid" \
+	$currentcontext
+
+cacheVolume="buildx-cache"
+if ! docker inspect "$cacheVolume" > /dev/null 2>&1; then
+	cacheVolume=$(docker create --name=buildx-cache -v /root/.cache -v /go/pkg/mod alpine)
+fi
+
+docker run --rm -v /tmp $coverageVol --volumes-from=$cacheVolume --privileged $iid go test $coverageFlags ${TESTFLAGS:--v} ${TESTPKGS:-./...}
 
 if [ -n "$BUILDX_NOCACHE" ]; then
 	docker rm -v $cacheVolume
 fi
 
-case $buildmode in
-"docker-buildkit")
-	rm "$iidfile"
-	docker rmi $iid
-	;;
-esac
+rm "$iidfile"
+docker rmi $iid
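A sketch of running a subset of the tests with the new coverage knobs (the test pattern is illustrative):

    # TEST_COVERAGE=1 mounts ./coverage into the test container and adds
    # -coverprofile, so the profile ends up in ./coverage/coverage.txt.
    TEST_COVERAGE=1 TESTFLAGS="-v -run TestBake" ./hack/test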
hack/update-docs (new executable file, 16 lines)
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+
+. $(dirname $0)/util
+set -eu
+
+output=$(mktemp -d -t buildx-output.XXXXXXXXXX)
+
+buildxCmd build \
+	--target "update" \
+	--output "type=local,dest=$output" \
+	--file "./hack/dockerfiles/docs.Dockerfile" \
+	.
+
+rm -rf ./docs/reference/*
+cp -R "$output"/out/* ./docs/
+rm -rf $output
@@ -1,45 +1,16 @@
 #!/usr/bin/env bash
 
+. $(dirname $0)/util
-set -eu -o pipefail -x
+set -eu
 
-: ${CONTINUOUS_INTEGRATION=}
+output=$(mktemp -d -t buildx-output.XXXXXXXXXX)
 
-progressFlag=""
-if [ "$CONTINUOUS_INTEGRATION" == "true" ]; then progressFlag="--progress=plain"; fi
+buildxCmd build \
+	--target "update" \
+	--output "type=local,dest=$output" \
+	--file "./hack/dockerfiles/vendor.Dockerfile" \
+	.
 
-case $buildmode in
-"buildkit")
-	output=$(mktemp -d -t buildctl-output.XXXXXXXXXX)
-	buildctl build $progressFlag --frontend=dockerfile.v0 --local context=. --local dockerfile=. \
-		--frontend-opt target=update \
-		--frontend-opt filename=./hack/dockerfiles/vendor.Dockerfile \
-		--output type=local,dest=$output
-	rm -rf ./vendor
-	cp -R "$output/out/" .
-	rm -rf $output
-	;;
-*)
-	iidfile=$(mktemp -t docker-iidfile.XXXXXXXXXX)
-	case $buildmode in
-	"docker-buildkit")
-		export DOCKER_BUILDKIT=1
-		docker build --iidfile $iidfile -f ./hack/dockerfiles/vendor.Dockerfile --target update --force-rm .
-		;;
-	*)
-		echo "buildctl or docker with buildkit support is required"
-		exit 1
-		;;
-	esac
-	iid=$(cat $iidfile)
-	cid=$(docker create $iid noop)
-	rm -rf ./vendor
-
-	docker cp $cid:/out/go.mod .
-	docker cp $cid:/out/go.sum .
-	docker cp $cid:/out/vendor .
-
-	docker rm $cid
-	rm -f $iidfile
-	;;
-esac
+rm -rf ./vendor
+cp -R "$output"/out/* .
+rm -rf $output
hack/util (72 changed lines)
@@ -1,32 +1,66 @@
 #!/usr/bin/env sh
 
+: ${CI=}
 : ${PREFER_BUILDCTL=}
 : ${PREFER_LEGACY=}
 : ${CLI_PLATFORM=}
+: ${GITHUB_ACTIONS=}
+: ${CACHEDIR_FROM=}
+: ${CACHEDIR_TO=}
 
-newerEqualThan() { # $1=minimum wanted version $2=actual-version
-	[ "$1" = "$(printf "$1\n$2" | sort -V | head -n 1)" ]
+if [ "$PREFER_BUILDCTL" = "1" ]; then
+	echo >&2 "WARNING: PREFER_BUILDCTL is no longer supported. Ignoring."
 fi
 
+if [ "$PREFER_LEGACY" = "1" ]; then
+	echo >&2 "WARNING: PREFER_LEGACY is no longer supported. Ignoring."
+fi
+
+progressFlag=""
+if [ "$CI" = "true" ]; then
+	progressFlag="--progress=plain"
+fi
+
+buildxCmd() {
+	if docker buildx version >/dev/null 2>&1; then
+		set -x
+		docker buildx "$@" $progressFlag
+	elif buildx version >/dev/null 2>&1; then
+		set -x
+		buildx "$@" $progressFlag
+	elif docker version >/dev/null 2>&1; then
+		set -x
+		DOCKER_BUILDKIT=1 docker "$@" $progressFlag
+	else
+		echo >&2 "ERROR: Please enable DOCKER_BUILDKIT or install standalone buildx"
+		exit 1
+	fi
+}
 
-buildmode="legacy"
-if [ "$PREFER_BUILDCTL" = "1" ]; then
-	buildmode="buildkit";
-else
-	serverVersion=$(docker info --format '{{.ServerVersion}}')
-	experimental=$(docker info --format '{{.ExperimentalBuild}}')
-	if [ "$PREFER_LEGACY" != "1" ] && ( newerEqualThan "18.09" $serverVersion || \
-		( newerEqualThan "18.06" $serverVersion && [ "true" = "$experimental" ] ) || \
-		[ "$DOCKER_BUILDKIT" = "1" ]); then
-		buildmode="docker-buildkit";
+if [ -z "$CLI_PLATFORM" ]; then
+	if [ "$(uname -s)" = "Darwin" ]; then
+		arch="$(uname -m)"
+		if [ "$arch" = "x86_64" ]; then
+			arch="amd64"
+		fi
+		CLI_PLATFORM="darwin/$arch"
+	elif uname -s | grep MINGW > /dev/null 2>&1 ; then
+		CLI_PLATFORM="windows/amd64"
 	fi
 fi
 
-if [ -z "$CLI_PLATFORM" ]; then
-	rawos=$(uname -s)
-	if [ "$rawos" = "Darwin" ]; then
-		CLI_PLATFORM="darwin/amd64"
-	elif uname -s | grep MINGW 2>&1 >/dev/null ; then
-		CLI_PLATFORM="windows/amd64"
-	fi
+cacheType=""
+cacheRefFrom=""
+cacheRefTo=""
+currentref=""
+if [ "$GITHUB_ACTIONS" = "true" ]; then
+	currentref="git://github.com/$GITHUB_REPOSITORY#$GITHUB_REF"
+	cacheType="local"
+	cacheRefFrom="$CACHEDIR_FROM"
+	cacheRefTo="$CACHEDIR_TO"
 fi
 
+currentcontext="."
+if [ -n "$currentref" ]; then
+	currentcontext="--build-arg BUILDKIT_CONTEXT_KEEP_GIT_DIR=1 $currentref"
+fi
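The cache variables above are what CI feeds into every hack script; a sketch of a GitHub Actions-style invocation (directory paths are illustrative):

    # GITHUB_ACTIONS=true switches the scripts to a git:// build context
    # derived from GITHUB_REPOSITORY/GITHUB_REF (set by the runner) and to
    # local cache import/export; persist the cache directories between
    # workflow runs, e.g. with actions/cache, to get incremental builds.
    CACHEDIR_FROM=/tmp/.buildx-cache \
    CACHEDIR_TO=/tmp/.buildx-cache-new \
    ./hack/test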
hack/validate-docs (new executable file, 29 lines)
@@ -0,0 +1,29 @@
+#!/usr/bin/env sh
+set -eu
+
+case ${1:-} in
+'')
+	. $(dirname $0)/util
+	buildxCmd build \
+		--target validate \
+		--file ./hack/dockerfiles/docs.Dockerfile \
+		.
+	;;
+check)
+	status="$(git status --porcelain -- docs/reference 2>/dev/null)"
+	diffs=$(echo "$status" | grep -v '^[RAD] ' || true)
+	if [ "$diffs" ]; then
+		{
+			set +x
+			echo 'The result of ./hack/update-docs differs'
+			echo
+			echo "$diffs"
+			echo
+			echo 'Please vendor your package with ./hack/update-docs'
+			echo
+		} >&2
+		exit 1
+	fi
+	echo 'Congratulations! All docs changes are done the right way.'
+	;;
+esac
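The two entry points pair up: no argument re-runs docs generation inside the validate stage, while check inspects the working tree for drift. A sketch of the local workflow:

    # Regenerate the committed reference docs, then confirm nothing drifted.
    ./hack/update-docs
    ./hack/validate-docs check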
@@ -1,40 +1,20 @@
 #!/usr/bin/env sh
 
 set -eu
 
-: ${CONTINUOUS_INTEGRATION=}
-: ${DOCKER_BUILDKIT=}
-
-progressFlag=""
-if [ "$CONTINUOUS_INTEGRATION" = "true" ]; then progressFlag="--progress=plain"; fi
-
 case ${1:-} in
-'')
-	. $(dirname $0)/util
-	case $buildmode in
-	"buildkit")
-		buildctl build $progressFlag --frontend=dockerfile.v0 --local context=. --local dockerfile=. --frontend-opt filename=./hack/dockerfiles/vendor.Dockerfile --frontend-opt target=validate
+'')
+	. $(dirname $0)/util
+	buildxCmd build \
+		--target validate \
+		--file ./hack/dockerfiles/vendor.Dockerfile \
+		.
+	;;
-	"docker-buildkit")
-		export DOCKER_BUILDKIT=1
-		iidfile=$(mktemp -t docker-iidfile.XXXXXXXXXX)
-		docker build --iidfile $iidfile -f ./hack/dockerfiles/vendor.Dockerfile --target validate --force-rm . || exit 1
-		iid=$(cat $iidfile)
-		docker rmi $iid
-		rm -f $iidfile
-		;;
-	*)
-		echo "buildkit support is required"
-		exit 1
-		;;
-	esac
-	;;
-check)
-	status="$(git status --porcelain -- go.mod go.sum vendor 2>/dev/null)"
-	diffs=$(echo "$status" | grep -v '^[RAD] ' || true)
-	if [ "$diffs" ]; then
+check)
+	status="$(git status --porcelain -- go.mod go.sum vendor 2>/dev/null)"
+	diffs=$(echo "$status" | grep -v '^[RAD] ' || true)
+	if [ "$diffs" ]; then
 		{
 			set +x
 			echo 'The result of "make vendor" differs'
 			echo
 			echo "$diffs"

@@ -43,7 +23,7 @@ check)
 			echo
 		} >&2
 		exit 1
-fi
-echo 'Congratulations! All vendoring changes are done the right way.'
-;;
+	fi
+	echo 'Congratulations! All vendoring changes are done the right way.'
+	;;
 esac
@@ -10,18 +10,25 @@ import (
 )
 
 type NodeGroup struct {
-	Name   string
-	Driver string
-	Nodes  []Node
+	Name    string
+	Driver  string
+	Nodes   []Node
+	Dynamic bool
 }
 
 type Node struct {
-	Name      string
-	Endpoint  string
-	Platforms []specs.Platform
+	Name       string
+	Endpoint   string
+	Platforms  []specs.Platform
+	Flags      []string
+	ConfigFile string
+	DriverOpts map[string]string
 }
 
 func (ng *NodeGroup) Leave(name string) error {
+	if ng.Dynamic {
+		return errors.New("dynamic node group does not support Leave")
+	}
 	i := ng.findNode(name)
 	if i == -1 {
 		return errors.Errorf("node %q not found for %s", name, ng.Name)

@@ -33,7 +40,10 @@ func (ng *NodeGroup) Leave(name string) error {
 	return nil
 }
 
-func (ng *NodeGroup) Update(name, endpoint string, platforms []string, endpointsSet bool, actionAppend bool) error {
+func (ng *NodeGroup) Update(name, endpoint string, platforms []string, endpointsSet bool, actionAppend bool, flags []string, configFile string, do map[string]string) error {
+	if ng.Dynamic {
+		return errors.New("dynamic node group does not support Update")
+	}
 	i := ng.findNode(name)
 	if i == -1 && !actionAppend {
 		if len(ng.Nodes) > 0 {

@@ -55,6 +65,9 @@ func (ng *NodeGroup) Update(name, endpoint string, platforms []string, endpoints
 		if len(platforms) > 0 {
 			n.Platforms = pp
 		}
+		if flags != nil {
+			n.Flags = flags
+		}
 		ng.Nodes[i] = n
 		if err := ng.validateDuplicates(endpoint, i); err != nil {
 			return err

@@ -72,9 +85,12 @@ func (ng *NodeGroup) Update(name, endpoint string, platforms []string, endpoints
 	}
 
 	n := Node{
-		Name:      name,
-		Endpoint:  endpoint,
-		Platforms: pp,
+		Name:       name,
+		Endpoint:   endpoint,
+		Platforms:  pp,
+		ConfigFile: configFile,
+		Flags:      flags,
+		DriverOpts: do,
 	}
 	ng.Nodes = append(ng.Nodes, n)
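For context, the new Node fields are the store-side counterparts of buildx create options; a sketch of how they get populated from the CLI (builder name and values are illustrative):

    # Each option maps onto a stored Node field:
    #   --buildkitd-flags -> Flags, --config -> ConfigFile,
    #   --driver-opt      -> DriverOpts
    docker buildx create --name mybuilder \
        --driver docker-container \
        --buildkitd-flags '--allow-insecure-entitlement network.host' \
        --config ./buildkitd.toml \
        --driver-opt image=moby/buildkit:master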
Some files were not shown because too many files have changed in this diff.