Mirror of https://gitea.com/Lydanne/buildx.git (synced 2025-08-31 16:09:07 +08:00)
Compare commits
49 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 00ed17df6d |  |
|  | cfb71fab97 |  |
|  | f62342768b |  |
|  | 7776652a4d |  |
|  | 5a4f80f3ce |  |
|  | b5ea79e277 |  |
|  | 481796f84f |  |
|  | 0090d49e57 |  |
|  | 389ac0c3d1 |  |
|  | 2bb8ce2f57 |  |
|  | 65cea456fd |  |
|  | f7bd5b99da |  |
|  | 8c14407fa2 |  |
|  | 5245a2b3ff |  |
|  | 44d99d4573 |  |
|  | 14942a266e |  |
|  | 123febf107 |  |
|  | 3f5f7c5228 |  |
|  | 6d935625a6 |  |
|  | e640dc6041 |  |
|  | 08244b12b5 |  |
|  | 78d8b926db |  |
|  | 19291d900e |  |
|  | ed9b4a7169 |  |
|  | 033d5629c0 |  |
|  | 7cd5add568 |  |
|  | 2a000096fa |  |
|  | b7781447d7 |  |
|  | f6ba0a23f8 |  |
|  | bf4b95fc3a |  |
|  | 467586dc8d |  |
|  | 8764628976 |  |
|  | 583fe71740 |  |
|  | 9fb3ff1a27 |  |
|  | 9d4f38c5fa |  |
|  | 793082f543 |  |
|  | fe6f697205 |  |
|  | fd3fb752d3 |  |
|  | 7fcea64eb4 |  |
|  | 05e0ce4953 |  |
|  | f8d9d1e776 |  |
|  | 8a7a221a7f |  |
|  | e4db8d2a21 |  |
|  | 7394853ddf |  |
|  | a8be6b576b |  |
|  | 8b960ededd |  |
|  | 4735a71fbd |  |
|  | 37fce8cc06 |  |
|  | 82476ab039 |  |
139 changes: .github/CONTRIBUTING.md (vendored)

@@ -116,60 +116,6 @@ commit automatically with `git commit -s`.

### Run the unit- and integration-tests

Running tests:

```bash
make test
```

This runs all unit and integration tests in a containerized environment.
Locally, every package can be tested separately with standard Go tools, but
integration tests are skipped if the local user doesn't have enough permissions
or worker binaries are not installed.

```bash
# run unit tests only
make test-unit

# run integration tests only
make test-integration

# test a specific package
TESTPKGS=./bake make test

# run all integration tests with a specific worker
TESTFLAGS="--run=//worker=remote -v" make test-integration

# run a specific integration test
TESTFLAGS="--run /TestBuild/worker=remote/ -v" make test-integration

# run a selection of integration tests using a regexp
TESTFLAGS="--run /TestBuild.*/worker=remote/ -v" make test-integration
```

> **Note**
>
> Set `TEST_KEEP_CACHE=1` for the test framework to keep external dependent
> images in a docker volume if you are repeatedly calling `make test`. This
> helps to avoid rate limiting on the remote registry side.
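
For example, a repeated run with the cache kept looks like the following
minimal sketch:

```console
$ TEST_KEEP_CACHE=1 make test
$ TEST_KEEP_CACHE=1 make test   # reuses the cached external images from the first run
```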

> **Note**
>
> Set `TEST_DOCKERD=1` for the test framework to enable the docker workers,
> specifically the `docker` and `docker-container` drivers.
>
> The docker tests cannot be run in parallel, so they require passing
> `--parallel=1` in `TESTFLAGS`.
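
Putting those pieces together, a run against the docker worker could look like
this sketch (the `worker=docker` name is an assumption; match it to the worker
you actually want to test):

```console
$ TEST_DOCKERD=1 TESTFLAGS="--run=//worker=docker -v --parallel=1" make test-integration
```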

> **Note**
>
> If you are working behind a proxy, you can set some or all of
> `HTTP_PROXY=http://ip:port`, `HTTPS_PROXY=http://ip:port`, `NO_PROXY=http://ip:port`
> for the test framework to specify the proxy build args.
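
For example (a sketch; the proxy address is hypothetical):

```console
$ HTTP_PROXY=http://10.0.0.1:3128 HTTPS_PROXY=http://10.0.0.1:3128 make test-integration
```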

### Run the helper commands

To enter a demo container environment and experiment, you may run:

@@ -188,89 +134,6 @@ To generate new vendored files with go modules run:

```console
$ make vendor
```

### Generate profiling data

You can configure Buildx to generate [`pprof`](https://github.com/google/pprof)
memory and CPU profiles to analyze and optimize your builds. These profiles are
useful for identifying performance bottlenecks, detecting memory
inefficiencies, and ensuring the program (Buildx) runs efficiently.

The following environment variables control whether Buildx generates profiling
data for builds:

```console
$ export BUILDX_CPU_PROFILE=buildx_cpu.prof
$ export BUILDX_MEM_PROFILE=buildx_mem.prof
```

When set, Buildx emits profiling samples for the builds to the location
specified by the environment variable.
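
Conceptually, this kind of environment-driven profiling maps onto Go's standard
`runtime/pprof` package. The following is a minimal sketch of that pattern, not
Buildx's actual implementation:

```go
package main

import (
	"os"
	"runtime"
	"runtime/pprof"
)

func main() {
	// CPU profile: sampling runs for the lifetime of the process.
	if path := os.Getenv("BUILDX_CPU_PROFILE"); path != "" {
		if f, err := os.Create(path); err == nil {
			pprof.StartCPUProfile(f)
			defer pprof.StopCPUProfile()
		}
	}

	// ... the build work would happen here ...

	// Memory profile: a single heap snapshot written before exit.
	if path := os.Getenv("BUILDX_MEM_PROFILE"); path != "" {
		if f, err := os.Create(path); err == nil {
			runtime.GC() // flush recent allocations into the heap profile
			pprof.WriteHeapProfile(f)
			f.Close()
		}
	}
}
```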

To analyze and visualize profiling samples, you need `pprof` from the Go
toolchain, and (optionally) GraphViz for visualization in a graphical format.

To inspect profiling data with `pprof`:

1. Build a local binary of Buildx from source.

   ```console
   $ docker buildx bake
   ```

   The binary gets exported to `./bin/build/buildx`.

2. Run a build with the environment variables set to generate profiling data.

   ```console
   $ export BUILDX_CPU_PROFILE=buildx_cpu.prof
   $ export BUILDX_MEM_PROFILE=buildx_mem.prof
   $ ./bin/build/buildx bake
   ```

   This creates `buildx_cpu.prof` and `buildx_mem.prof` for the build.

3. Start `pprof` and specify the filename of the profile that you want to
   analyze.

   ```console
   $ go tool pprof buildx_cpu.prof
   ```

   This opens the `pprof` interactive console. From here, you can inspect the
   profiling sample using various commands. For example, use the `top 10`
   command to view the top 10 most time-consuming entries.

   ```plaintext
   (pprof) top 10
   Showing nodes accounting for 3.04s, 91.02% of 3.34s total
   Dropped 123 nodes (cum <= 0.02s)
   Showing top 10 nodes out of 159
         flat  flat%   sum%        cum   cum%
        1.14s 34.13% 34.13%      1.14s 34.13%  syscall.syscall
        0.91s 27.25% 61.38%      0.91s 27.25%  runtime.kevent
        0.35s 10.48% 71.86%      0.35s 10.48%  runtime.pthread_cond_wait
        0.22s  6.59% 78.44%      0.22s  6.59%  runtime.pthread_cond_signal
        0.15s  4.49% 82.93%      0.15s  4.49%  runtime.usleep
        0.10s  2.99% 85.93%      0.10s  2.99%  runtime.memclrNoHeapPointers
        0.10s  2.99% 88.92%      0.10s  2.99%  runtime.memmove
        0.03s   0.9% 89.82%      0.03s   0.9%  runtime.madvise
        0.02s   0.6% 90.42%      0.02s   0.6%  runtime.(*mspan).typePointersOfUnchecked
        0.02s   0.6% 91.02%      0.02s   0.6%  runtime.pcvalue
   ```

To view the call graph in a GUI, run `go tool pprof -http=:8081 <sample>`.

> [!NOTE]
> Requires [GraphViz](https://www.graphviz.org/) to be installed.

```console
$ go tool pprof -http=:8081 buildx_cpu.prof
Serving web UI on http://127.0.0.1:8081
http://127.0.0.1:8081
```

For more information about using `pprof` and how to interpret the call graph,
refer to the [`pprof` README](https://github.com/google/pprof/blob/main/doc/README.md).

### Conventions

@@ -426,4 +289,4 @@ The rules:

If you are having trouble getting into the mood of idiomatic Go, we recommend
reading through [Effective Go](https://golang.org/doc/effective_go.html). The
[Go Blog](https://blog.golang.org) is also a great resource.
124 changes: .github/ISSUE_TEMPLATE/bug.yml (vendored)

@@ -1,124 +0,0 @@

````yaml
# https://docs.github.com/en/communities/using-templates-to-encourage-useful-issues-and-pull-requests/syntax-for-githubs-form-schema
name: Bug Report
description: Report a bug
labels:
  - status/triage

body:
  - type: markdown
    attributes:
      value: |
        Thank you for taking the time to report a bug!
        If this is a security issue please report it to the [Docker Security team](mailto:security@docker.com).

  - type: checkboxes
    attributes:
      label: Contributing guidelines
      description: |
        Please read the contributing guidelines before proceeding.
      options:
        - label: I've read the [contributing guidelines](https://github.com/docker/buildx/blob/master/.github/CONTRIBUTING.md) and wholeheartedly agree
          required: true

  - type: checkboxes
    attributes:
      label: I've found a bug and checked that ...
      description: |
        Make sure that your request fulfills all of the following requirements.
        If one requirement cannot be satisfied, explain in detail why.
      options:
        - label: ... the documentation does not mention anything about my problem
        - label: ... there are no open or closed issues that are related to my problem

  - type: textarea
    attributes:
      label: Description
      description: |
        Please provide a brief description of the bug in 1-2 sentences.
    validations:
      required: true

  - type: textarea
    attributes:
      label: Expected behaviour
      description: |
        Please describe precisely what you'd expect to happen.
    validations:
      required: true

  - type: textarea
    attributes:
      label: Actual behaviour
      description: |
        Please describe precisely what is actually happening.
    validations:
      required: true

  - type: input
    attributes:
      label: Buildx version
      description: |
        Output of `docker buildx version` command.
        Example: `github.com/docker/buildx v0.8.1 5fac64c2c49dae1320f2b51f1a899ca451935554`
    validations:
      required: true

  - type: textarea
    attributes:
      label: Docker info
      description: |
        Output of `docker info` command.
      render: text

  - type: textarea
    attributes:
      label: Builders list
      description: |
        Output of `docker buildx ls` command.
      render: text
    validations:
      required: true

  - type: textarea
    attributes:
      label: Configuration
      description: >
        Please provide a minimal Dockerfile, bake definition (if applicable) and
        invoked commands to help reproduce your issue.
      placeholder: |
        ```dockerfile
        FROM alpine
        echo hello
        ```

        ```hcl
        group "default" {
          targets = ["app"]
        }
        target "app" {
          dockerfile = "Dockerfile"
          target = "build"
        }
        ```

        ```console
        $ docker buildx build .
        $ docker buildx bake
        ```
    validations:
      required: true

  - type: textarea
    attributes:
      label: Build logs
      description: |
        Please provide logs output (and/or BuildKit logs if applicable).
      render: text
    validations:
      required: false

  - type: textarea
    attributes:
      label: Additional info
      description: |
        Please provide any additional information that could be useful.
````
12 changes: .github/ISSUE_TEMPLATE/config.yml (vendored)

@@ -1,12 +0,0 @@

```yaml
# https://docs.github.com/en/communities/using-templates-to-encourage-useful-issues-and-pull-requests/configuring-issue-templates-for-your-repository#configuring-the-template-chooser
blank_issues_enabled: true
contact_links:
  - name: Questions and Discussions
    url: https://github.com/docker/buildx/discussions/new
    about: Use GitHub Discussions to ask questions and/or open discussion topics.
  - name: Command line reference
    url: https://docs.docker.com/engine/reference/commandline/buildx/
    about: Read the command line reference.
  - name: Documentation
    url: https://docs.docker.com/build/
    about: Read the documentation.
```
15 changes: .github/ISSUE_TEMPLATE/feature.yml (vendored)

@@ -1,15 +0,0 @@

```yaml
# https://docs.github.com/en/communities/using-templates-to-encourage-useful-issues-and-pull-requests/syntax-for-githubs-form-schema
name: Feature request
description: Missing functionality? Come tell us about it!
labels:
  - kind/enhancement
  - status/triage

body:
  - type: textarea
    id: description
    attributes:
      label: Description
      description: What is the feature you want to see?
    validations:
      required: true
```
44 changes: .github/SECURITY.md (vendored)

@@ -1,44 +0,0 @@

# Security Policy

The maintainers of Docker Buildx take security seriously. If you discover
a security issue, please bring it to their attention right away!

## Reporting a Vulnerability

Please **DO NOT** file a public issue; instead, send your report privately
to [security@docker.com](mailto:security@docker.com).

Reporter(s) can expect a response within 72 hours, acknowledging the issue was
received.

## Review Process

After receiving the report, an initial triage and technical analysis is
performed to confirm the report and determine its scope. We may request
additional information in this stage of the process.

Once a reviewer has confirmed the relevance of the report, a draft security
advisory will be created on GitHub. The draft advisory will be used to discuss
the issue with maintainers, the reporter(s), and where applicable, other
affected parties under embargo.

If the vulnerability is accepted, a timeline for developing a patch, public
disclosure, and patch release will be determined. If there is an embargo period
on public disclosure before the patch release, the reporter(s) are expected to
participate in the discussion of the timeline and abide by agreed upon dates
for public disclosure.

## Accreditation

Security reports are greatly appreciated and we will publicly thank you,
although we will keep your name confidential if you request it. We also like to
send gifts; if you're into swag, make sure to let us know. We do not currently
offer a paid security bounty program.

## Supported Versions

Once a new feature release is cut, support for the previous feature release is
discontinued. An exception may be made for urgent security releases that occur
shortly after a new feature release. Buildx does not offer LTS (Long-Term Support)
releases. Refer to the [Support Policy](https://github.com/docker/buildx/blob/master/PROJECT.md#support-policy)
for further details.
7 changes: .github/dependabot.yml (vendored)

@@ -5,11 +5,6 @@ updates:

```yaml
    directory: "/"
    schedule:
      interval: "daily"
    ignore:
      # ignore this dependency
      # it seems to be a bug with dependabot, as pinning to a commit sha should not
      # trigger a new version: https://github.com/docker/buildx/pull/2222#issuecomment-1919092153
      - dependency-name: "docker/docs"
    labels:
      - "area/dependencies"
      - "dependencies"
      - "bot"
```
109 changes: .github/labeler.yml (vendored)

@@ -1,109 +0,0 @@

```yaml
# Add 'area/project' label to changes in basic project documentation and .github folder, excluding .github/workflows
area/project:
  - all:
      - changed-files:
          - any-glob-to-any-file:
              - .github/**
              - LICENSE
              - AUTHORS
              - MAINTAINERS
              - PROJECT.md
              - README.md
              - .gitignore
              - codecov.yml
          - all-globs-to-all-files: '!.github/workflows/*'

# Add 'area/ci' label to changes in the .github/workflows folder
area/ci:
  - changed-files:
      - any-glob-to-any-file: '.github/workflows/**'

# Add 'area/bake' label to changes in the bake code
area/bake:
  - changed-files:
      - any-glob-to-any-file: 'bake/**'

# Add 'area/bake/compose' label to changes in the bake+compose code
area/bake/compose:
  - changed-files:
      - any-glob-to-any-file:
          - bake/compose.go
          - bake/compose_test.go

# Add 'area/build' label to changes in build files
area/build:
  - changed-files:
      - any-glob-to-any-file: 'build/**'

# Add 'area/builder' label to changes in builder files
area/builder:
  - changed-files:
      - any-glob-to-any-file: 'builder/**'

# Add 'area/cli' label to changes in the CLI
area/cli:
  - changed-files:
      - any-glob-to-any-file:
          - cmd/**
          - commands/**

# Add 'area/controller' label to changes in the controller
area/controller:
  - changed-files:
      - any-glob-to-any-file: 'controller/**'

# Add 'area/docs' label to markdown files in the docs folder
area/docs:
  - changed-files:
      - any-glob-to-any-file: 'docs/**/*.md'

# Add 'area/dependencies' label to changes in go dependency files
area/dependencies:
  - changed-files:
      - any-glob-to-any-file:
          - go.mod
          - go.sum
          - vendor/**

# Add 'area/driver' label to changes in the driver folder
area/driver:
  - changed-files:
      - any-glob-to-any-file: 'driver/**'

# Add 'area/driver/docker' label to changes in the docker driver
area/driver/docker:
  - changed-files:
      - any-glob-to-any-file: 'driver/docker/**'

# Add 'area/driver/docker-container' label to changes in the docker-container driver
area/driver/docker-container:
  - changed-files:
      - any-glob-to-any-file: 'driver/docker-container/**'

# Add 'area/driver/kubernetes' label to changes in the kubernetes driver
area/driver/kubernetes:
  - changed-files:
      - any-glob-to-any-file: 'driver/kubernetes/**'

# Add 'area/driver/remote' label to changes in the remote driver
area/driver/remote:
  - changed-files:
      - any-glob-to-any-file: 'driver/remote/**'

# Add 'area/hack' label to changes in the hack folder
area/hack:
  - changed-files:
      - any-glob-to-any-file: 'hack/**'

# Add 'area/history' label to changes in the history command
area/history:
  - changed-files:
      - any-glob-to-any-file: 'commands/history/**'

# Add 'area/tests' label to changes in test files
area/tests:
  - changed-files:
      - any-glob-to-any-file:
          - tests/**
          - '**/*_test.go'
```
735 changes: .github/releases.json (vendored)

@@ -1,735 +0,0 @@

```json
{
  "latest": {
    "id": 90741208,
    "tag_name": "v0.10.2",
    "html_url": "https://github.com/docker/buildx/releases/tag/v0.10.2",
    "assets": [
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.darwin-amd64",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.darwin-amd64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.darwin-amd64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.darwin-arm64",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.darwin-arm64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.darwin-arm64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-amd64",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-amd64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-amd64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm-v6",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm-v6.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm-v6.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm-v7",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm-v7.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm-v7.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm64",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-ppc64le",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-ppc64le.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-ppc64le.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-riscv64",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-riscv64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-riscv64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-s390x",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-s390x.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-s390x.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.windows-amd64.exe",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.windows-amd64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.windows-amd64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.windows-arm64.exe",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.windows-arm64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.windows-arm64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/checksums.txt"
    ]
  },
  "v0.10.2": {
    "id": 90741208,
    "tag_name": "v0.10.2",
    "html_url": "https://github.com/docker/buildx/releases/tag/v0.10.2",
    "assets": [
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.darwin-amd64",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.darwin-amd64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.darwin-amd64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.darwin-arm64",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.darwin-arm64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.darwin-arm64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-amd64",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-amd64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-amd64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm-v6",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm-v6.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm-v6.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm-v7",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm-v7.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm-v7.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm64",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-arm64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-ppc64le",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-ppc64le.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-ppc64le.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-riscv64",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-riscv64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-riscv64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-s390x",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-s390x.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.linux-s390x.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.windows-amd64.exe",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.windows-amd64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.windows-amd64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.windows-arm64.exe",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.windows-arm64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/buildx-v0.10.2.windows-arm64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.2/checksums.txt"
    ]
  },
  "v0.10.1": {
    "id": 90346950,
    "tag_name": "v0.10.1",
    "html_url": "https://github.com/docker/buildx/releases/tag/v0.10.1",
    "assets": [
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.darwin-amd64",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.darwin-amd64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.darwin-amd64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.darwin-arm64",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.darwin-arm64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.darwin-arm64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-amd64",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-amd64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-amd64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-arm-v6",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-arm-v6.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-arm-v6.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-arm-v7",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-arm-v7.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-arm-v7.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-arm64",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-arm64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-arm64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-ppc64le",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-ppc64le.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-ppc64le.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-riscv64",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-riscv64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-riscv64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-s390x",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-s390x.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.linux-s390x.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.windows-amd64.exe",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.windows-amd64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.windows-amd64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.windows-arm64.exe",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.windows-arm64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.1/buildx-v0.10.1.windows-arm64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.1/checksums.txt"
    ]
  },
  "v0.10.0": {
    "id": 88388110,
    "tag_name": "v0.10.0",
    "html_url": "https://github.com/docker/buildx/releases/tag/v0.10.0",
    "assets": [
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.darwin-amd64",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.darwin-amd64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.darwin-amd64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.darwin-arm64",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.darwin-arm64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.darwin-arm64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-amd64",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-amd64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-amd64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-arm-v6",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-arm-v6.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-arm-v6.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-arm-v7",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-arm-v7.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-arm-v7.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-arm64",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-arm64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-arm64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-ppc64le",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-ppc64le.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-ppc64le.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-riscv64",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-riscv64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-riscv64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-s390x",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-s390x.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-s390x.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.windows-amd64.exe",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.windows-amd64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.windows-amd64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.windows-arm64.exe",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.windows-arm64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.windows-arm64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0/checksums.txt"
    ]
  },
  "v0.10.0-rc3": {
    "id": 88191592,
    "tag_name": "v0.10.0-rc3",
    "html_url": "https://github.com/docker/buildx/releases/tag/v0.10.0-rc3",
    "assets": [
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.darwin-amd64",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.darwin-amd64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.darwin-amd64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.darwin-arm64",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.darwin-arm64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.darwin-arm64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-amd64",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-amd64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-amd64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-arm-v6",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-arm-v6.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-arm-v6.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-arm-v7",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-arm-v7.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-arm-v7.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-arm64",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-arm64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-arm64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-ppc64le",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-ppc64le.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-ppc64le.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-riscv64",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-riscv64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-riscv64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-s390x",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-s390x.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.linux-s390x.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.windows-amd64.exe",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.windows-amd64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.windows-amd64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.windows-arm64.exe",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.windows-arm64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/buildx-v0.10.0-rc3.windows-arm64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc3/checksums.txt"
    ]
  },
  "v0.10.0-rc2": {
    "id": 86248476,
    "tag_name": "v0.10.0-rc2",
    "html_url": "https://github.com/docker/buildx/releases/tag/v0.10.0-rc2",
    "assets": [
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.darwin-amd64",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.darwin-amd64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.darwin-amd64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.darwin-arm64",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.darwin-arm64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.darwin-arm64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-amd64",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-amd64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-amd64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-arm-v6",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-arm-v6.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-arm-v6.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-arm-v7",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-arm-v7.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-arm-v7.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-arm64",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-arm64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-arm64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-ppc64le",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-ppc64le.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-ppc64le.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-riscv64",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-riscv64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-riscv64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-s390x",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-s390x.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.linux-s390x.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.windows-amd64.exe",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.windows-amd64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.windows-amd64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.windows-arm64.exe",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.windows-arm64.provenance.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/buildx-v0.10.0-rc2.windows-arm64.sbom.json",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc2/checksums.txt"
    ]
  },
  "v0.10.0-rc1": {
    "id": 85963900,
    "tag_name": "v0.10.0-rc1",
    "html_url": "https://github.com/docker/buildx/releases/tag/v0.10.0-rc1",
    "assets": [
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc1/buildx-v0.10.0-rc1.darwin-amd64",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc1/buildx-v0.10.0-rc1.darwin-arm64",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc1/buildx-v0.10.0-rc1.linux-amd64",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc1/buildx-v0.10.0-rc1.linux-arm-v6",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc1/buildx-v0.10.0-rc1.linux-arm-v7",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc1/buildx-v0.10.0-rc1.linux-arm64",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc1/buildx-v0.10.0-rc1.linux-ppc64le",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc1/buildx-v0.10.0-rc1.linux-riscv64",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc1/buildx-v0.10.0-rc1.linux-s390x",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc1/buildx-v0.10.0-rc1.windows-amd64.exe",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc1/buildx-v0.10.0-rc1.windows-arm64.exe",
      "https://github.com/docker/buildx/releases/download/v0.10.0-rc1/checksums.txt"
    ]
  },
  "v0.9.1": {
    "id": 74760068,
    "tag_name": "v0.9.1",
    "html_url": "https://github.com/docker/buildx/releases/tag/v0.9.1",
    "assets": [
      "https://github.com/docker/buildx/releases/download/v0.9.1/buildx-v0.9.1.darwin-amd64",
      "https://github.com/docker/buildx/releases/download/v0.9.1/buildx-v0.9.1.darwin-arm64",
      "https://github.com/docker/buildx/releases/download/v0.9.1/buildx-v0.9.1.linux-amd64",
      "https://github.com/docker/buildx/releases/download/v0.9.1/buildx-v0.9.1.linux-arm-v6",
      "https://github.com/docker/buildx/releases/download/v0.9.1/buildx-v0.9.1.linux-arm-v7",
      "https://github.com/docker/buildx/releases/download/v0.9.1/buildx-v0.9.1.linux-arm64",
      "https://github.com/docker/buildx/releases/download/v0.9.1/buildx-v0.9.1.linux-ppc64le",
      "https://github.com/docker/buildx/releases/download/v0.9.1/buildx-v0.9.1.linux-riscv64",
      "https://github.com/docker/buildx/releases/download/v0.9.1/buildx-v0.9.1.linux-s390x",
      "https://github.com/docker/buildx/releases/download/v0.9.1/buildx-v0.9.1.windows-amd64.exe",
      "https://github.com/docker/buildx/releases/download/v0.9.1/buildx-v0.9.1.windows-arm64.exe",
      "https://github.com/docker/buildx/releases/download/v0.9.1/checksums.txt"
    ]
  },
  "v0.9.0": {
    "id": 74546589,
    "tag_name": "v0.9.0",
    "html_url": "https://github.com/docker/buildx/releases/tag/v0.9.0",
    "assets": [
      "https://github.com/docker/buildx/releases/download/v0.9.0/buildx-v0.9.0.darwin-amd64",
      "https://github.com/docker/buildx/releases/download/v0.9.0/buildx-v0.9.0.darwin-arm64",
      "https://github.com/docker/buildx/releases/download/v0.9.0/buildx-v0.9.0.linux-amd64",
      "https://github.com/docker/buildx/releases/download/v0.9.0/buildx-v0.9.0.linux-arm-v6",
      "https://github.com/docker/buildx/releases/download/v0.9.0/buildx-v0.9.0.linux-arm-v7",
      "https://github.com/docker/buildx/releases/download/v0.9.0/buildx-v0.9.0.linux-arm64",
      "https://github.com/docker/buildx/releases/download/v0.9.0/buildx-v0.9.0.linux-ppc64le",
      "https://github.com/docker/buildx/releases/download/v0.9.0/buildx-v0.9.0.linux-riscv64",
      "https://github.com/docker/buildx/releases/download/v0.9.0/buildx-v0.9.0.linux-s390x",
      "https://github.com/docker/buildx/releases/download/v0.9.0/buildx-v0.9.0.windows-amd64.exe",
      "https://github.com/docker/buildx/releases/download/v0.9.0/buildx-v0.9.0.windows-arm64.exe",
      "https://github.com/docker/buildx/releases/download/v0.9.0/checksums.txt"
    ]
  },
  "v0.9.0-rc2": {
    "id": 74052235,
    "tag_name": "v0.9.0-rc2",
    "html_url": "https://github.com/docker/buildx/releases/tag/v0.9.0-rc2",
    "assets": [
      "https://github.com/docker/buildx/releases/download/v0.9.0-rc2/buildx-v0.9.0-rc2.darwin-amd64",
      "https://github.com/docker/buildx/releases/download/v0.9.0-rc2/buildx-v0.9.0-rc2.darwin-arm64",
      "https://github.com/docker/buildx/releases/download/v0.9.0-rc2/buildx-v0.9.0-rc2.linux-amd64",
      "https://github.com/docker/buildx/releases/download/v0.9.0-rc2/buildx-v0.9.0-rc2.linux-arm-v6",
      "https://github.com/docker/buildx/releases/download/v0.9.0-rc2/buildx-v0.9.0-rc2.linux-arm-v7",
      "https://github.com/docker/buildx/releases/download/v0.9.0-rc2/buildx-v0.9.0-rc2.linux-arm64",
      "https://github.com/docker/buildx/releases/download/v0.9.0-rc2/buildx-v0.9.0-rc2.linux-ppc64le",
      "https://github.com/docker/buildx/releases/download/v0.9.0-rc2/buildx-v0.9.0-rc2.linux-riscv64",
      "https://github.com/docker/buildx/releases/download/v0.9.0-rc2/buildx-v0.9.0-rc2.linux-s390x",
      "https://github.com/docker/buildx/releases/download/v0.9.0-rc2/buildx-v0.9.0-rc2.windows-amd64.exe",
      "https://github.com/docker/buildx/releases/download/v0.9.0-rc2/buildx-v0.9.0-rc2.windows-arm64.exe",
      "https://github.com/docker/buildx/releases/download/v0.9.0-rc2/checksums.txt"
    ]
  },
  "v0.9.0-rc1": {
    "id": 73389692,
    "tag_name": "v0.9.0-rc1",
    "html_url": "https://github.com/docker/buildx/releases/tag/v0.9.0-rc1",
    "assets": [
      "https://github.com/docker/buildx/releases/download/v0.9.0-rc1/buildx-v0.9.0-rc1.darwin-amd64",
      "https://github.com/docker/buildx/releases/download/v0.9.0-rc1/buildx-v0.9.0-rc1.darwin-arm64",
      "https://github.com/docker/buildx/releases/download/v0.9.0-rc1/buildx-v0.9.0-rc1.linux-amd64",
      "https://github.com/docker/buildx/releases/download/v0.9.0-rc1/buildx-v0.9.0-rc1.linux-arm-v6",
      "https://github.com/docker/buildx/releases/download/v0.9.0-rc1/buildx-v0.9.0-rc1.linux-arm-v7",
      "https://github.com/docker/buildx/releases/download/v0.9.0-rc1/buildx-v0.9.0-rc1.linux-arm64",
      "https://github.com/docker/buildx/releases/download/v0.9.0-rc1/buildx-v0.9.0-rc1.linux-ppc64le",
      "https://github.com/docker/buildx/releases/download/v0.9.0-rc1/buildx-v0.9.0-rc1.linux-riscv64",
      "https://github.com/docker/buildx/releases/download/v0.9.0-rc1/buildx-v0.9.0-rc1.linux-s390x",
      "https://github.com/docker/buildx/releases/download/v0.9.0-rc1/buildx-v0.9.0-rc1.windows-amd64.exe",
      "https://github.com/docker/buildx/releases/download/v0.9.0-rc1/buildx-v0.9.0-rc1.windows-arm64.exe",
      "https://github.com/docker/buildx/releases/download/v0.9.0-rc1/checksums.txt"
    ]
  },
  "v0.8.2": {
    "id": 63479740,
    "tag_name": "v0.8.2",
    "html_url": "https://github.com/docker/buildx/releases/tag/v0.8.2",
    "assets": [
      "https://github.com/docker/buildx/releases/download/v0.8.2/buildx-v0.8.2.darwin-amd64",
      "https://github.com/docker/buildx/releases/download/v0.8.2/buildx-v0.8.2.darwin-arm64",
      "https://github.com/docker/buildx/releases/download/v0.8.2/buildx-v0.8.2.linux-amd64",
      "https://github.com/docker/buildx/releases/download/v0.8.2/buildx-v0.8.2.linux-arm-v6",
      "https://github.com/docker/buildx/releases/download/v0.8.2/buildx-v0.8.2.linux-arm-v7",
      "https://github.com/docker/buildx/releases/download/v0.8.2/buildx-v0.8.2.linux-arm64",
      "https://github.com/docker/buildx/releases/download/v0.8.2/buildx-v0.8.2.linux-ppc64le",
      "https://github.com/docker/buildx/releases/download/v0.8.2/buildx-v0.8.2.linux-riscv64",
      "https://github.com/docker/buildx/releases/download/v0.8.2/buildx-v0.8.2.linux-s390x",
      "https://github.com/docker/buildx/releases/download/v0.8.2/buildx-v0.8.2.windows-amd64.exe",
      "https://github.com/docker/buildx/releases/download/v0.8.2/buildx-v0.8.2.windows-arm64.exe",
      "https://github.com/docker/buildx/releases/download/v0.8.2/checksums.txt"
    ]
  },
  "v0.8.1": {
    "id": 62289050,
    "tag_name": "v0.8.1",
    "html_url": "https://github.com/docker/buildx/releases/tag/v0.8.1",
    "assets": [
      "https://github.com/docker/buildx/releases/download/v0.8.1/buildx-v0.8.1.darwin-amd64",
      "https://github.com/docker/buildx/releases/download/v0.8.1/buildx-v0.8.1.darwin-arm64",
      "https://github.com/docker/buildx/releases/download/v0.8.1/buildx-v0.8.1.linux-amd64",
      "https://github.com/docker/buildx/releases/download/v0.8.1/buildx-v0.8.1.linux-arm-v6",
      "https://github.com/docker/buildx/releases/download/v0.8.1/buildx-v0.8.1.linux-arm-v7",
      "https://github.com/docker/buildx/releases/download/v0.8.1/buildx-v0.8.1.linux-arm64",
      "https://github.com/docker/buildx/releases/download/v0.8.1/buildx-v0.8.1.linux-ppc64le",
      "https://github.com/docker/buildx/releases/download/v0.8.1/buildx-v0.8.1.linux-riscv64",
      "https://github.com/docker/buildx/releases/download/v0.8.1/buildx-v0.8.1.linux-s390x",
      "https://github.com/docker/buildx/releases/download/v0.8.1/buildx-v0.8.1.windows-amd64.exe",
      "https://github.com/docker/buildx/releases/download/v0.8.1/buildx-v0.8.1.windows-arm64.exe",
      "https://github.com/docker/buildx/releases/download/v0.8.1/checksums.txt"
    ]
  },
  "v0.8.0": {
    "id": 61423774,
    "tag_name": "v0.8.0",
    "html_url": "https://github.com/docker/buildx/releases/tag/v0.8.0",
    "assets": [
      "https://github.com/docker/buildx/releases/download/v0.8.0/buildx-v0.8.0.darwin-amd64",
      "https://github.com/docker/buildx/releases/download/v0.8.0/buildx-v0.8.0.darwin-arm64",
      "https://github.com/docker/buildx/releases/download/v0.8.0/buildx-v0.8.0.linux-amd64",
      "https://github.com/docker/buildx/releases/download/v0.8.0/buildx-v0.8.0.linux-arm-v6",
      "https://github.com/docker/buildx/releases/download/v0.8.0/buildx-v0.8.0.linux-arm-v7",
      "https://github.com/docker/buildx/releases/download/v0.8.0/buildx-v0.8.0.linux-arm64",
      "https://github.com/docker/buildx/releases/download/v0.8.0/buildx-v0.8.0.linux-ppc64le",
      "https://github.com/docker/buildx/releases/download/v0.8.0/buildx-v0.8.0.linux-riscv64",
      "https://github.com/docker/buildx/releases/download/v0.8.0/buildx-v0.8.0.linux-s390x",
      "https://github.com/docker/buildx/releases/download/v0.8.0/buildx-v0.8.0.windows-amd64.exe",
      "https://github.com/docker/buildx/releases/download/v0.8.0/buildx-v0.8.0.windows-arm64.exe",
      "https://github.com/docker/buildx/releases/download/v0.8.0/checksums.txt"
    ]
  },
  "v0.8.0-rc1": {
    "id": 60513568,
    "tag_name": "v0.8.0-rc1",
    "html_url": "https://github.com/docker/buildx/releases/tag/v0.8.0-rc1",
    "assets": [
      "https://github.com/docker/buildx/releases/download/v0.8.0-rc1/buildx-v0.8.0-rc1.darwin-amd64",
      "https://github.com/docker/buildx/releases/download/v0.8.0-rc1/buildx-v0.8.0-rc1.darwin-arm64",
      "https://github.com/docker/buildx/releases/download/v0.8.0-rc1/buildx-v0.8.0-rc1.linux-amd64",
      "https://github.com/docker/buildx/releases/download/v0.8.0-rc1/buildx-v0.8.0-rc1.linux-arm-v6",
      "https://github.com/docker/buildx/releases/download/v0.8.0-rc1/buildx-v0.8.0-rc1.linux-arm-v7",
      "https://github.com/docker/buildx/releases/download/v0.8.0-rc1/buildx-v0.8.0-rc1.linux-arm64",
      "https://github.com/docker/buildx/releases/download/v0.8.0-rc1/buildx-v0.8.0-rc1.linux-ppc64le",
      "https://github.com/docker/buildx/releases/download/v0.8.0-rc1/buildx-v0.8.0-rc1.linux-riscv64",
      "https://github.com/docker/buildx/releases/download/v0.8.0-rc1/buildx-v0.8.0-rc1.linux-s390x",
      "https://github.com/docker/buildx/releases/download/v0.8.0-rc1/buildx-v0.8.0-rc1.windows-amd64.exe",
      "https://github.com/docker/buildx/releases/download/v0.8.0-rc1/buildx-v0.8.0-rc1.windows-arm64.exe",
      "https://github.com/docker/buildx/releases/download/v0.8.0-rc1/checksums.txt"
    ]
  },
  "v0.7.1": {
    "id": 54098347,
    "tag_name": "v0.7.1",
    "html_url": "https://github.com/docker/buildx/releases/tag/v0.7.1",
    "assets": [
      "https://github.com/docker/buildx/releases/download/v0.7.1/buildx-v0.7.1.darwin-amd64",
      "https://github.com/docker/buildx/releases/download/v0.7.1/buildx-v0.7.1.darwin-arm64",
      "https://github.com/docker/buildx/releases/download/v0.7.1/buildx-v0.7.1.linux-amd64",
      "https://github.com/docker/buildx/releases/download/v0.7.1/buildx-v0.7.1.linux-arm-v6",
      "https://github.com/docker/buildx/releases/download/v0.7.1/buildx-v0.7.1.linux-arm-v7",
      "https://github.com/docker/buildx/releases/download/v0.7.1/buildx-v0.7.1.linux-arm64",
      "https://github.com/docker/buildx/releases/download/v0.7.1/buildx-v0.7.1.linux-ppc64le",
      "https://github.com/docker/buildx/releases/download/v0.7.1/buildx-v0.7.1.linux-riscv64",
      "https://github.com/docker/buildx/releases/download/v0.7.1/buildx-v0.7.1.linux-s390x",
      "https://github.com/docker/buildx/releases/download/v0.7.1/buildx-v0.7.1.windows-amd64.exe",
      "https://github.com/docker/buildx/releases/download/v0.7.1/buildx-v0.7.1.windows-arm64.exe",
      "https://github.com/docker/buildx/releases/download/v0.7.1/checksums.txt"
    ]
  },
  "v0.7.0": {
    "id": 53109422,
    "tag_name": "v0.7.0",
    "html_url": "https://github.com/docker/buildx/releases/tag/v0.7.0",
    "assets": [
      "https://github.com/docker/buildx/releases/download/v0.7.0/buildx-v0.7.0.darwin-amd64",
      "https://github.com/docker/buildx/releases/download/v0.7.0/buildx-v0.7.0.darwin-arm64",
      "https://github.com/docker/buildx/releases/download/v0.7.0/buildx-v0.7.0.linux-amd64",
      "https://github.com/docker/buildx/releases/download/v0.7.0/buildx-v0.7.0.linux-arm-v6",
      "https://github.com/docker/buildx/releases/download/v0.7.0/buildx-v0.7.0.linux-arm-v7",
      "https://github.com/docker/buildx/releases/download/v0.7.0/buildx-v0.7.0.linux-arm64",
      "https://github.com/docker/buildx/releases/download/v0.7.0/buildx-v0.7.0.linux-ppc64le",
      "https://github.com/docker/buildx/releases/download/v0.7.0/buildx-v0.7.0.linux-riscv64",
      "https://github.com/docker/buildx/releases/download/v0.7.0/buildx-v0.7.0.linux-s390x",
      "https://github.com/docker/buildx/releases/download/v0.7.0/buildx-v0.7.0.windows-amd64.exe",
      "https://github.com/docker/buildx/releases/download/v0.7.0/buildx-v0.7.0.windows-arm64.exe",
      "https://github.com/docker/buildx/releases/download/v0.7.0/checksums.txt"
    ]
  },
  "v0.7.0-rc1": {
    "id": 52726324,
    "tag_name": "v0.7.0-rc1",
    "html_url": "https://github.com/docker/buildx/releases/tag/v0.7.0-rc1",
    "assets": [
      "https://github.com/docker/buildx/releases/download/v0.7.0-rc1/buildx-v0.7.0-rc1.darwin-amd64",
      "https://github.com/docker/buildx/releases/download/v0.7.0-rc1/buildx-v0.7.0-rc1.darwin-arm64",
      "https://github.com/docker/buildx/releases/download/v0.7.0-rc1/buildx-v0.7.0-rc1.linux-amd64",
      "https://github.com/docker/buildx/releases/download/v0.7.0-rc1/buildx-v0.7.0-rc1.linux-arm-v6",
      "https://github.com/docker/buildx/releases/download/v0.7.0-rc1/buildx-v0.7.0-rc1.linux-arm-v7",
      "https://github.com/docker/buildx/releases/download/v0.7.0-rc1/buildx-v0.7.0-rc1.linux-arm64",
      "https://github.com/docker/buildx/releases/download/v0.7.0-rc1/buildx-v0.7.0-rc1.linux-ppc64le",
      "https://github.com/docker/buildx/releases/download/v0.7.0-rc1/buildx-v0.7.0-rc1.linux-riscv64",
      "https://github.com/docker/buildx/releases/download/v0.7.0-rc1/buildx-v0.7.0-rc1.linux-s390x",
      "https://github.com/docker/buildx/releases/download/v0.7.0-rc1/buildx-v0.7.0-rc1.windows-amd64.exe",
      "https://github.com/docker/buildx/releases/download/v0.7.0-rc1/buildx-v0.7.0-rc1.windows-arm64.exe",
      "https://github.com/docker/buildx/releases/download/v0.7.0-rc1/checksums.txt"
    ]
  },
  "v0.6.3": {
    "id": 48691641,
    "tag_name": "v0.6.3",
    "html_url": "https://github.com/docker/buildx/releases/tag/v0.6.3",
    "assets": [
      "https://github.com/docker/buildx/releases/download/v0.6.3/buildx-v0.6.3.darwin-amd64",
      "https://github.com/docker/buildx/releases/download/v0.6.3/buildx-v0.6.3.darwin-arm64",
      "https://github.com/docker/buildx/releases/download/v0.6.3/buildx-v0.6.3.linux-amd64",
      "https://github.com/docker/buildx/releases/download/v0.6.3/buildx-v0.6.3.linux-arm-v6",
      "https://github.com/docker/buildx/releases/download/v0.6.3/buildx-v0.6.3.linux-arm-v7",
      "https://github.com/docker/buildx/releases/download/v0.6.3/buildx-v0.6.3.linux-arm64",
      "https://github.com/docker/buildx/releases/download/v0.6.3/buildx-v0.6.3.linux-ppc64le",
      "https://github.com/docker/buildx/releases/download/v0.6.3/buildx-v0.6.3.linux-riscv64",
      "https://github.com/docker/buildx/releases/download/v0.6.3/buildx-v0.6.3.linux-s390x",
      "https://github.com/docker/buildx/releases/download/v0.6.3/buildx-v0.6.3.windows-amd64.exe",
      "https://github.com/docker/buildx/releases/download/v0.6.3/buildx-v0.6.3.windows-arm64.exe"
    ]
  },
  "v0.6.2": {
    "id": 48207405,
    "tag_name": "v0.6.2",
    "html_url": "https://github.com/docker/buildx/releases/tag/v0.6.2",
    "assets": [
      "https://github.com/docker/buildx/releases/download/v0.6.2/buildx-v0.6.2.darwin-amd64",
      "https://github.com/docker/buildx/releases/download/v0.6.2/buildx-v0.6.2.darwin-arm64",
      "https://github.com/docker/buildx/releases/download/v0.6.2/buildx-v0.6.2.linux-amd64",
      "https://github.com/docker/buildx/releases/download/v0.6.2/buildx-v0.6.2.linux-arm-v6",
      "https://github.com/docker/buildx/releases/download/v0.6.2/buildx-v0.6.2.linux-arm-v7",
      "https://github.com/docker/buildx/releases/download/v0.6.2/buildx-v0.6.2.linux-arm64",
      "https://github.com/docker/buildx/releases/download/v0.6.2/buildx-v0.6.2.linux-ppc64le",
      "https://github.com/docker/buildx/releases/download/v0.6.2/buildx-v0.6.2.linux-riscv64",
      "https://github.com/docker/buildx/releases/download/v0.6.2/buildx-v0.6.2.linux-s390x",
```
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.2/buildx-v0.6.2.windows-amd64.exe",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.2/buildx-v0.6.2.windows-arm64.exe"
|
||||
]
|
||||
},
|
||||
"v0.6.1": {
|
||||
"id": 47064772,
|
||||
"tag_name": "v0.6.1",
|
||||
"html_url": "https://github.com/docker/buildx/releases/tag/v0.6.1",
|
||||
"assets": [
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.1/buildx-v0.6.1.darwin-amd64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.1/buildx-v0.6.1.darwin-arm64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.1/buildx-v0.6.1.linux-amd64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.1/buildx-v0.6.1.linux-arm-v6",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.1/buildx-v0.6.1.linux-arm-v7",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.1/buildx-v0.6.1.linux-arm64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.1/buildx-v0.6.1.linux-ppc64le",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.1/buildx-v0.6.1.linux-riscv64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.1/buildx-v0.6.1.linux-s390x",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.1/buildx-v0.6.1.windows-amd64.exe",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.1/buildx-v0.6.1.windows-arm64.exe"
|
||||
]
|
||||
},
|
||||
"v0.6.0": {
|
||||
"id": 46343260,
|
||||
"tag_name": "v0.6.0",
|
||||
"html_url": "https://github.com/docker/buildx/releases/tag/v0.6.0",
|
||||
"assets": [
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.0/buildx-v0.6.0.darwin-amd64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.0/buildx-v0.6.0.darwin-arm64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.0/buildx-v0.6.0.linux-amd64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.0/buildx-v0.6.0.linux-arm-v6",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.0/buildx-v0.6.0.linux-arm-v7",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.0/buildx-v0.6.0.linux-arm64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.0/buildx-v0.6.0.linux-ppc64le",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.0/buildx-v0.6.0.linux-riscv64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.0/buildx-v0.6.0.linux-s390x",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.0/buildx-v0.6.0.windows-amd64.exe",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.0/buildx-v0.6.0.windows-arm64.exe"
|
||||
]
|
||||
},
|
||||
"v0.6.0-rc1": {
|
||||
"id": 46230351,
|
||||
"tag_name": "v0.6.0-rc1",
|
||||
"html_url": "https://github.com/docker/buildx/releases/tag/v0.6.0-rc1",
|
||||
"assets": [
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.0-rc1/buildx-v0.6.0-rc1.darwin-amd64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.0-rc1/buildx-v0.6.0-rc1.darwin-arm64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.0-rc1/buildx-v0.6.0-rc1.linux-amd64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.0-rc1/buildx-v0.6.0-rc1.linux-arm-v6",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.0-rc1/buildx-v0.6.0-rc1.linux-arm-v7",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.0-rc1/buildx-v0.6.0-rc1.linux-arm64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.0-rc1/buildx-v0.6.0-rc1.linux-ppc64le",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.0-rc1/buildx-v0.6.0-rc1.linux-riscv64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.0-rc1/buildx-v0.6.0-rc1.linux-s390x",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.0-rc1/buildx-v0.6.0-rc1.windows-amd64.exe",
|
||||
"https://github.com/docker/buildx/releases/download/v0.6.0-rc1/buildx-v0.6.0-rc1.windows-arm64.exe"
|
||||
]
|
||||
},
|
||||
"v0.5.1": {
|
||||
"id": 35276550,
|
||||
"tag_name": "v0.5.1",
|
||||
"html_url": "https://github.com/docker/buildx/releases/tag/v0.5.1",
|
||||
"assets": [
|
||||
"https://github.com/docker/buildx/releases/download/v0.5.1/buildx-v0.5.1.darwin-amd64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.5.1/buildx-v0.5.1.darwin-arm64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.5.1/buildx-v0.5.1.darwin-universal",
|
||||
"https://github.com/docker/buildx/releases/download/v0.5.1/buildx-v0.5.1.linux-amd64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.5.1/buildx-v0.5.1.linux-arm-v6",
|
||||
"https://github.com/docker/buildx/releases/download/v0.5.1/buildx-v0.5.1.linux-arm-v7",
|
||||
"https://github.com/docker/buildx/releases/download/v0.5.1/buildx-v0.5.1.linux-arm64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.5.1/buildx-v0.5.1.linux-ppc64le",
|
||||
"https://github.com/docker/buildx/releases/download/v0.5.1/buildx-v0.5.1.linux-s390x",
|
||||
"https://github.com/docker/buildx/releases/download/v0.5.1/buildx-v0.5.1.windows-amd64.exe"
|
||||
]
|
||||
},
|
||||
"v0.5.0": {
|
||||
"id": 35268960,
|
||||
"tag_name": "v0.5.0",
|
||||
"html_url": "https://github.com/docker/buildx/releases/tag/v0.5.0",
|
||||
"assets": [
|
||||
"https://github.com/docker/buildx/releases/download/v0.5.0/buildx-v0.5.0.darwin-amd64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.5.0/buildx-v0.5.0.darwin-arm64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.5.0/buildx-v0.5.0.darwin-universal",
|
||||
"https://github.com/docker/buildx/releases/download/v0.5.0/buildx-v0.5.0.linux-amd64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.5.0/buildx-v0.5.0.linux-arm-v6",
|
||||
"https://github.com/docker/buildx/releases/download/v0.5.0/buildx-v0.5.0.linux-arm-v7",
|
||||
"https://github.com/docker/buildx/releases/download/v0.5.0/buildx-v0.5.0.linux-arm64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.5.0/buildx-v0.5.0.linux-ppc64le",
|
||||
"https://github.com/docker/buildx/releases/download/v0.5.0/buildx-v0.5.0.linux-s390x",
|
||||
"https://github.com/docker/buildx/releases/download/v0.5.0/buildx-v0.5.0.windows-amd64.exe"
|
||||
]
|
||||
},
|
||||
"v0.5.0-rc1": {
|
||||
"id": 35015334,
|
||||
"tag_name": "v0.5.0-rc1",
|
||||
"html_url": "https://github.com/docker/buildx/releases/tag/v0.5.0-rc1",
|
||||
"assets": [
|
||||
"https://github.com/docker/buildx/releases/download/v0.5.0-rc1/buildx-v0.5.0-rc1.darwin-amd64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.5.0-rc1/buildx-v0.5.0-rc1.linux-amd64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.5.0-rc1/buildx-v0.5.0-rc1.linux-arm-v6",
|
||||
"https://github.com/docker/buildx/releases/download/v0.5.0-rc1/buildx-v0.5.0-rc1.linux-arm-v7",
|
||||
"https://github.com/docker/buildx/releases/download/v0.5.0-rc1/buildx-v0.5.0-rc1.linux-arm64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.5.0-rc1/buildx-v0.5.0-rc1.linux-ppc64le",
|
||||
"https://github.com/docker/buildx/releases/download/v0.5.0-rc1/buildx-v0.5.0-rc1.linux-s390x",
|
||||
"https://github.com/docker/buildx/releases/download/v0.5.0-rc1/buildx-v0.5.0-rc1.windows-amd64.exe"
|
||||
]
|
||||
},
|
||||
"v0.4.2": {
|
||||
"id": 30007794,
|
||||
"tag_name": "v0.4.2",
|
||||
"html_url": "https://github.com/docker/buildx/releases/tag/v0.4.2",
|
||||
"assets": [
|
||||
"https://github.com/docker/buildx/releases/download/v0.4.2/buildx-v0.4.2.darwin-amd64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.4.2/buildx-v0.4.2.linux-amd64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.4.2/buildx-v0.4.2.linux-arm-v6",
|
||||
"https://github.com/docker/buildx/releases/download/v0.4.2/buildx-v0.4.2.linux-arm-v7",
|
||||
"https://github.com/docker/buildx/releases/download/v0.4.2/buildx-v0.4.2.linux-arm64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.4.2/buildx-v0.4.2.linux-ppc64le",
|
||||
"https://github.com/docker/buildx/releases/download/v0.4.2/buildx-v0.4.2.linux-s390x",
|
||||
"https://github.com/docker/buildx/releases/download/v0.4.2/buildx-v0.4.2.windows-amd64.exe"
|
||||
]
|
||||
},
|
||||
"v0.4.1": {
|
||||
"id": 26067509,
|
||||
"tag_name": "v0.4.1",
|
||||
"html_url": "https://github.com/docker/buildx/releases/tag/v0.4.1",
|
||||
"assets": [
|
||||
"https://github.com/docker/buildx/releases/download/v0.4.1/buildx-v0.4.1.darwin-amd64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.4.1/buildx-v0.4.1.linux-amd64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.4.1/buildx-v0.4.1.linux-arm-v6",
|
||||
"https://github.com/docker/buildx/releases/download/v0.4.1/buildx-v0.4.1.linux-arm-v7",
|
||||
"https://github.com/docker/buildx/releases/download/v0.4.1/buildx-v0.4.1.linux-arm64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.4.1/buildx-v0.4.1.linux-ppc64le",
|
||||
"https://github.com/docker/buildx/releases/download/v0.4.1/buildx-v0.4.1.linux-s390x",
|
||||
"https://github.com/docker/buildx/releases/download/v0.4.1/buildx-v0.4.1.windows-amd64.exe"
|
||||
]
|
||||
},
|
||||
"v0.4.0": {
|
||||
"id": 26028174,
|
||||
"tag_name": "v0.4.0",
|
||||
"html_url": "https://github.com/docker/buildx/releases/tag/v0.4.0",
|
||||
"assets": [
|
||||
"https://github.com/docker/buildx/releases/download/v0.4.0/buildx-v0.4.0.darwin-amd64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.4.0/buildx-v0.4.0.linux-amd64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.4.0/buildx-v0.4.0.linux-arm-v6",
|
||||
"https://github.com/docker/buildx/releases/download/v0.4.0/buildx-v0.4.0.linux-arm-v7",
|
||||
"https://github.com/docker/buildx/releases/download/v0.4.0/buildx-v0.4.0.linux-arm64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.4.0/buildx-v0.4.0.linux-ppc64le",
|
||||
"https://github.com/docker/buildx/releases/download/v0.4.0/buildx-v0.4.0.linux-s390x",
|
||||
"https://github.com/docker/buildx/releases/download/v0.4.0/buildx-v0.4.0.windows-amd64.exe"
|
||||
]
|
||||
},
|
||||
"v0.3.1": {
|
||||
"id": 20316235,
|
||||
"tag_name": "v0.3.1",
|
||||
"html_url": "https://github.com/docker/buildx/releases/tag/v0.3.1",
|
||||
"assets": [
|
||||
"https://github.com/docker/buildx/releases/download/v0.3.1/buildx-v0.3.1.darwin-amd64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.3.1/buildx-v0.3.1.linux-amd64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.3.1/buildx-v0.3.1.linux-arm-v6",
|
||||
"https://github.com/docker/buildx/releases/download/v0.3.1/buildx-v0.3.1.linux-arm-v7",
|
||||
"https://github.com/docker/buildx/releases/download/v0.3.1/buildx-v0.3.1.linux-arm64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.3.1/buildx-v0.3.1.linux-ppc64le",
|
||||
"https://github.com/docker/buildx/releases/download/v0.3.1/buildx-v0.3.1.linux-s390x",
|
||||
"https://github.com/docker/buildx/releases/download/v0.3.1/buildx-v0.3.1.windows-amd64.exe"
|
||||
]
|
||||
},
|
||||
"v0.3.0": {
|
||||
"id": 19029664,
|
||||
"tag_name": "v0.3.0",
|
||||
"html_url": "https://github.com/docker/buildx/releases/tag/v0.3.0",
|
||||
"assets": [
|
||||
"https://github.com/docker/buildx/releases/download/v0.3.0/buildx-v0.3.0.darwin-amd64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.3.0/buildx-v0.3.0.linux-amd64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.3.0/buildx-v0.3.0.linux-arm-v6",
|
||||
"https://github.com/docker/buildx/releases/download/v0.3.0/buildx-v0.3.0.linux-arm-v7",
|
||||
"https://github.com/docker/buildx/releases/download/v0.3.0/buildx-v0.3.0.linux-arm64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.3.0/buildx-v0.3.0.linux-ppc64le",
|
||||
"https://github.com/docker/buildx/releases/download/v0.3.0/buildx-v0.3.0.linux-s390x",
|
||||
"https://github.com/docker/buildx/releases/download/v0.3.0/buildx-v0.3.0.windows-amd64.exe"
|
||||
]
|
||||
},
|
||||
"v0.2.2": {
|
||||
"id": 17671545,
|
||||
"tag_name": "v0.2.2",
|
||||
"html_url": "https://github.com/docker/buildx/releases/tag/v0.2.2",
|
||||
"assets": [
|
||||
"https://github.com/docker/buildx/releases/download/v0.2.2/buildx-v0.2.2.darwin-amd64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.2.2/buildx-v0.2.2.linux-amd64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.2.2/buildx-v0.2.2.linux-arm-v6",
|
||||
"https://github.com/docker/buildx/releases/download/v0.2.2/buildx-v0.2.2.linux-arm-v7",
|
||||
"https://github.com/docker/buildx/releases/download/v0.2.2/buildx-v0.2.2.linux-arm64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.2.2/buildx-v0.2.2.linux-ppc64le",
|
||||
"https://github.com/docker/buildx/releases/download/v0.2.2/buildx-v0.2.2.linux-s390x",
|
||||
"https://github.com/docker/buildx/releases/download/v0.2.2/buildx-v0.2.2.windows-amd64.exe"
|
||||
]
|
||||
},
|
||||
"v0.2.1": {
|
||||
"id": 17582885,
|
||||
"tag_name": "v0.2.1",
|
||||
"html_url": "https://github.com/docker/buildx/releases/tag/v0.2.1",
|
||||
"assets": [
|
||||
"https://github.com/docker/buildx/releases/download/v0.2.1/buildx-v0.2.1.darwin-amd64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.2.1/buildx-v0.2.1.linux-amd64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.2.1/buildx-v0.2.1.linux-arm-v6",
|
||||
"https://github.com/docker/buildx/releases/download/v0.2.1/buildx-v0.2.1.linux-arm-v7",
|
||||
"https://github.com/docker/buildx/releases/download/v0.2.1/buildx-v0.2.1.linux-arm64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.2.1/buildx-v0.2.1.linux-ppc64le",
|
||||
"https://github.com/docker/buildx/releases/download/v0.2.1/buildx-v0.2.1.linux-s390x",
|
||||
"https://github.com/docker/buildx/releases/download/v0.2.1/buildx-v0.2.1.windows-amd64.exe"
|
||||
]
|
||||
},
|
||||
"v0.2.0": {
|
||||
"id": 16965310,
|
||||
"tag_name": "v0.2.0",
|
||||
"html_url": "https://github.com/docker/buildx/releases/tag/v0.2.0",
|
||||
"assets": [
|
||||
"https://github.com/docker/buildx/releases/download/v0.2.0/buildx-v0.2.0.darwin-amd64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.2.0/buildx-v0.2.0.linux-amd64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.2.0/buildx-v0.2.0.linux-arm-v6",
|
||||
"https://github.com/docker/buildx/releases/download/v0.2.0/buildx-v0.2.0.linux-arm-v7",
|
||||
"https://github.com/docker/buildx/releases/download/v0.2.0/buildx-v0.2.0.linux-arm64",
|
||||
"https://github.com/docker/buildx/releases/download/v0.2.0/buildx-v0.2.0.linux-ppc64le",
|
||||
"https://github.com/docker/buildx/releases/download/v0.2.0/buildx-v0.2.0.linux-s390x",
|
||||
"https://github.com/docker/buildx/releases/download/v0.2.0/buildx-v0.2.0.windows-amd64.exe"
|
||||
]
|
||||
}
|
||||
}
|
||||
433 .github/workflows/build.yml vendored
@@ -1,14 +1,5 @@
name: build

# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
  contents: read

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
@@ -22,313 +13,55 @@ on:
    tags:
      - 'v*'
  pull_request:
    branches:
      - 'master'
      - 'v[0-9]*'
    paths-ignore:
      - '.github/releases.json'
      - 'README.md'
      - 'docs/**'

env:
  SETUP_BUILDX_VERSION: "edge"
  SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
  SCOUT_VERSION: "1.11.0"
  BUILDX_VERSION: "v0.10.0"
  BUILDKIT_IMAGE: "moby/buildkit:v0.11.1"
  REPO_SLUG: "docker/buildx-bin"
  DESTDIR: "./bin"
  TEST_CACHE_SCOPE: "test"
  TESTFLAGS: "-v --parallel=6 --timeout=30m"
  GOTESTSUM_FORMAT: "standard-verbose"
  GO_VERSION: "1.23"
  GOTESTSUM_VERSION: "v1.9.0" # same as one in Dockerfile

jobs:
  test-integration:
    runs-on: ubuntu-24.04
    env:
      TESTFLAGS_DOCKER: "-v --parallel=1 --timeout=30m"
      TEST_IMAGE_BUILD: "0"
      TEST_IMAGE_ID: "buildx-tests"
      TEST_COVERAGE: "1"
    strategy:
      fail-fast: false
      matrix:
        buildkit:
          - master
          - latest
          - buildx-stable-1
          - v0.19.0-rc2
          - v0.18.2
          - v0.17.2
        worker:
          - docker-container
          - remote
        pkg:
          - ./tests
        mode:
          - ""
          - experimental
        include:
          - worker: docker
            pkg: ./tests
          - worker: docker+containerd # same as docker, but with containerd snapshotter
            pkg: ./tests
          - worker: docker
            pkg: ./tests
            mode: experimental
          - worker: docker+containerd # same as docker, but with containerd snapshotter
            pkg: ./tests
            mode: experimental
          - worker: "docker@26.1"
            pkg: ./tests
          - worker: "docker+containerd@26.1" # same as docker, but with containerd snapshotter
            pkg: ./tests
          - worker: "docker@26.1"
            pkg: ./tests
            mode: experimental
          - worker: "docker+containerd@26.1" # same as docker, but with containerd snapshotter
            pkg: ./tests
            mode: experimental
  test:
    runs-on: ubuntu-22.04
    steps:
      -
        name: Prepare
        run: |
          echo "TESTREPORTS_NAME=${{ github.job }}-$(echo "${{ matrix.pkg }}-${{ matrix.buildkit }}-${{ matrix.worker }}-${{ matrix.mode }}" | tr -dc '[:alnum:]-\n\r' | tr '[:upper:]' '[:lower:]')" >> $GITHUB_ENV
          if [ -n "${{ matrix.buildkit }}" ]; then
            echo "TEST_BUILDKIT_TAG=${{ matrix.buildkit }}" >> $GITHUB_ENV
          fi
          testFlags="--run=//worker=$(echo "${{ matrix.worker }}" | sed 's/\+/\\+/g')$"
          case "${{ matrix.worker }}" in
            docker | docker+containerd | docker@* | docker+containerd@*)
              echo "TESTFLAGS=${{ env.TESTFLAGS_DOCKER }} $testFlags" >> $GITHUB_ENV
              ;;
            *)
              echo "TESTFLAGS=${{ env.TESTFLAGS }} $testFlags" >> $GITHUB_ENV
              ;;
          esac
          if [[ "${{ matrix.worker }}" == "docker"* ]]; then
            echo "TEST_DOCKERD=1" >> $GITHUB_ENV
          fi
          if [ "${{ matrix.mode }}" = "experimental" ]; then
            echo "TEST_BUILDX_EXPERIMENTAL=1" >> $GITHUB_ENV
          fi
      -
        name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v3
        uses: actions/checkout@v3
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        uses: docker/setup-buildx-action@v2
        with:
          version: ${{ env.SETUP_BUILDX_VERSION }}
          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
          version: ${{ env.BUILDX_VERSION }}
          driver-opts: image=${{ env.BUILDKIT_IMAGE }}
          buildkitd-flags: --debug
      -
        name: Build test image
        uses: docker/bake-action@v6
        name: Test
        uses: docker/bake-action@v2
        with:
          source: .
          targets: integration-test
          targets: test
          set: |
            *.output=type=docker,name=${{ env.TEST_IMAGE_ID }}
      -
        name: Test
        run: |
          ./hack/test
        env:
          TEST_REPORT_SUFFIX: "-${{ env.TESTREPORTS_NAME }}"
          TESTPKGS: "${{ matrix.pkg }}"
      -
        name: Send to Codecov
        if: always()
        uses: codecov/codecov-action@v5
        with:
          directory: ./bin/testreports
          flags: integration
          token: ${{ secrets.CODECOV_TOKEN }}
          disable_file_fixes: true
      -
        name: Generate annotations
        if: always()
        uses: crazy-max/.github/.github/actions/gotest-annotations@fa6141aedf23596fb8bdcceab9cce8dadaa31bd9
        with:
          directory: ./bin/testreports
      -
        name: Upload test reports
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: test-reports-${{ env.TESTREPORTS_NAME }}
          path: ./bin/testreports

  test-unit:
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os:
          - ubuntu-24.04
          - macos-14
          - windows-2022
    env:
      SKIP_INTEGRATION_TESTS: 1
    steps:
      -
        name: Checkout
        uses: actions/checkout@v4
      -
        name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: "${{ env.GO_VERSION }}"
      -
        name: Prepare
        run: |
          testreportsName=${{ github.job }}--${{ matrix.os }}
          testreportsBaseDir=./bin/testreports
          testreportsDir=$testreportsBaseDir/$testreportsName
          echo "TESTREPORTS_NAME=$testreportsName" >> $GITHUB_ENV
          echo "TESTREPORTS_BASEDIR=$testreportsBaseDir" >> $GITHUB_ENV
          echo "TESTREPORTS_DIR=$testreportsDir" >> $GITHUB_ENV
          mkdir -p $testreportsDir
        shell: bash
      -
        name: Install gotestsum
        run: |
          go install gotest.tools/gotestsum@${{ env.GOTESTSUM_VERSION }}
      -
        name: Test
        env:
          TMPDIR: ${{ runner.temp }}
        run: |
          gotestsum \
            --jsonfile="${{ env.TESTREPORTS_DIR }}/go-test-report.json" \
            --junitfile="${{ env.TESTREPORTS_DIR }}/junit-report.xml" \
            --packages="./..." \
            -- \
              "-mod=vendor" \
              "-coverprofile" "${{ env.TESTREPORTS_DIR }}/coverage.txt" \
              "-covermode" "atomic" ${{ env.TESTFLAGS }}
        shell: bash
      -
        name: Send to Codecov
        if: always()
        uses: codecov/codecov-action@v5
        with:
          directory: ${{ env.TESTREPORTS_DIR }}
          env_vars: RUNNER_OS
          flags: unit
          token: ${{ secrets.CODECOV_TOKEN }}
          disable_file_fixes: true
      -
        name: Generate annotations
        if: always()
        uses: crazy-max/.github/.github/actions/gotest-annotations@fa6141aedf23596fb8bdcceab9cce8dadaa31bd9
        with:
          directory: ${{ env.TESTREPORTS_DIR }}
      -
        name: Upload test reports
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: test-reports-${{ env.TESTREPORTS_NAME }}
          path: ${{ env.TESTREPORTS_BASEDIR }}

  test-bsd-unit:
    runs-on: ubuntu-22.04
    continue-on-error: true
    strategy:
      fail-fast: false
      matrix:
        os:
          - freebsd
          - openbsd
    steps:
      -
        name: Prepare
        run: |
          echo "VAGRANT_FILE=hack/Vagrantfile.${{ matrix.os }}" >> $GITHUB_ENV
      -
        name: Checkout
        uses: actions/checkout@v4
      -
        name: Cache Vagrant boxes
        uses: actions/cache@v4
        with:
          path: ~/.vagrant.d/boxes
          key: ${{ runner.os }}-vagrant-${{ matrix.os }}-${{ hashFiles(env.VAGRANT_FILE) }}
          restore-keys: |
            ${{ runner.os }}-vagrant-${{ matrix.os }}-
      -
        name: Install vagrant
        run: |
          set -x
          sudo apt-get update
          sudo apt-get install -y libvirt-daemon libvirt-daemon-system vagrant vagrant-libvirt ruby-libvirt
          sudo systemctl enable --now libvirtd
          sudo chmod a+rw /var/run/libvirt/libvirt-sock
          vagrant plugin install vagrant-libvirt
          vagrant --version
      -
        name: Set up vagrant
        run: |
          ln -sf ${{ env.VAGRANT_FILE }} Vagrantfile
          vagrant up --no-tty
      -
        name: Test
        run: |
          vagrant ssh -- "cd /vagrant; SKIP_INTEGRATION_TESTS=1 go test -mod=vendor -coverprofile=coverage.txt -covermode=atomic ${{ env.TESTFLAGS }} ./..."
          vagrant ssh -c "sudo cat /vagrant/coverage.txt" > coverage.txt
            *.cache-from=type=gha,scope=test
            *.cache-to=type=gha,scope=test
      -
        name: Upload coverage
        if: always()
        uses: codecov/codecov-action@v5
        uses: codecov/codecov-action@v3
        with:
          files: ./coverage.txt
          env_vars: RUNNER_OS
          flags: unit,${{ matrix.os }}
          token: ${{ secrets.CODECOV_TOKEN }}
        env:
          RUNNER_OS: ${{ matrix.os }}
          directory: ${{ env.DESTDIR }}/coverage

  govulncheck:
    runs-on: ubuntu-24.04
    permissions:
      # same as global permission
      contents: read
      # required to write sarif report
      security-events: write
    steps:
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: ${{ env.SETUP_BUILDX_VERSION }}
          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
          buildkitd-flags: --debug
      -
        name: Run
        uses: docker/bake-action@v6
        with:
          targets: govulncheck
        env:
          GOVULNCHECK_FORMAT: sarif
      -
        name: Upload SARIF report
        if: ${{ github.ref == 'refs/heads/master' && github.repository == 'docker/buildx' }}
        uses: github/codeql-action/upload-sarif@v3
        with:
          sarif_file: ${{ env.DESTDIR }}/govulncheck.out

  prepare-binaries:
    runs-on: ubuntu-24.04
  prepare:
    runs-on: ubuntu-22.04
    outputs:
      matrix: ${{ steps.platforms.outputs.matrix }}
    steps:
      -
        name: Checkout
        uses: actions/checkout@v4
        uses: actions/checkout@v3
      -
        name: Create matrix
        id: platforms
@@ -340,13 +73,13 @@ jobs:
          echo ${{ steps.platforms.outputs.matrix }}

  binaries:
    runs-on: ubuntu-24.04
    runs-on: ubuntu-22.04
    needs:
      - prepare-binaries
      - prepare
    strategy:
      fail-fast: false
      matrix:
        platform: ${{ fromJson(needs.prepare-binaries.outputs.matrix) }}
        platform: ${{ fromJson(needs.prepare.outputs.matrix) }}
    steps:
      -
        name: Prepare
@@ -355,16 +88,16 @@ jobs:
          echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
      -
        name: Checkout
        uses: actions/checkout@v4
        uses: actions/checkout@v3
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v3
        uses: docker/setup-qemu-action@v2
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        uses: docker/setup-buildx-action@v2
        with:
          version: ${{ env.SETUP_BUILDX_VERSION }}
          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
          version: ${{ env.BUILDX_VERSION }}
          driver-opts: image=${{ env.BUILDKIT_IMAGE }}
          buildkitd-flags: --debug
      -
        name: Build
@@ -376,33 +109,33 @@ jobs:
          CACHE_TO: type=gha,scope=binaries-${{ env.PLATFORM_PAIR }},mode=max
      -
        name: Upload artifacts
        uses: actions/upload-artifact@v4
        uses: actions/upload-artifact@v3
        with:
          name: buildx-${{ env.PLATFORM_PAIR }}
          name: buildx
          path: ${{ env.DESTDIR }}/*
          if-no-files-found: error

  bin-image:
    runs-on: ubuntu-24.04
    needs:
      - test-integration
      - test-unit
    if: ${{ github.event_name != 'pull_request' && github.repository == 'docker/buildx' }}
    runs-on: ubuntu-22.04
    if: ${{ github.event_name != 'pull_request' }}
    steps:
      -
        name: Checkout
        uses: actions/checkout@v3
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v3
        uses: docker/setup-qemu-action@v2
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        uses: docker/setup-buildx-action@v2
        with:
          version: ${{ env.SETUP_BUILDX_VERSION }}
          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
          version: ${{ env.BUILDX_VERSION }}
          driver-opts: image=${{ env.BUILDKIT_IMAGE }}
          buildkitd-flags: --debug
      -
        name: Docker meta
        id: meta
        uses: docker/metadata-action@v5
        uses: docker/metadata-action@v4
        with:
          images: |
            ${{ env.REPO_SLUG }}
@@ -414,75 +147,39 @@ jobs:
      -
        name: Login to DockerHub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v3
        uses: docker/login-action@v2
        with:
          username: ${{ vars.DOCKERPUBLICBOT_USERNAME }}
          password: ${{ secrets.DOCKERPUBLICBOT_WRITE_PAT }}
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      -
        name: Build and push image
        uses: docker/bake-action@v6
        uses: docker/bake-action@v2
        with:
          files: |
            ./docker-bake.hcl
            cwd://${{ steps.meta.outputs.bake-file }}
            ${{ steps.meta.outputs.bake-file }}
          targets: image-cross
          push: ${{ github.event_name != 'pull_request' }}
          sbom: true
          set: |
            *.cache-from=type=gha,scope=bin-image
            *.cache-to=type=gha,scope=bin-image,mode=max

  scout:
    runs-on: ubuntu-24.04
    if: ${{ github.ref == 'refs/heads/master' && github.repository == 'docker/buildx' }}
    permissions:
      # same as global permission
      contents: read
      # required to write sarif report
      security-events: write
    needs:
      - bin-image
    steps:
      -
        name: Login to DockerHub
        uses: docker/login-action@v3
        with:
          username: ${{ vars.DOCKERPUBLICBOT_USERNAME }}
          password: ${{ secrets.DOCKERPUBLICBOT_WRITE_PAT }}
      -
        name: Scout
        id: scout
        uses: crazy-max/.github/.github/actions/docker-scout@ccae1c98f1237b5c19e4ef77ace44fa68b3bc7e4
        with:
          version: ${{ env.SCOUT_VERSION }}
          format: sarif
          image: registry://${{ env.REPO_SLUG }}:master
      -
        name: Upload SARIF report
        uses: github/codeql-action/upload-sarif@v3
        with:
          sarif_file: ${{ steps.scout.outputs.result-file }}
            *.attest=type=sbom
            *.attest=type=provenance,mode=max,builder-id=https://github.com/${{ env.GITHUB_REPOSITORY }}/actions/runs/${{ env.GITHUB_RUN_ID }}

  release:
    runs-on: ubuntu-24.04
    permissions:
      # required to create GitHub release
      contents: write
    runs-on: ubuntu-22.04
    needs:
      - test-integration
      - test-unit
      - binaries
    steps:
      -
        name: Checkout
        uses: actions/checkout@v4
        uses: actions/checkout@v3
      -
        name: Download binaries
        uses: actions/download-artifact@v4
        uses: actions/download-artifact@v3
        with:
          name: buildx
          path: ${{ env.DESTDIR }}
          pattern: buildx-*
          merge-multiple: true
      -
        name: Create checksums
        run: ./hack/hash-files
@@ -497,9 +194,33 @@ jobs:
      -
        name: GitHub Release
        if: startsWith(github.ref, 'refs/tags/v')
        uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # v2.2.1
        uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v0.1.15
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          draft: true
          files: ${{ env.DESTDIR }}/*

  buildkit-edge:
    runs-on: ubuntu-22.04
    continue-on-error: true
    steps:
      -
        name: Checkout
        uses: actions/checkout@v3
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v2
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
        with:
          version: ${{ env.BUILDX_VERSION }}
          driver-opts: image=moby/buildkit:master
          buildkitd-flags: --debug
      -
        # Just run a bake target to check everything runs fine
        name: Build
        uses: docker/bake-action@v2
        with:
          targets: binaries

50 .github/workflows/codeql.yml vendored
@@ -1,50 +0,0 @@
name: codeql

# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
  contents: read

on:
  push:
    branches:
      - 'master'
      - 'v[0-9]*'
  pull_request:

env:
  GO_VERSION: "1.23"

jobs:
  codeql:
    runs-on: ubuntu-24.04
    permissions:
      contents: read
      actions: read
      security-events: write
    steps:
      -
        name: Checkout
        uses: actions/checkout@v4
      -
        name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: ${{ env.GO_VERSION }}
      -
        name: Initialize CodeQL
        uses: github/codeql-action/init@v3
        with:
          languages: go
      -
        name: Autobuild
        uses: github/codeql-action/autobuild@v3
      -
        name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v3
        with:
          category: "/language:go"

67 .github/workflows/docs-release.yml vendored
@@ -1,39 +1,18 @@
name: docs-release

# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
  contents: read

on:
  workflow_dispatch:
    inputs:
      tag:
        description: 'Git tag'
        required: true
  release:
    types:
      - released

env:
  SETUP_BUILDX_VERSION: "edge"
  SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"

jobs:
  open-pr:
    runs-on: ubuntu-24.04
    if: ${{ (github.event.release.prerelease != true || github.event.inputs.tag != '') && github.repository == 'docker/buildx' }}
    permissions:
      contents: write
      pull-requests: write
    runs-on: ubuntu-22.04
    if: "!github.event.release.prerelease"
    steps:
      -
        name: Checkout docs repo
        uses: actions/checkout@v4
        uses: actions/checkout@v3
        with:
          token: ${{ secrets.GHPAT_DOCS_DISPATCH }}
          repository: docker/docs
@@ -41,51 +20,39 @@ jobs:
      -
        name: Prepare
        run: |
          rm -rf ./data/buildx/*
          if [ -n "${{ github.event.inputs.tag }}" ]; then
            echo "RELEASE_NAME=${{ github.event.inputs.tag }}" >> $GITHUB_ENV
          else
            echo "RELEASE_NAME=${{ github.event.release.name }}" >> $GITHUB_ENV
          fi
          rm -rf ./_data/buildx/*
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: ${{ env.SETUP_BUILDX_VERSION }}
          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
          buildkitd-flags: --debug
        uses: docker/setup-buildx-action@v2
      -
        name: Generate yaml
        uses: docker/bake-action@v6
        name: Build docs
        uses: docker/bake-action@v2
        with:
          source: ${{ github.server_url }}/${{ github.repository }}.git#${{ env.RELEASE_NAME }}
          source: ${{ github.server_url }}/${{ github.repository }}.git#${{ github.event.release.name }}
          targets: update-docs
          provenance: false
          set: |
            *.output=/tmp/buildx-docs
        env:
          DOCS_FORMATS: yaml
      -
        name: Copy yaml
        name: Copy files
        run: |
          cp /tmp/buildx-docs/out/reference/*.yaml ./data/buildx/
          cp /tmp/buildx-docs/out/reference/*.yaml ./_data/buildx/
      -
        name: Update vendor
        name: Commit changes
        run: |
          make vendor
        env:
          VENDOR_MODULE: github.com/docker/buildx@${{ env.RELEASE_NAME }}
          git add -A .
      -
        name: Create PR on docs repo
        uses: peter-evans/create-pull-request@67ccf781d68cd99b580ae25a5c18a1cc84ffff1f # v7.0.6
        uses: peter-evans/create-pull-request@2b011faafdcbc9ceb11414d64d0573f37c774b04
        with:
          token: ${{ secrets.GHPAT_DOCS_DISPATCH }}
          push-to-fork: docker-tools-robot/docker.github.io
          commit-message: "vendor: github.com/docker/buildx ${{ env.RELEASE_NAME }}"
          commit-message: "build: update buildx reference to ${{ github.event.release.name }}"
          signoff: true
          branch: dispatch/buildx-ref-${{ env.RELEASE_NAME }}
          branch: dispatch/buildx-ref-${{ github.event.release.name }}
          delete-branch: true
          title: Update buildx reference to ${{ env.RELEASE_NAME }}
          title: Update buildx reference to ${{ github.event.release.name }}
          body: |
            Update the buildx reference documentation to keep in sync with the latest release `${{ env.RELEASE_NAME }}`
            Update the buildx reference documentation to keep in sync with the latest release `${{ github.event.release.name }}`
          draft: false

32 .github/workflows/docs-upstream.yml vendored
@@ -3,15 +3,6 @@
# https://github.com/docker/docker.github.io/blob/98c7c9535063ae4cd2cd0a31478a21d16d2f07a3/docker-bake.hcl#L34-L36
name: docs-upstream

# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
  contents: read

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
@@ -29,27 +20,23 @@ on:
      - '.github/workflows/docs-upstream.yml'
      - 'docs/**'

env:
  SETUP_BUILDX_VERSION: "edge"
  SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"

jobs:
  docs-yaml:
    runs-on: ubuntu-24.04
    runs-on: ubuntu-22.04
    steps:
      -
        name: Checkout
        uses: actions/checkout@v3
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        uses: docker/setup-buildx-action@v2
        with:
          version: ${{ env.SETUP_BUILDX_VERSION }}
          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
          buildkitd-flags: --debug
          version: latest
      -
        name: Build reference YAML docs
        uses: docker/bake-action@v6
        uses: docker/bake-action@v2
        with:
          targets: update-docs
          provenance: false
          set: |
            *.output=/tmp/buildx-docs
            *.cache-from=type=gha,scope=docs-yaml
@@ -58,7 +45,7 @@ jobs:
          DOCS_FORMATS: yaml
      -
        name: Upload reference YAML docs
        uses: actions/upload-artifact@v4
        uses: actions/upload-artifact@v3
        with:
          name: docs-yaml
          path: /tmp/buildx-docs/out/reference
@@ -69,7 +56,6 @@ jobs:
    needs:
      - docs-yaml
    with:
      module-name: docker/buildx
      repo: https://github.com/${{ github.repository }}
      data-files-id: docs-yaml
      data-files-folder: buildx
      create-placeholder-stubs: true

179 .github/workflows/e2e.yml vendored
@@ -1,14 +1,5 @@
name: e2e

# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
  contents: read

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
@@ -20,31 +11,31 @@ on:
      - 'master'
      - 'v[0-9]*'
  pull_request:
    branches:
      - 'master'
      - 'v[0-9]*'
    paths-ignore:
      - '.github/releases.json'
      - 'README.md'
      - 'docs/**'

env:
  SETUP_BUILDX_VERSION: "edge"
  SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
  DESTDIR: "./bin"
  K3S_VERSION: "v1.21.2-k3s1"

jobs:
  build:
    runs-on: ubuntu-24.04
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        uses: docker/setup-buildx-action@v2
        with:
          version: ${{ env.SETUP_BUILDX_VERSION }}
          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
          buildkitd-flags: --debug
          version: latest
      -
        name: Build
        uses: docker/bake-action@v6
        uses: docker/bake-action@v2
        with:
          targets: binaries
          set: |
@@ -57,7 +48,7 @@ jobs:
          mv ${{ env.DESTDIR }}/build/buildx ${{ env.DESTDIR }}/build/docker-buildx
      -
        name: Upload artifacts
        uses: actions/upload-artifact@v4
        uses: actions/upload-artifact@v3
        with:
          name: binary
          path: ${{ env.DESTDIR }}/build
@@ -93,10 +84,6 @@ jobs:
            driver-opt: qemu.install=true
          - driver: remote
            endpoint: tcp://localhost:1234
          - driver: docker-container
            metadata-provenance: max
          - driver: docker-container
            metadata-warnings: true
        exclude:
          - driver: docker
            multi-node: mnode-true
@@ -111,14 +98,14 @@ jobs:
    steps:
      -
        name: Checkout
        uses: actions/checkout@v4
        uses: actions/checkout@v3
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v3
        uses: docker/setup-qemu-action@v2
        if: matrix.driver == 'docker' || matrix.driver == 'docker-container'
      -
        name: Install buildx
        uses: actions/download-artifact@v4
        uses: actions/download-artifact@v3
        with:
          name: binary
          path: /home/runner/.docker/cli-plugins
@@ -144,18 +131,70 @@ jobs:
          else
            echo "MULTI_NODE=0" >> $GITHUB_ENV
          fi
          if [ -n "${{ matrix.metadata-provenance }}" ]; then
            echo "BUILDX_METADATA_PROVENANCE=${{ matrix.metadata-provenance }}" >> $GITHUB_ENV
          fi
          if [ -n "${{ matrix.metadata-warnings }}" ]; then
            echo "BUILDX_METADATA_WARNINGS=${{ matrix.metadata-warnings }}" >> $GITHUB_ENV
          fi
      -
        name: Install k3s
        if: matrix.driver == 'kubernetes'
        uses: crazy-max/.github/.github/actions/install-k3s@fa6141aedf23596fb8bdcceab9cce8dadaa31bd9
        uses: actions/github-script@v6
        with:
          version: ${{ env.K3S_VERSION }}
          script: |
            const fs = require('fs');

            let wait = function(milliseconds) {
              return new Promise((resolve, reject) => {
                if (typeof(milliseconds) !== 'number') {
                  throw new Error('milliseconds not a number');
                }
                setTimeout(() => resolve("done!"), milliseconds)
              });
            }

            try {
              const kubeconfig="/tmp/buildkit-k3s/kubeconfig.yaml";
              core.info(`storing kubeconfig in ${kubeconfig}`);

              await exec.exec('docker', ["run", "-d",
                "--privileged",
                "--name=buildkit-k3s",
                "-e", "K3S_KUBECONFIG_OUTPUT="+kubeconfig,
                "-e", "K3S_KUBECONFIG_MODE=666",
                "-v", "/tmp/buildkit-k3s:/tmp/buildkit-k3s",
                "-p", "6443:6443",
                "-p", "80:80",
                "-p", "443:443",
                "-p", "8080:8080",
                "rancher/k3s:${{ env.K3S_VERSION }}", "server"
              ]);
              await wait(10000);

              core.exportVariable('KUBECONFIG', kubeconfig);

              let nodeName;
              for (let count = 1; count <= 5; count++) {
                try {
                  const nodeNameOutput = await exec.getExecOutput("kubectl get nodes --no-headers -oname");
                  nodeName = nodeNameOutput.stdout
                } catch (error) {
                  core.info(`Unable to resolve node name (${error.message}). Attempt ${count} of 5.`)
                } finally {
                  if (nodeName) {
                    break;
                  }
                  await wait(5000);
                }
              }
              if (!nodeName) {
                throw new Error(`Unable to resolve node name after 5 attempts.`);
              }

              await exec.exec(`kubectl wait --for=condition=Ready ${nodeName}`);
            } catch (error) {
              core.setFailed(error.message);
            }
      -
        name: Print KUBECONFIG
        if: matrix.driver == 'kubernetes'
        run: |
          yq ${{ env.KUBECONFIG }}
      -
        name: Launch remote buildkitd
        if: matrix.driver == 'remote'
@@ -177,75 +216,3 @@ jobs:
          DRIVER_OPT: ${{ matrix.driver-opt }}
          ENDPOINT: ${{ matrix.endpoint }}
          PLATFORMS: ${{ matrix.platforms }}

  bake:
    runs-on: ubuntu-24.04
    needs:
      - build
    env:
      DOCKER_BUILD_CHECKS_ANNOTATIONS: false
      DOCKER_BUILD_SUMMARY: false
    strategy:
      fail-fast: false
      matrix:
        include:
          -
            # https://github.com/docker/bake-action/blob/v5.11.0/.github/workflows/ci.yml#L227-L237
            source: "https://github.com/docker/bake-action.git#v5.11.0:test/go"
            overrides: |
              *.output=/tmp/bake-build
          -
            # https://github.com/tonistiigi/xx/blob/2fc85604e7280bfb3f626569bd4c5413c43eb4af/.github/workflows/ld.yml#L90-L98
            source: "https://github.com/tonistiigi/xx.git#2fc85604e7280bfb3f626569bd4c5413c43eb4af"
            targets: |
              ld64-static-tgz
            overrides: |
              ld64-static-tgz.output=type=local,dest=./dist
              ld64-static-tgz.platform=linux/amd64
              ld64-static-tgz.cache-from=type=gha,scope=xx-ld64-static-tgz
              ld64-static-tgz.cache-to=type=gha,scope=xx-ld64-static-tgz
          -
            # https://github.com/moby/buildkit-bench/blob/54c194011c4fc99a94aa75d4b3d4f3ffd4c4ce27/docker-bake.hcl#L154-L160
            source: "https://github.com/moby/buildkit-bench.git#54c194011c4fc99a94aa75d4b3d4f3ffd4c4ce27"
            targets: |
              tests-buildkit
            envs: |
              BUILDKIT_REFS=v0.18.2
    steps:
      -
        name: Checkout
        uses: actions/checkout@v4
      -
        name: Environment variables
        if: matrix.envs != ''
        run: |
          for l in "${{ matrix.envs }}"; do
            echo "${l?}" >> $GITHUB_ENV
          done
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      -
        name: Install buildx
        uses: actions/download-artifact@v4
        with:
          name: binary
          path: /home/runner/.docker/cli-plugins
      -
        name: Fix perms and check
        run: |
          chmod +x /home/runner/.docker/cli-plugins/docker-buildx
          docker buildx version
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
          buildkitd-flags: --debug
      -
        name: Build
        uses: docker/bake-action@v6
        with:
          source: ${{ matrix.source }}
          targets: ${{ matrix.targets }}
          set: ${{ matrix.overrides }}

32 .github/workflows/labeler.yml vendored
@@ -1,32 +0,0 @@
name: labeler

# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
  contents: read

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

on:
  pull_request_target:

jobs:
  labeler:
    runs-on: ubuntu-latest
    permissions:
      # same as global permission
      contents: read
      # required for writing labels
      pull-requests: write
    steps:
      -
        name: Run
        uses: actions/labeler@v5
        with:
          sync-labels: true

98 .github/workflows/validate.yml vendored
@@ -1,14 +1,5 @@
name: validate

# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
  contents: read

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
@@ -22,89 +13,30 @@ on:
    tags:
      - 'v*'
  pull_request:
    paths-ignore:
      - '.github/releases.json'

env:
  SETUP_BUILDX_VERSION: "edge"
  SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
    branches:
      - 'master'
      - 'v[0-9]*'

jobs:
  prepare:
    runs-on: ubuntu-24.04
    outputs:
      includes: ${{ steps.matrix.outputs.includes }}
    steps:
      -
        name: Checkout
        uses: actions/checkout@v4
      -
        name: Matrix
        id: matrix
        uses: actions/github-script@v7
        with:
          script: |
            let def = {};
            await core.group(`Parsing definition`, async () => {
              const printEnv = Object.assign({}, process.env, {
                GOLANGCI_LINT_MULTIPLATFORM: process.env.GITHUB_REPOSITORY === 'docker/buildx' ? '1' : ''
              });
              const resPrint = await exec.getExecOutput('docker', ['buildx', 'bake', 'validate', '--print'], {
                ignoreReturnCode: true,
                env: printEnv
              });
              if (resPrint.stderr.length > 0 && resPrint.exitCode != 0) {
                throw new Error(resPrint.stderr);
              }
              def = JSON.parse(resPrint.stdout.trim());
            });
            await core.group(`Generating matrix`, async () => {
              const includes = [];
              for (const targetName of Object.keys(def.target)) {
                const target = def.target[targetName];
                if (target.platforms && target.platforms.length > 0) {
                  target.platforms.forEach(platform => {
                    includes.push({
                      target: targetName,
                      platform: platform
                    });
                  });
                } else {
                  includes.push({
                    target: targetName
                  });
                }
              }
              core.info(JSON.stringify(includes, null, 2));
              core.setOutput('includes', JSON.stringify(includes));
            });

  validate:
    runs-on: ubuntu-24.04
    needs:
      - prepare
    runs-on: ubuntu-22.04
    strategy:
      fail-fast: false
      matrix:
        include: ${{ fromJson(needs.prepare.outputs.includes) }}
        target:
          - lint
          - validate-vendor
          - validate-docs
    steps:
      -
        name: Prepare
        run: |
          if [ "$GITHUB_REPOSITORY" = "docker/buildx" ]; then
            echo "GOLANGCI_LINT_MULTIPLATFORM=1" >> $GITHUB_ENV
          fi
        name: Checkout
        uses: actions/checkout@v3
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        uses: docker/setup-buildx-action@v2
        with:
          version: ${{ env.SETUP_BUILDX_VERSION }}
          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
          buildkitd-flags: --debug
          version: latest
      -
        name: Validate
        uses: docker/bake-action@v6
        with:
          targets: ${{ matrix.target }}
          set: |
            *.platform=${{ matrix.platform }}
        name: Run
        run: |
          make ${{ matrix.target }}

115 .golangci.yml
@@ -1,119 +1,40 @@
|
||||
run:
|
||||
timeout: 30m
|
||||
timeout: 10m
|
||||
skip-files:
|
||||
- ".*\\.pb\\.go$"
|
||||
|
||||
modules-download-mode: vendor
|
||||
# default uses Go version from the go.mod file, fallback on the env var
|
||||
# `GOVERSION`, fallback on 1.17: https://golangci-lint.run/usage/configuration/#run-configuration
|
||||
go: "1.23"
|
||||
|
||||
build-tags:
|
||||
|
||||
linters:
|
||||
enable:
|
||||
- bodyclose
|
||||
- depguard
|
||||
- forbidigo
|
||||
- gocritic
|
||||
- gofmt
|
||||
- goimports
|
||||
- gosec
|
||||
- gosimple
|
||||
- govet
|
||||
- deadcode
|
||||
- depguard
|
||||
- goimports
|
||||
- ineffassign
|
||||
- makezero
|
||||
- misspell
|
||||
- noctx
|
||||
- nolintlint
|
||||
- unused
|
||||
- varcheck
|
||||
- revive
|
||||
- staticcheck
|
||||
- testifylint
|
||||
- typecheck
|
||||
- unused
|
||||
- whitespace
|
||||
- nolintlint
|
||||
disable-all: true
|
||||
|
||||
linters-settings:
|
||||
gocritic:
|
||||
disabled-checks:
|
||||
- "ifElseChain"
|
||||
- "assignOp"
|
||||
- "appendAssign"
|
||||
- "singleCaseSwitch"
|
||||
- "exitAfterDefer" # FIXME
|
||||
importas:
|
||||
alias:
|
||||
# Enforce alias to prevent it accidentally being used instead of
|
||||
# buildkit errdefs package (or vice-versa).
|
||||
- pkg: "github.com/containerd/errdefs"
|
||||
alias: "cerrdefs"
|
||||
- pkg: "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
alias: "ocispecs"
|
||||
- pkg: "github.com/opencontainers/go-digest"
|
||||
alias: "digest"
|
||||
govet:
|
||||
enable:
|
||||
- nilness
|
||||
- unusedwrite
|
||||
# enable-all: true
|
||||
# disable:
|
||||
# - fieldalignment
|
||||
# - shadow
|
||||
depguard:
|
||||
rules:
|
||||
main:
|
||||
deny:
|
||||
- pkg: "github.com/containerd/containerd/errdefs"
|
||||
desc: The containerd errdefs package was migrated to a separate module. Use github.com/containerd/errdefs instead.
|
||||
- pkg: "github.com/containerd/containerd/log"
|
||||
desc: The containerd log package was migrated to a separate module. Use github.com/containerd/log instead.
|
||||
- pkg: "github.com/containerd/containerd/platforms"
|
||||
desc: The containerd platforms package was migrated to a separate module. Use github.com/containerd/platforms instead.
|
||||
- pkg: "io/ioutil"
|
||||
desc: The io/ioutil package has been deprecated.
|
||||
forbidigo:
|
||||
forbid:
|
||||
- '^context\.WithCancel(# use context\.WithCancelCause instead)?$'
|
||||
- '^context\.WithDeadline(# use context\.WithDeadline instead)?$'
|
||||
- '^context\.WithTimeout(# use context\.WithTimeoutCause instead)?$'
|
||||
- '^ctx\.Err(# use context\.Cause instead)?$'
|
||||
- '^fmt\.Errorf(# use errors\.Errorf instead)?$'
|
||||
- '^platforms\.DefaultString(# use platforms\.Format(platforms\.DefaultSpec()) instead\.)?$'
|
||||
gosec:
|
||||
excludes:
|
||||
- G204 # Audit use of command execution
|
||||
- G402 # TLS MinVersion too low
|
||||
- G115 # integer overflow conversion (TODO: verify these)
|
||||
config:
|
||||
G306: "0644"
|
||||
testifylint:
|
||||
disable:
|
||||
# disable rules that reduce the test condition
|
||||
- "empty"
|
||||
- "bool-compare"
|
||||
- "len"
|
||||
- "negative-positive"
|
||||
|
||||
list-type: blacklist
|
||||
include-go-root: true
|
||||
packages:
|
||||
# The io/ioutil package has been deprecated.
|
||||
# https://go.dev/doc/go1.16#ioutil
|
||||
- io/ioutil
|
||||
|
||||
issues:
|
||||
exclude-files:
|
||||
- ".*\\.pb\\.go$"
|
||||
exclude-rules:
|
||||
- linters:
|
||||
- revive
|
||||
text: "stutters"
|
||||
- linters:
|
||||
- revive
|
||||
text: "empty-block"
|
||||
- linters:
|
||||
- revive
|
||||
text: "superfluous-else"
|
||||
- linters:
|
||||
- revive
|
||||
text: "unused-parameter"
|
||||
- linters:
|
||||
- revive
|
||||
text: "redefines-builtin-id"
|
||||
- linters:
|
||||
- revive
|
||||
text: "if-return"
|
||||
|
||||
# show all
|
||||
max-issues-per-linter: 0
|
||||
max-same-issues: 0
|
||||
|
||||
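The `importas`, `depguard`, and `forbidigo` rules above combine into a house style for new Go code in this repository. A minimal illustrative sketch that passes them (written for this note, not taken from the repository):

```go
// lintstyle shows code that the lint rules above would accept.
package main

import (
	"context"
	"time"

	cerrdefs "github.com/containerd/errdefs" // alias enforced by importas; depguard bans the old containerd path
	"github.com/pkg/errors"                  // errors.Errorf preferred over fmt.Errorf by forbidigo
)

// wait blocks until the context is done and reports the recorded cause.
func wait(parent context.Context) error {
	// context.WithTimeout is forbidden; the Cause variant records why it fired.
	ctx, cancel := context.WithTimeoutCause(parent, 10*time.Millisecond, errors.Errorf("wait timed out"))
	defer cancel()

	<-ctx.Done()
	// ctx.Err() is forbidden; context.Cause returns the cause set above.
	err := context.Cause(ctx)
	if cerrdefs.IsNotFound(err) {
		return nil
	}
	return err
}

func main() {
	if err := wait(context.Background()); err != nil {
		println(err.Error())
	}
}
```

The cause-aware context APIs require Go 1.21 or newer, which is consistent with the `go: "1.23"` setting above.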
14 .mailmap
@@ -1,25 +1,11 @@
# This file lists all individuals having contributed content to the repository.
# For how it is generated, see hack/dockerfiles/authors.Dockerfile.

Batuhan Apaydın <batuhan.apaydin@trendyol.com>
Batuhan Apaydın <batuhan.apaydin@trendyol.com> <developerguy2@gmail.com>
CrazyMax <github@crazymax.dev>
CrazyMax <github@crazymax.dev> <1951866+crazy-max@users.noreply.github.com>
CrazyMax <github@crazymax.dev> <crazy-max@users.noreply.github.com>
David Karlsson <david.karlsson@docker.com>
David Karlsson <david.karlsson@docker.com> <35727626+dvdksn@users.noreply.github.com>
jaihwan104 <jaihwan104@woowahan.com>
jaihwan104 <jaihwan104@woowahan.com> <42341126+jaihwan104@users.noreply.github.com>
Kenyon Ralph <kenyon@kenyonralph.com>
Kenyon Ralph <kenyon@kenyonralph.com> <quic_kralph@quicinc.com>
Sebastiaan van Stijn <github@gone.nl>
Sebastiaan van Stijn <github@gone.nl> <thaJeztah@users.noreply.github.com>
Shaun Thompson <shaun.thompson@docker.com>
Shaun Thompson <shaun.thompson@docker.com> <shaun.b.thompson@gmail.com>
Silvin Lubecki <silvin.lubecki@docker.com>
Silvin Lubecki <silvin.lubecki@docker.com> <31478878+silvin-lubecki@users.noreply.github.com>
Talon Bowler <talon.bowler@docker.com>
Talon Bowler <talon.bowler@docker.com> <nolat301@gmail.com>
Tibor Vass <tibor@docker.com>
Tibor Vass <tibor@docker.com> <tiborvass@users.noreply.github.com>
Tõnis Tiigi <tonistiigi@gmail.com>

69 AUTHORS
@@ -1,112 +1,45 @@
# This file lists all individuals having contributed content to the repository.
# For how it is generated, see hack/dockerfiles/authors.Dockerfile.

accetto <34798830+accetto@users.noreply.github.com>
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
Aleksa Sarai <cyphar@cyphar.com>
Alex Couture-Beil <alex@earthly.dev>
Andrew Haines <andrew.haines@zencargo.com>
Andy Caldwell <andrew.caldwell@metaswitch.com>
Andy MacKinlay <admackin@users.noreply.github.com>
Anthony Poschen <zanven42@gmail.com>
Arnold Sobanski <arnold@l4g.dev>
Artur Klauser <Artur.Klauser@computer.org>
Avi Deitcher <avi@deitcher.net>
Batuhan Apaydın <batuhan.apaydin@trendyol.com>
Ben Peachey <potherca@gmail.com>
Bertrand Paquet <bertrand.paquet@gmail.com>
Batuhan Apaydın <developerguy2@gmail.com>
Bin Du <bindu@microsoft.com>
Brandon Philips <brandon@ifup.org>
Brian Goff <cpuguy83@gmail.com>
Bryce Lampe <bryce@pulumi.com>
Cameron Adams <pnzreba@gmail.com>
Christian Dupuis <cd@atomist.com>
Cory Snider <csnider@mirantis.com>
CrazyMax <github@crazymax.dev>
David Gageot <david.gageot@docker.com>
David Karlsson <david.karlsson@docker.com>
David Scott <dave@recoil.org>
dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Devin Bayer <dev@doubly.so>
Djordje Lukic <djordje.lukic@docker.com>
Dmitry Makovey <dmakovey@gitlab.com>
Dmytro Makovey <dmytro.makovey@docker.com>
Donghui Wang <977675308@qq.com>
Doug Borg <dougborg@apple.com>
Edgar Lee <edgarl@netflix.com>
Eli Treuherz <et@arenko.group>
Eliott Wiener <eliottwiener@gmail.com>
Elran Shefer <elran.shefer@velocity.tech>
faust <faustin@fala.red>
Felipe Santos <felipecassiors@gmail.com>
Felix de Souza <fdesouza@palantir.com>
Fernando Miguel <github@FernandoMiguel.net>
gfrancesco <gfrancesco@users.noreply.github.com>
gracenoah <gracenoahgh@gmail.com>
Guillaume Lours <705411+glours@users.noreply.github.com>
guoguangwu <guoguangwu@magic-shield.com>
Hollow Man <hollowman@hollowman.ml>
Ian King'ori <kingorim.ian@gmail.com>
idnandre <andre@idntimes.com>
Ilya Dmitrichenko <errordeveloper@gmail.com>
Isaac Gaskin <isaac.gaskin@circle.com>
Jack Laxson <jackjrabbit@gmail.com>
jaihwan104 <jaihwan104@woowahan.com>
Jean-Yves Gastaud <jygastaud@gmail.com>
Jhan S. Álvarez <51450231+yastanotheruser@users.noreply.github.com>
Jonathan A. Sternberg <jonathan.sternberg@docker.com>
Jonathan Piché <jpiche@coveo.com>
Justin Chadwell <me@jedevc.com>
Kenyon Ralph <kenyon@kenyonralph.com>
khs1994 <khs1994@khs1994.com>
Kijima Daigo <norimaking777@gmail.com>
Kohei Tokunaga <ktokunaga.mail@gmail.com>
Kotaro Adachi <k33asby@gmail.com>
Kushagra Mansingh <12158241+kushmansingh@users.noreply.github.com>
l00397676 <lujingxiao@huawei.com>
Laura Brehm <laurabrehm@hey.com>
Laurent Goderre <laurent.goderre@docker.com>
Mark Hildreth <113933455+markhildreth-gravity@users.noreply.github.com>
Mayeul Blanzat <mayeul.blanzat@datadoghq.com>
Michal Augustyn <michal.augustyn@mail.com>
Milas Bowman <milas.bowman@docker.com>
Mitsuru Kariya <mitsuru.kariya@nttdata.com>
Moleus <fafufuburr@gmail.com>
Nick Santos <nick.santos@docker.com>
Nick Sieger <nick@nicksieger.com>
Nicolas De Loof <nicolas.deloof@gmail.com>
Niklas Gehlen <niklas@namespacelabs.com>
Patrick Van Stee <patrick@vanstee.me>
Paweł Gronowski <pawel.gronowski@docker.com>
Phong Tran <tran.pho@northeastern.edu>
Qasim Sarfraz <qasimsarfraz@microsoft.com>
Rob Murray <rob.murray@docker.com>
robertlestak <robert.lestak@umusic.com>
Saul Shanabrook <s.shanabrook@gmail.com>
Sean P. Kane <spkane00@gmail.com>
Sebastiaan van Stijn <github@gone.nl>
Shaun Thompson <shaun.thompson@docker.com>
SHIMA Tatsuya <ts1s1andn@gmail.com>
Silvin Lubecki <silvin.lubecki@docker.com>
Simon A. Eugster <simon.eu@gmail.com>
Solomon Hykes <sh.github.6811@hykes.org>
Sumner Warren <sumner.warren@gmail.com>
Sune Keller <absukl@almbrand.dk>
Talon Bowler <talon.bowler@docker.com>
Tianon Gravi <admwiggin@gmail.com>
Tibor Vass <tibor@docker.com>
Tim Smith <tismith@rvohealth.com>
Timofey Kirillov <timofey.kirillov@flant.com>
Tyler Smith <tylerlwsmith@gmail.com>
Tõnis Tiigi <tonistiigi@gmail.com>
Ulysses Souza <ulyssessouza@gmail.com>
Usual Coder <34403413+Usual-Coder@users.noreply.github.com>
Wang Jinglei <morlay.null@gmail.com>
Wei <daviseago@gmail.com>
Wojciech M <wmiedzybrodzki@outlook.com>
Xiang Dai <764524258@qq.com>
Zachary Povey <zachary.povey@autotrader.co.uk>
zelahi <elahi.zuhayr@gmail.com>
Zero <tobewhatwewant@gmail.com>
zhyon404 <zhyong4@gmail.com>
Zsolt <zsolt.szeberenyi@figured.com>

99 Dockerfile
@@ -1,27 +1,15 @@
# syntax=docker/dockerfile:1
# syntax=docker/dockerfile-upstream:1.5.0

ARG GO_VERSION=1.23
ARG ALPINE_VERSION=3.21
ARG XX_VERSION=1.6.1
ARG GO_VERSION=1.19
ARG XX_VERSION=1.1.2
ARG DOCKERD_VERSION=20.10.14

# for testing
ARG DOCKER_VERSION=27.5.0
ARG DOCKER_VERSION_ALT_26=26.1.3
ARG DOCKER_CLI_VERSION=${DOCKER_VERSION}
ARG GOTESTSUM_VERSION=v1.12.0
ARG REGISTRY_VERSION=2.8.3
ARG BUILDKIT_VERSION=v0.19.0-rc2
ARG UNDOCK_VERSION=0.9.0
FROM docker:$DOCKERD_VERSION AS dockerd-release

# xx is a helper for cross-compilation
FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS golatest
FROM moby/moby-bin:$DOCKER_VERSION AS docker-engine
FROM dockereng/cli-bin:$DOCKER_CLI_VERSION AS docker-cli
FROM moby/moby-bin:$DOCKER_VERSION_ALT_26 AS docker-engine-alt
FROM dockereng/cli-bin:$DOCKER_VERSION_ALT_26 AS docker-cli-alt
FROM registry:$REGISTRY_VERSION AS registry
FROM moby/buildkit:$BUILDKIT_VERSION AS buildkit
FROM crazymax/undock:$UNDOCK_VERSION AS undock

FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS golatest

FROM golatest AS gobase
COPY --from=xx / /
@@ -30,39 +18,6 @@ ENV GOFLAGS=-mod=vendor
ENV CGO_ENABLED=0
WORKDIR /src

FROM gobase AS gotestsum
ARG GOTESTSUM_VERSION
ENV GOFLAGS=""
RUN --mount=target=/root/.cache,type=cache <<EOT
set -ex
go install "gotest.tools/gotestsum@${GOTESTSUM_VERSION}"
go install "github.com/wadey/gocovmerge@latest"
mkdir /out
/go/bin/gotestsum --version
mv /go/bin/gotestsum /out
mv /go/bin/gocovmerge /out
EOT
COPY --chmod=755 <<"EOF" /out/gotestsumandcover
#!/bin/sh
set -x
if [ -z "$GO_TEST_COVERPROFILE" ]; then
exec gotestsum "$@"
fi
coverdir="$(dirname "$GO_TEST_COVERPROFILE")"
mkdir -p "$coverdir/helpers"
gotestsum "$@" "-coverprofile=$GO_TEST_COVERPROFILE"
ecode=$?
go tool covdata textfmt -i=$coverdir/helpers -o=$coverdir/helpers-report.txt
gocovmerge "$coverdir/helpers-report.txt" "$GO_TEST_COVERPROFILE" > "$coverdir/merged-report.txt"
mv "$coverdir/merged-report.txt" "$GO_TEST_COVERPROFILE"
rm "$coverdir/helpers-report.txt"
for f in "$coverdir/helpers"/*; do
rm "$f"
done
rmdir "$coverdir/helpers"
exit $ecode
EOF

FROM gobase AS buildx-version
RUN --mount=type=bind,target=. <<EOT
set -e
@@ -73,7 +28,6 @@ EOT

FROM gobase AS buildx-build
ARG TARGETPLATFORM
ARG GO_EXTRA_FLAGS
RUN --mount=type=bind,target=. \
--mount=type=cache,target=/root/.cache \
--mount=type=cache,target=/go/pkg/mod \
@@ -81,12 +35,10 @@ RUN --mount=type=bind,target=. \
set -e
xx-go --wrap
DESTDIR=/usr/bin VERSION=$(cat /buildx-version/version) REVISION=$(cat /buildx-version/revision) GO_EXTRA_LDFLAGS="-s -w" ./hack/build
file /usr/bin/docker-buildx
xx-verify --static /usr/bin/docker-buildx
EOT

FROM gobase AS test
ENV SKIP_INTEGRATION_TESTS=1
RUN --mount=type=bind,target=. \
--mount=type=cache,target=/root/.cache \
--mount=type=cache,target=/go/pkg/mod \
@@ -100,9 +52,7 @@ FROM scratch AS binaries-unix
COPY --link --from=buildx-build /usr/bin/docker-buildx /buildx

FROM binaries-unix AS binaries-darwin
FROM binaries-unix AS binaries-freebsd
FROM binaries-unix AS binaries-linux
FROM binaries-unix AS binaries-openbsd

FROM scratch AS binaries-windows
COPY --link --from=buildx-build /usr/bin/docker-buildx /buildx.exe
@@ -111,35 +61,8 @@ FROM binaries-$TARGETOS AS binaries
# enable scanning for this stage
ARG BUILDKIT_SBOM_SCAN_STAGE=true

FROM gobase AS integration-test-base
# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies
RUN apk add --no-cache \
btrfs-progs \
e2fsprogs \
e2fsprogs-extra \
ip6tables \
iptables \
openssl \
shadow-uidmap \
xfsprogs \
xz
COPY --link --from=gotestsum /out /usr/bin/
COPY --link --from=registry /bin/registry /usr/bin/
COPY --link --from=docker-engine / /usr/bin/
COPY --link --from=docker-cli / /usr/bin/
COPY --link --from=docker-engine-alt / /opt/docker-alt-26/
COPY --link --from=docker-cli-alt / /opt/docker-alt-26/
COPY --link --from=buildkit /usr/bin/buildkitd /usr/bin/
COPY --link --from=buildkit /usr/bin/buildctl /usr/bin/
COPY --link --from=undock /usr/local/bin/undock /usr/bin/
COPY --link --from=binaries /buildx /usr/bin/
ENV TEST_DOCKER_EXTRA="docker@26.1=/opt/docker-alt-26"

FROM integration-test-base AS integration-test
COPY . .

# Release
FROM --platform=$BUILDPLATFORM alpine:${ALPINE_VERSION} AS releaser
FROM --platform=$BUILDPLATFORM alpine AS releaser
WORKDIR /work
ARG TARGETPLATFORM
RUN --mount=from=binaries \
@@ -153,8 +76,8 @@ FROM scratch AS release
COPY --from=releaser /out/ /

# Shell
FROM docker:$DOCKER_VERSION AS dockerd-release
FROM alpine:${ALPINE_VERSION} AS shell
FROM docker:$DOCKERD_VERSION AS dockerd-release
FROM alpine AS shell
RUN apk add --no-cache iptables tmux git vim less openssh
RUN mkdir -p /usr/local/lib/docker/cli-plugins && ln -s /usr/local/bin/buildx /usr/local/lib/docker/cli-plugins/docker-buildx
COPY ./hack/demo-env/entrypoint.sh /usr/local/bin

MAINTAINERS
@@ -153,7 +153,6 @@ made through a pull request.
"akihirosuda",
"crazy-max",
"jedevc",
"jsternberg",
"tiborvass",
"tonistiigi",
]
@@ -195,11 +194,6 @@ made through a pull request.
Email = "me@jedevc.com"
GitHub = "jedevc"

[people.jsternberg]
Name = "Jonathan Sternberg"
Email = "jonathan.sternberg@docker.com"
GitHub = "jsternberg"

[people.thajeztah]
Name = "Sebastiaan van Stijn"
Email = "github@gone.nl"

40 Makefile
@@ -8,8 +8,6 @@ endif

export BUILDX_CMD ?= docker buildx

BAKE_TARGETS := binaries binaries-cross lint lint-gopls validate-vendor validate-docs validate-authors validate-generated-files

.PHONY: all
all: binaries

@@ -21,9 +19,13 @@ build:
shell:
./hack/shell

.PHONY: $(BAKE_TARGETS)
$(BAKE_TARGETS):
$(BUILDX_CMD) bake $@
.PHONY: binaries
binaries:
$(BUILDX_CMD) bake binaries

.PHONY: binaries-cross
binaries-cross:
$(BUILDX_CMD) bake binaries-cross

.PHONY: install
install: binaries
@@ -35,19 +37,27 @@ release:
./hack/release

.PHONY: validate-all
validate-all: lint test validate-vendor validate-docs validate-generated-files
validate-all: lint test validate-vendor validate-docs

.PHONY: lint
lint:
$(BUILDX_CMD) bake lint

.PHONY: test
test:
./hack/test
$(BUILDX_CMD) bake test

.PHONY: test-unit
test-unit:
TESTPKGS=./... SKIP_INTEGRATION_TESTS=1 ./hack/test
.PHONY: validate-vendor
validate-vendor:
$(BUILDX_CMD) bake validate-vendor

.PHONY: test
test-integration:
TESTPKGS=./tests ./hack/test
.PHONY: validate-docs
validate-docs:
$(BUILDX_CMD) bake validate-docs

.PHONY: validate-authors
validate-authors:
$(BUILDX_CMD) bake validate-authors

.PHONY: test-driver
test-driver:
@@ -68,7 +78,3 @@ authors:
.PHONY: mod-outdated
mod-outdated:
$(BUILDX_CMD) bake mod-outdated

.PHONY: generated-files
generated-files:
$(BUILDX_CMD) bake update-generated-files

453 PROJECT.md
@@ -1,453 +0,0 @@
# Project processing guide <!-- omit from toc -->

- [Project scope](#project-scope)
- [Labels](#labels)
- [Global](#global)
- [`area/`](#area)
- [`exp/`](#exp)
- [`impact/`](#impact)
- [`kind/`](#kind)
- [`needs/`](#needs)
- [`priority/`](#priority)
- [`status/`](#status)
- [Types of releases](#types-of-releases)
- [Feature releases](#feature-releases)
- [Release Candidates](#release-candidates)
- [Support Policy](#support-policy)
- [Contributing to Releases](#contributing-to-releases)
- [Patch releases](#patch-releases)
- [Milestones](#milestones)
- [Triage process](#triage-process)
- [Verify essential information](#verify-essential-information)
- [Classify the issue](#classify-the-issue)
- [Prioritization guidelines for `kind/bug`](#prioritization-guidelines-for-kindbug)
- [Issue lifecycle](#issue-lifecycle)
- [Examples](#examples)
- [Submitting a bug](#submitting-a-bug)
- [Pull request review process](#pull-request-review-process)
- [Handling stalled issues and pull requests](#handling-stalled-issues-and-pull-requests)
- [Moving to a discussion](#moving-to-a-discussion)
- [Workflow automation](#workflow-automation)
- [Exempting an issue/PR from stale bot processing](#exempting-an-issuepr-from-stale-bot-processing)
- [Updating dependencies](#updating-dependencies)

---

## Project scope

**Docker Buildx** is a Docker CLI plugin designed to extend build capabilities using BuildKit. It provides advanced features for building container images, supporting multiple builder instances, multi-node builds, and high-level build constructs. Buildx enhances the Docker build process, making it more efficient and flexible, and is compatible with both Docker and Kubernetes environments. Key features include:

- **Familiar user experience:** Buildx offers a user experience similar to legacy docker build, ensuring a smooth transition from legacy commands
- **Full BuildKit capabilities:** Leverage the full feature set of [`moby/buildkit`](https://github.com/moby/buildkit) when using the container driver
- **Multiple builder instances:** Supports the use of multiple builder instances, allowing concurrent builds and effective management and monitoring of these builders.
- **Multi-node builds:** Use multiple nodes to build cross-platform images
- **Compose integration:** Build complex, multi-service files as defined in compose
- **High-level build constructs via `bake`:** Introduces high-level build constructs for more complex build workflows
- **In-container driver support:** Supports in-container drivers for both Docker and Kubernetes environments to improve isolation/security.

## Labels

Below are common groups, labels, and their intended usage to support issues, pull requests, and discussion processing.

### Global

General attributes that can apply to nearly any issue or pull request.

| Label | Applies to | Description |
| ------------------- | ----------- | ------------------------------------------------------------------------- |
| `bot` | Issues, PRs | Created by a bot |
| `good first issue` | Issues | Suitable for first-time contributors |
| `help wanted` | Issues, PRs | Assistance requested |
| `lgtm` | PRs | “Looks good to me” approval |
| `stale` | Issues, PRs | The issue/PR has not had activity for a while |
| `rotten` | Issues, PRs | The issue/PR has not had activity since being marked stale and was closed |
| `frozen` | Issues, PRs | The issue/PR should be skipped by the stale-bot |
| `dco/no` | PRs | The PR is missing a developer certificate of origin sign-off |

### `area/`

Area or component of the project affected. Please note that the table below may not be inclusive of all current options.

| Label | Applies to | Description |
| ------------------------------ | ---------- | -------------------------- |
| `area/bake` | Any | `bake` |
| `area/bake/compose` | Any | `bake/compose` |
| `area/build` | Any | `build` |
| `area/builder` | Any | `builder` |
| `area/buildkit` | Any | Relates to `moby/buildkit` |
| `area/cache` | Any | `cache` |
| `area/checks` | Any | `checks` |
| `area/ci` | Any | Project CI |
| `area/cli` | Any | `cli` |
| `area/controller` | Any | `controller` |
| `area/debug` | Any | `debug` |
| `area/dependencies` | Any | Project dependencies |
| `area/dockerfile` | Any | `dockerfile` |
| `area/docs` | Any | `docs` |
| `area/driver` | Any | `driver` |
| `area/driver/docker` | Any | `driver/docker` |
| `area/driver/docker-container` | Any | `driver/docker-container` |
| `area/driver/kubernetes` | Any | `driver/kubernetes` |
| `area/driver/remote` | Any | `driver/remote` |
| `area/feature-parity` | Any | `feature-parity` |
| `area/github-actions` | Any | `github-actions` |
| `area/hack` | Any | Project hack/support |
| `area/imagetools` | Any | `imagetools` |
| `area/metrics` | Any | `metrics` |
| `area/moby` | Any | Relates to `moby/moby` |
| `area/project` | Any | Project support |
| `area/qemu` | Any | `qemu` |
| `area/tests` | Any | Project testing |
| `area/windows` | Any | `windows` |

### `exp/`

Estimated experience level to complete the item

| Label | Applies to | Description |
| ------------------ | ---------- | ------------------------------------------------------------------------------- |
| `exp/beginner` | Issue | Suitable for contributors new to the project or technology stack |
| `exp/intermediate` | Issue | Requires some familiarity with the project and technology |
| `exp/expert` | Issue | Requires deep understanding and advanced skills with the project and technology |

### `impact/`

Potential impact areas of the issue or pull request.

| Label | Applies to | Description |
| -------------------- | ---------- | -------------------------------------------------- |
| `impact/breaking` | PR | Change is API-breaking |
| `impact/changelog` | PR | When complete, the item should be in the changelog |
| `impact/deprecation` | PR | Change is a deprecation of a feature |

### `kind/`

The type of issue, pull request, or discussion

| Label | Applies to | Description |
| ------------------ | ----------------- | ------------------------------------------------------- |
| `kind/bug` | Issue, PR | Confirmed bug |
| `kind/chore` | Issue, PR | Project support tasks |
| `kind/docs` | Issue, PR | Additions or modifications to the documentation |
| `kind/duplicate` | Any | Duplicate of another item |
| `kind/enhancement` | Any | Enhancement of an existing feature |
| `kind/feature` | Any | A brand new feature |
| `kind/maybe-bug` | Issue, PR | Unconfirmed bug, turns into kind/bug when confirmed |
| `kind/proposal` | Issue, Discussion | A proposed major change |
| `kind/refactor` | Issue, PR | Refactor of existing code |
| `kind/support` | Any | A question, discussion, or other user support item |
| `kind/tests` | Issue, PR | Additions or modifications to the project testing suite |

### `needs/`

Actions or missing requirements needed by the issue or pull request.

| Label | Applies to | Description |
| --------------------------- | ---------- | ----------------------------------------------------- |
| `needs/assignee` | Issue, PR | Needs an assignee |
| `needs/code-review` | PR | Needs review of code |
| `needs/design-review` | Issue, PR | Needs review of design |
| `needs/docs-review` | Issue, PR | Needs review by the documentation team |
| `needs/docs-update` | Issue, PR | Needs an update to the docs |
| `needs/follow-on-work` | Issue, PR | Needs follow-on work/PR |
| `needs/issue` | PR | Needs an issue |
| `needs/maintainer-decision` | Issue, PR | Needs maintainer discussion/decision before advancing |
| `needs/milestone` | Issue, PR | Needs milestone assignment |
| `needs/more-info` | Any | Needs more information from the author |
| `needs/more-investigation` | Issue, PR | Needs further investigation |
| `needs/priority` | Issue, PR | Needs priority assignment |
| `needs/pull-request` | Issue | Needs a pull request |
| `needs/rebase` | PR | Needs rebase to target branch |
| `needs/reproduction` | Issue, PR | Needs reproduction steps |

### `priority/`

Level of urgency of a `kind/bug` issue or pull request.

| Label | Applies to | Description |
| ------------- | ---------- | ----------------------------------------------------------------------- |
| `priority/P0` | Issue, PR | Urgent: Security, critical bugs, blocking issues. |
| `priority/P1` | Issue, PR | Important: This is a top priority and a must-have for the next release. |
| `priority/P2` | Issue, PR | Normal: Default priority |

### `status/`

Current lifecycle state of the issue or pull request.

| Label | Applies to | Description |
| --------------------- | ---------- | ---------------------------------------------------------------------- |
| `status/accepted` | Issue, PR | The issue has been reviewed and accepted for implementation |
| `status/active` | PR | The PR is actively being worked on by a maintainer or community member |
| `status/blocked` | Issue, PR | The issue/PR is blocked from advancing to another status |
| `status/do-not-merge` | PR | Should not be merged pending further review or changes |
| `status/transfer` | Any | Transferred to another project |
| `status/triage` | Any | The item needs to be sorted by maintainers |
| `status/wontfix` | Issue, PR | The issue/PR will not be fixed or addressed as described |

## Types of releases

This project has feature releases, patch releases, and security releases.

### Feature releases

Feature releases are made from the development branch, followed by cutting a release branch for future patch releases, which may also occur during the code freeze period.

#### Release Candidates

Users can expect 2-3 release candidate (RC) test releases prior to a feature release. The first RC is typically released about one to two weeks before the final release.

#### Support Policy

Once a new feature release is cut, support for the previous feature release is discontinued. An exception may be made for urgent security releases that occur shortly after a new feature release. Buildx does not offer LTS (Long-Term Support) releases.

#### Contributing to Releases

Anyone can request that an issue or PR be included in the next feature or patch release milestone, provided it meets the necessary requirements.

### Patch releases

Patch releases should only include the most critical patches. Stability is vital, so everyone should always use the latest patch release.

If a fix is needed but does not qualify for a patch release because of its code size or other criteria that make it too unpredictable, we will prioritize cutting a new feature release sooner rather than making an exception for backporting.

The following PRs are included in patch releases:

- `priority/P0` fixes
- `priority/P1` fixes, assuming maintainers don’t object because of the patch size
- `priority/P2` fixes, only if (both required)
- proposed by a maintainer
- the patch is trivial and self-contained
- Documentation-only patches
- Vendored dependency updates, only if:
- Fixing a (qualifying) bug or security issue in Buildx
- The patch is small; otherwise a forked version of the dependency with only the patches required
 
New features do not qualify for a patch release.

## Milestones

Milestones are used to help identify what releases a contribution will be in.

- The `v0.next` milestone collects unblocked items planned for the next 2-3 feature releases but not yet assigned to a specific version milestone.
- The `v0.backlog` milestone gathers all triaged items considered for the long-term (beyond the next 3 feature releases) or currently unfit for a future release due to certain conditions. These items may be blocked and need to be unblocked before progressing.

## Triage process

Triage provides an important way to contribute to an open-source project; this process also applies to pull requests submitted without a linked issue. Triage helps ensure work items are resolved quickly by:

- Ensuring the issue's intent and purpose are described precisely. This is necessary because it can be difficult for an issue to explain how an end user experiences a problem and what actions they took to arrive at the problem.
- Giving a contributor the information they need before they commit to resolving an issue.
- Lowering the issue count by preventing duplicate issues.
- Streamlining the development process by preventing duplicate discussions.

If you don't have time to code, consider helping with triage. The community will thank you for saving them time by spending some of yours. The same basic process should be applied upon receipt of a new issue.

1. Verify essential information
2. Classify the issue
3. Prioritize the issue

### Verify essential information

Before advancing the triage process, ensure the issue contains all necessary information to be properly understood and assessed. The required information may vary by issue type, but typically includes the system environment, version numbers, reproduction steps, expected outcomes, and actual results.

- **Exercising Judgment**: Use your best judgment to assess the issue description’s completeness.
- **Communicating Needs**: If the information provided is insufficient, kindly request additional details from the author. Explain that this information is crucial for clarity and resolution of the issue, and apply the `needs/more-info` label to indicate a response from the author is required.

### Classify the issue

An issue will typically have multiple labels. These are used to help communicate key information about context, requirements, and status. At a minimum, a properly classified issue should have:

- (Required) One or more [`area/*`](#area) labels
- (Required) One [`kind/*`](#kind) label to indicate the type of issue
- (Required if `kind/bug`) A [`priority/*`](#priority) label

When assigning a decision, the following labels should be present:

- (Required) One [`status/*`](#status) label to indicate lifecycle status

Additional labels can provide more clarity:

- Zero or more [`needs/*`](#needs) labels to indicate missing items
- Zero or more [`impact/*`](#impact) labels
- One [`exp/*`](#exp) label

## Prioritization guidelines for `kind/bug`

When an issue or pull request of `kind/bug` is correctly categorized and attached to a milestone, the labels indicate the urgency with which it should be completed.

**priority/P0**

Fixing this item is the highest priority. A patch release will follow as soon as a patch is available and verified. This level is used exclusively for bugs.

Examples:

- Regression in a critical code path
- Panic in a critical code path
- Corruption in a critical code path or the rest of the system
- Leaked zero-day critical security vulnerability

**priority/P1**

Items with this label should be fixed with high priority and almost always included in a patch release. Unless waiting for another issue, patch releases should happen within a week. This level is not used for features or enhancements.

Examples:

- Any regression, panic
- Measurable performance regression
- A major bug in a new feature in the latest release
- Incompatibility with an upgraded external dependency

**priority/P2**

This is the default priority and is implied in the absence of a `priority/` label. Bugs with this priority should be included in the next feature release but may land in a patch release if they are ready and unlikely to impact other functionality adversely. Non-bug issues with this priority should also be included in the next feature release if they are available and ready.

Examples:

- Confirmed bugs
- Bugs in non-default configurations
- Most enhancements

## Issue lifecycle

```mermaid
flowchart LR
create([New issue]) --> triage
subgraph triage[Triage Loop]
review[Review]
end
subgraph decision[Decision]
accept[Accept]
close[Close]
end
triage -- if accepted --> accept[Assign status, milestone]
triage -- if rejected --> close[Assign status, close issue]
```

### Examples

#### Submitting a bug

To help illustrate the issue life cycle, let’s walk through submitting an issue as a potential bug in CI that enters a feedback loop and is eventually accepted as P2 priority and placed on the backlog.

```mermaid
flowchart LR

new([New issue])

subgraph triage[Triage]
direction LR

create["Action: Submit issue via Bug form\nLabels: kind/maybe-bug, status/triage"]
style create text-align:left

subgraph review[Review]
direction TB
classify["Action: Maintainer reviews issue, requests more info\nLabels: kind/maybe-bug, status/triage, needs/more-info, area/*"]
style classify text-align:left

update["Action: Author updates issue\nLabels: kind/maybe-bug, status/triage, needs/more-info, area/*"]
style update text-align:left

classify --> update
update --> classify
end

create --> review
end

subgraph decision[Decision]
accept["Action: Maintainer reviews updates, accepts, assigns milestone\nLabels: kind/bug, priority/P2, status/accepted, area/*, impact/*"]
style accept text-align: left
end

new --> triage
triage --> decision
```

## Pull request review process

A thorough and timely review process for pull requests (PRs) is crucial for maintaining the integrity and quality of the project while fostering a collaborative environment.

- **Labeling**: Most labels should be inherited from a linked issue. If no issue is linked, an extended review process may be required.
- **Continuous Integration**: With few exceptions, it is crucial that all Continuous Integration (CI) workflows pass successfully.
- **Draft Status**: Incomplete or long-running PRs should be placed in "Draft" status. They may revert to "Draft" status upon initial review if significant rework is required.

```mermaid
flowchart LR
triage([Triage])
draft[Draft PR]
review[PR Review]
closed{{Close PR}}
merge{{Merge PR}}

subgraph feedback1[Feedback Loop]
draft
end
subgraph feedback2[Feedback Loop]
review
end

triage --> draft
draft --> review
review --> closed
review --> draft
review --> merge
```

## Handling stalled issues and pull requests

Unfortunately, some issues or pull requests can remain inactive for extended periods. To mitigate this, automation is employed to prompt both the author and maintainers, ensuring that all contributions receive appropriate attention.

**For Authors:**

- **Closure of Inactive Items**: If your issue or PR becomes irrelevant or is no longer needed, please close it to help keep the project clean.
- **Prompt Responses**: If additional information is requested, please respond promptly to facilitate progress.

**For Maintainers:**

- **Timely Responses**: Endeavor to address issues and PRs within a reasonable timeframe to keep the community actively engaged.
- **Engagement with Stale Issues**: If an issue becomes stale due to maintainer inaction, re-engage with the author to reassess and revitalize the discussion.

**Stale and Rotten Policy:**

- An issue or PR will be labeled as **`stale`** after 14 calendar days of inactivity. If it remains inactive for another 30 days, it will be labeled as **`rotten`** and closed.
- Authors whose issues or PRs have been closed are welcome to re-open them or create new ones and link to the original.

**Skipping Stale Processing:**

- To prevent an issue or PR from being marked as stale, label it as **`frozen`**.

**Exceptions to Stale Processing:**

- Issues or PRs marked as **`frozen`**.
- Issues or PRs assigned to a milestone.

## Moving to a discussion

Sometimes, an issue or pull request may not be the appropriate medium for what is essentially a discussion. In such cases, the issue or PR will either be converted to a discussion or a new discussion will be created. The original item will then be labeled appropriately (**`kind/discussion`** or **`kind/question`**) and closed.

If you believe this conversion was made in error, please express your concerns in the new discussion thread. If necessary, a reversal to the original issue or PR format can be facilitated.

## Workflow automation

To help expedite common operations, avoid errors, and reduce toil, some workflow automation is used by the project. This can include:

- Stale issue or pull request processing
- Auto-labeling actions
- Auto-response actions
- Label carry-over from issue to pull request

### Exempting an issue/PR from stale bot processing

The stale item handling is configured in the [repository](link-to-config-file). To exempt an issue or PR from stale processing, you can:

- Add the item to a milestone
- Add the `frozen` label to the item

## Updating dependencies

- **Runtime Dependencies**: Use the latest stable release available when the first Release Candidate (RC) of a new feature release is cut. For patch releases, update to the latest corresponding patch release of the dependency.
- **Other Dependencies**: Always permitted to update to the latest patch release in the development branch. Updates to a new feature release require justification, unless the dependency is outdated. Prefer tagged versions of dependencies unless a specific untagged commit is needed. Go modules should specify the lowest compatible version; there is no requirement to update all dependencies to their latest versions before cutting a new Buildx feature release.
- **Patch Releases**: Vendored dependency updates are considered for patch releases, except in the rare cases specified previously.
- **Security Considerations**: A security scanner report indicating a non-exploitable issue via Buildx does not justify backports.

54 README.md
@@ -32,6 +32,19 @@ Key features:
- [Building with buildx](#building-with-buildx)
- [Working with builder instances](#working-with-builder-instances)
- [Building multi-platform images](#building-multi-platform-images)
- [Manuals](docs/manuals)
- [High-level build options with Bake](docs/manuals/bake/index.md)
- [Drivers](docs/manuals/drivers/index.md)
- [Exporters](docs/manuals/exporters/index.md)
- [Cache backends](docs/manuals/cache/backends/index.md)
- [Guides](docs/guides)
- [CI/CD](docs/guides/cicd.md)
- [CNI networking](docs/guides/cni-networking.md)
- [Using a custom network](docs/guides/custom-network.md)
- [Using a custom registry configuration](docs/guides/custom-registry-config.md)
- [OpenTelemetry support](docs/guides/opentelemetry.md)
- [Registry mirror](docs/guides/registry-mirror.md)
- [Resource limiting](docs/guides/resource-limiting.md)
- [Reference](docs/reference/buildx.md)
- [`buildx bake`](docs/reference/buildx_bake.md)
- [`buildx build`](docs/reference/buildx_build.md)
@@ -41,25 +54,21 @@ Key features:
- [`buildx imagetools create`](docs/reference/buildx_imagetools_create.md)
- [`buildx imagetools inspect`](docs/reference/buildx_imagetools_inspect.md)
- [`buildx inspect`](docs/reference/buildx_inspect.md)
- [`buildx install`](docs/reference/buildx_install.md)
- [`buildx ls`](docs/reference/buildx_ls.md)
- [`buildx prune`](docs/reference/buildx_prune.md)
- [`buildx rm`](docs/reference/buildx_rm.md)
- [`buildx stop`](docs/reference/buildx_stop.md)
- [`buildx uninstall`](docs/reference/buildx_uninstall.md)
- [`buildx use`](docs/reference/buildx_use.md)
- [`buildx version`](docs/reference/buildx_version.md)
- [Contributing](#contributing)

For more information on how to use Buildx, see
[Docker Build docs](https://docs.docker.com/build/).

# Installing

Using `buildx` with Docker requires Docker engine 19.03 or newer.

> [!WARNING]
> Using an incompatible version of Docker may result in unexpected behavior,
> and will likely cause issues, especially when using Buildx builders with more
> recent versions of BuildKit.
Using `buildx` as a docker CLI plugin requires using Docker 19.03 or newer.
A limited set of functionality works with older versions of Docker when
invoking the binary directly.

## Windows and macOS

@@ -68,13 +77,13 @@ for Windows and macOS.

## Linux packages

Docker Engine package repositories contain Docker Buildx packages when installed according to the
[Docker Engine install documentation](https://docs.docker.com/engine/install/). Install the
`docker-buildx-plugin` package to install the Buildx plugin.
Docker Linux packages also include Docker Buildx when installed using the
[DEB or RPM packages](https://docs.docker.com/engine/install/).

## Manual download

> [!IMPORTANT]
> **Important**
>
> This section is for unattended installation of the buildx component. These
> instructions are mostly suitable for testing purposes. We do not recommend
> installing buildx using manual download in production environments as they
@@ -105,7 +114,8 @@ On Windows:
* `C:\ProgramData\Docker\cli-plugins`
* `C:\Program Files\Docker\cli-plugins`

> [!NOTE]
> **Note**
>
> On Unix environments, it may also be necessary to make it executable with `chmod +x`:
> ```shell
> $ chmod +x ~/.docker/cli-plugins/docker-buildx
@@ -144,7 +154,7 @@ $ DOCKER_BUILDKIT=1 docker build --platform=local -o . "https://github.com/docke
$ mkdir -p ~/.docker/cli-plugins
$ mv buildx ~/.docker/cli-plugins/docker-buildx

# Local
# Local
$ git clone https://github.com/docker/buildx.git && cd buildx
$ make install
```
@@ -184,12 +194,12 @@ through various "drivers". Each driver defines how and where a build should
run, and has a different feature set.

We currently support the following drivers:
- The `docker` driver ([guide](https://docs.docker.com/build/drivers/docker/), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
- The `docker-container` driver ([guide](https://docs.docker.com/build/drivers/docker-container/), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
- The `kubernetes` driver ([guide](https://docs.docker.com/build/drivers/kubernetes/), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
- The `remote` driver ([guide](https://docs.docker.com/build/drivers/remote/))
- The `docker` driver ([guide](docs/manuals/drivers/docker.md), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
- The `docker-container` driver ([guide](docs/manuals/drivers/docker-container.md), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
- The `kubernetes` driver ([guide](docs/manuals/drivers/kubernetes.md), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
- The `remote` driver ([guide](docs/manuals/drivers/remote.md))

For more information on drivers, see the [drivers guide](https://docs.docker.com/build/drivers/).
For more information on drivers, see the [drivers guide](docs/manuals/drivers/index.md).

## Working with builder instances

@@ -236,7 +246,7 @@ When you invoke a build, you can set the `--platform` flag to specify the target
platform for the build output (for example, `linux/amd64`, `linux/arm64`, or
`darwin/amd64`).

When the current builder instance is backed by the `docker-container` or
When the current builder instance is backed by the `docker-container` or
`kubernetes` driver, you can specify multiple platforms together. In this case,
it builds a manifest list which contains images for all specified architectures.
When you use this image in [`docker run`](https://docs.docker.com/engine/reference/commandline/run/)
@@ -306,7 +316,7 @@ cross-compilation helpers for more advanced use-cases.

## High-level build options

See [High-level builds with Bake](https://docs.docker.com/build/bake/) for more details.
See [`docs/manuals/bake/index.md`](docs/manuals/bake/index.md) for more details.

# Contributing

947 bake/bake.go
File diff suppressed because it is too large
1045 bake/bake_test.go
File diff suppressed because it is too large
187 bake/compose.go
@@ -1,20 +1,13 @@
package bake

import (
"context"
"fmt"
"os"
"path/filepath"
"slices"
"strings"

"github.com/compose-spec/compose-go/v2/consts"
"github.com/compose-spec/compose-go/v2/dotenv"
"github.com/compose-spec/compose-go/v2/loader"
composetypes "github.com/compose-spec/compose-go/v2/types"
"github.com/docker/buildx/util/buildflags"
dockeropts "github.com/docker/cli/opts"
"github.com/docker/go-units"
"github.com/compose-spec/compose-go/dotenv"
"github.com/compose-spec/compose-go/loader"
compose "github.com/compose-spec/compose-go/types"
"github.com/pkg/errors"
"gopkg.in/yaml.v3"
)
@@ -24,9 +17,9 @@ func ParseComposeFiles(fs []File) (*Config, error) {
if err != nil {
return nil, err
}
var cfgs []composetypes.ConfigFile
var cfgs []compose.ConfigFile
for _, f := range fs {
cfgs = append(cfgs, composetypes.ConfigFile{
cfgs = append(cfgs, compose.ConfigFile{
Filename: f.Name,
Content: f.Data,
})
@@ -34,21 +27,12 @@ func ParseComposeFiles(fs []File) (*Config, error) {
return ParseCompose(cfgs, envs)
}

func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Config, error) {
if envs == nil {
envs = make(map[string]string)
}
cfg, err := loader.LoadWithContext(context.Background(), composetypes.ConfigDetails{
func ParseCompose(cfgs []compose.ConfigFile, envs map[string]string) (*Config, error) {
cfg, err := loader.Load(compose.ConfigDetails{
ConfigFiles: cfgs,
Environment: envs,
}, func(options *loader.Options) {
projectName := "bake"
if v, ok := envs[consts.ComposeProjectName]; ok && v != "" {
projectName = v
}
options.SetProjectName(projectName, false)
options.SkipNormalization = true
options.Profiles = []string{"*"}
})
if err != nil {
return nil, err
@@ -62,7 +46,6 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
g := &Group{Name: "default"}

for _, s := range cfg.Services {
s := s
if s.Build == nil {
continue
}
@@ -82,54 +65,8 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
dockerfilePath := s.Build.Dockerfile
dockerfilePathP = &dockerfilePath
}
var dockerfileInlineP *string
if s.Build.DockerfileInline != "" {
dockerfileInline := s.Build.DockerfileInline
dockerfileInlineP = &dockerfileInline
}

var additionalContexts map[string]string
if s.Build.AdditionalContexts != nil {
additionalContexts = map[string]string{}
for k, v := range s.Build.AdditionalContexts {
additionalContexts[k] = v
}
}

var shmSize *string
if s.Build.ShmSize > 0 {
shmSizeBytes := dockeropts.MemBytes(s.Build.ShmSize)
shmSizeStr := shmSizeBytes.String()
shmSize = &shmSizeStr
}

var networkModeP *string
if s.Build.Network != "" {
networkMode := s.Build.Network
networkModeP = &networkMode
}

var ulimits []string
if s.Build.Ulimits != nil {
for n, u := range s.Build.Ulimits {
ulimit, err := units.ParseUlimit(fmt.Sprintf("%s=%d:%d", n, u.Soft, u.Hard))
if err != nil {
return nil, err
}
ulimits = append(ulimits, ulimit.String())
}
}

var ssh []*buildflags.SSH
for _, bkey := range s.Build.SSH {
sshkey := composeToBuildkitSSH(bkey)
ssh = append(ssh, sshkey)
}
slices.SortFunc(ssh, func(a, b *buildflags.SSH) int {
return a.Less(b)
})

var secrets []*buildflags.Secret
var secrets []string
for _, bs := range s.Build.Secrets {
secret, err := composeToBuildkitSecret(bs, cfg.Secrets[bs.Source])
if err != nil {
@@ -141,29 +78,16 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
// compose does not support nil values for labels
labels := map[string]*string{}
for k, v := range s.Build.Labels {
v := v
labels[k] = &v
}

cacheFrom, err := buildflags.ParseCacheEntry(s.Build.CacheFrom)
if err != nil {
return nil, err
}

cacheTo, err := buildflags.ParseCacheEntry(s.Build.CacheTo)
if err != nil {
return nil, err
}

g.Targets = append(g.Targets, targetName)
t := &Target{
Name: targetName,
Context: contextPathP,
Contexts: additionalContexts,
Dockerfile: dockerfilePathP,
DockerfileInline: dockerfileInlineP,
Tags: s.Build.Tags,
Labels: labels,
Name: targetName,
Context: contextPathP,
Dockerfile: dockerfilePathP,
Tags: s.Build.Tags,
Labels: labels,
Args: flatten(s.Build.Args.Resolve(func(val string) (string, bool) {
if val, ok := s.Environment[val]; ok && val != nil {
return *val, true
@@ -171,13 +95,10 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
val, ok := cfg.Environment[val]
return val, ok
})),
CacheFrom: cacheFrom,
CacheTo: cacheTo,
NetworkMode: networkModeP,
SSH: ssh,
CacheFrom: s.Build.CacheFrom,
CacheTo: s.Build.CacheTo,
NetworkMode: &s.Build.Network,
Secrets: secrets,
ShmSize: shmSize,
Ulimits: ulimits,
}
if err = t.composeExtTarget(s.Build.Extensions); err != nil {
return nil, err
@@ -192,6 +113,7 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
c.Targets = append(c.Targets, t)
}
c.Groups = append(c.Groups, g)

}

return &c, nil
@@ -214,15 +136,14 @@ func validateComposeFile(dt []byte, fn string) (bool, error) {
}

func validateCompose(dt []byte, envs map[string]string) error {
_, err := loader.Load(composetypes.ConfigDetails{
ConfigFiles: []composetypes.ConfigFile{
_, err := loader.Load(compose.ConfigDetails{
ConfigFiles: []compose.ConfigFile{
{
Content: dt,
},
},
Environment: envs,
}, func(options *loader.Options) {
options.SetProjectName("bake", false)
options.SkipNormalization = true
// consistency is checked later in ParseCompose to ensure multiple
// compose files can be merged together
@@ -263,7 +184,7 @@ func loadDotEnv(curenv map[string]string, workingDir string) (map[string]string,
return nil, err
}

envs, err := dotenv.UnmarshalBytesWithLookup(dt, nil)
envs, err := dotenv.UnmarshalBytes(dt)
if err != nil {
return nil, err
}
@@ -278,7 +199,7 @@ func loadDotEnv(curenv map[string]string, workingDir string) (map[string]string,
return curenv, nil
}

func flatten(in composetypes.MappingWithEquals) map[string]*string {
func flatten(in compose.MappingWithEquals) map[string]*string {
if len(in) == 0 {
return nil
}
@@ -307,13 +228,11 @@ type xbake struct {
NoCacheFilter stringArray `yaml:"no-cache-filter,omitempty"`
Contexts stringMap `yaml:"contexts,omitempty"`
// don't forget to update documentation if you add a new field:
// https://github.com/docker/docs/blob/main/content/build/bake/compose-file.md#extension-field-with-x-bake
// docs/manuals/bake/compose-file.md#extension-field-with-x-bake
}

type (
stringMap map[string]string
stringArray []string
)
type stringMap map[string]string
type stringArray []string

func (sa *stringArray) UnmarshalYAML(unmarshal func(interface{}) error) error {
var multi []string
@@ -349,45 +268,22 @@ func (t *Target) composeExtTarget(exts map[string]interface{}) error {
t.Tags = dedupSlice(append(t.Tags, xb.Tags...))
}
if len(xb.CacheFrom) > 0 {
cacheFrom, err := buildflags.ParseCacheEntry(xb.CacheFrom)
if err != nil {
return err
}
t.CacheFrom = t.CacheFrom.Merge(cacheFrom)
t.CacheFrom = dedupSlice(append(t.CacheFrom, xb.CacheFrom...))
}
if len(xb.CacheTo) > 0 {
cacheTo, err := buildflags.ParseCacheEntry(xb.CacheTo)
if err != nil {
return err
}
t.CacheTo = t.CacheTo.Merge(cacheTo)
t.CacheTo = dedupSlice(append(t.CacheTo, xb.CacheTo...))
}
if len(xb.Secrets) > 0 {
secrets, err := parseArrValue[buildflags.Secret](xb.Secrets)
if err != nil {
return err
}
t.Secrets = t.Secrets.Merge(secrets)
t.Secrets = dedupSlice(append(t.Secrets, xb.Secrets...))
}
if len(xb.SSH) > 0 {
ssh, err := parseArrValue[buildflags.SSH](xb.SSH)
if err != nil {
return err
}
t.SSH = t.SSH.Merge(ssh)
slices.SortFunc(t.SSH, func(a, b *buildflags.SSH) int {
return a.Less(b)
})
t.SSH = dedupSlice(append(t.SSH, xb.SSH...))
}
if len(xb.Platforms) > 0 {
t.Platforms = dedupSlice(append(t.Platforms, xb.Platforms...))
}
if len(xb.Outputs) > 0 {
outputs, err := parseArrValue[buildflags.ExportEntry](xb.Outputs)
if err != nil {
return err
}
t.Outputs = t.Outputs.Merge(outputs)
t.Outputs = dedupSlice(append(t.Outputs, xb.Outputs...))
}
if xb.Pull != nil {
t.Pull = xb.Pull
@@ -407,30 +303,21 @@ func (t *Target) composeExtTarget(exts map[string]interface{}) error {

// composeToBuildkitSecret converts secret from compose format to buildkit's
// csv format.
func composeToBuildkitSecret(inp composetypes.ServiceSecretConfig, psecret composetypes.SecretConfig) (*buildflags.Secret, error) {
if psecret.External {
return nil, errors.Errorf("unsupported external secret %s", psecret.Name)
func composeToBuildkitSecret(inp compose.ServiceSecretConfig, psecret compose.SecretConfig) (string, error) {
if psecret.External.External {
return "", errors.Errorf("unsupported external secret %s", psecret.Name)
}

secret := &buildflags.Secret{}
var bkattrs []string
if inp.Source != "" {
secret.ID = inp.Source
bkattrs = append(bkattrs, "id="+inp.Source)
}
if psecret.File != "" {
secret.FilePath = psecret.File
bkattrs = append(bkattrs, "src="+psecret.File)
|
||||
}
|
||||
if psecret.Environment != "" {
|
||||
secret.Env = psecret.Environment
|
||||
bkattrs = append(bkattrs, "env="+psecret.Environment)
|
||||
}
|
||||
return secret, nil
|
||||
}
|
||||
|
||||
// composeToBuildkitSSH converts secret from compose format to buildkit's
|
||||
// csv format.
|
||||
func composeToBuildkitSSH(sshKey composetypes.SSHKey) *buildflags.SSH {
|
||||
bkssh := &buildflags.SSH{ID: sshKey.ID}
|
||||
if sshKey.Path != "" {
|
||||
bkssh.Paths = []string{sshKey.Path}
|
||||
}
|
||||
return bkssh
|
||||
return strings.Join(bkattrs, ","), nil
|
||||
}
|
||||
|
||||
@@ -6,13 +6,13 @@ import (
"sort"
"testing"

composetypes "github.com/compose-spec/compose-go/v2/types"
compose "github.com/compose-spec/compose-go/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestParseCompose(t *testing.T) {
dt := []byte(`
var dt = []byte(`
services:
db:
build: ./db
@@ -21,8 +21,6 @@ services:
webapp:
build:
context: ./dir
additional_contexts:
foo: ./bar
dockerfile: Dockerfile-alternate
network:
none
@@ -32,19 +30,9 @@ services:
- type=local,src=path/to/cache
cache_to:
- type=local,dest=path/to/cache
ssh:
- key=/path/to/key
- default
secrets:
- token
- aws
webapp2:
profiles:
- test
build:
context: ./dir
dockerfile_inline: |
FROM alpine
secrets:
token:
environment: ENV_TOKEN
@@ -52,58 +40,52 @@ secrets:
file: /root/.aws/credentials
`)

c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)

require.Equal(t, 1, len(c.Groups))
require.Equal(t, "default", c.Groups[0].Name)
sort.Strings(c.Groups[0].Targets)
require.Equal(t, []string{"db", "webapp", "webapp2"}, c.Groups[0].Targets)
require.Equal(t, []string{"db", "webapp"}, c.Groups[0].Targets)

require.Equal(t, 3, len(c.Targets))
require.Equal(t, 2, len(c.Targets))
sort.Slice(c.Targets, func(i, j int) bool {
return c.Targets[i].Name < c.Targets[j].Name
})
require.Equal(t, "db", c.Targets[0].Name)
require.Equal(t, "db", *c.Targets[0].Context)
require.Equal(t, "./db", *c.Targets[0].Context)
require.Equal(t, []string{"docker.io/tonistiigi/db"}, c.Targets[0].Tags)

require.Equal(t, "webapp", c.Targets[1].Name)
require.Equal(t, "dir", *c.Targets[1].Context)
require.Equal(t, map[string]string{"foo": "bar"}, c.Targets[1].Contexts)
require.Equal(t, "./dir", *c.Targets[1].Context)
require.Equal(t, "Dockerfile-alternate", *c.Targets[1].Dockerfile)
require.Equal(t, 1, len(c.Targets[1].Args))
require.Equal(t, ptrstr("123"), c.Targets[1].Args["buildno"])
require.Equal(t, []string{"type=local,src=path/to/cache"}, stringify(c.Targets[1].CacheFrom))
require.Equal(t, []string{"type=local,dest=path/to/cache"}, stringify(c.Targets[1].CacheTo))
require.Equal(t, []string{"type=local,src=path/to/cache"}, c.Targets[1].CacheFrom)
require.Equal(t, []string{"type=local,dest=path/to/cache"}, c.Targets[1].CacheTo)
require.Equal(t, "none", *c.Targets[1].NetworkMode)
require.Equal(t, []string{"default", "key=/path/to/key"}, stringify(c.Targets[1].SSH))
require.Equal(t, []string{
"id=aws,src=/root/.aws/credentials",
"id=token,env=ENV_TOKEN",
}, stringify(c.Targets[1].Secrets))

require.Equal(t, "webapp2", c.Targets[2].Name)
require.Equal(t, "dir", *c.Targets[2].Context)
require.Equal(t, "FROM alpine\n", *c.Targets[2].DockerfileInline)
"id=aws,src=/root/.aws/credentials",
}, c.Targets[1].Secrets)
}

func TestNoBuildOutOfTreeService(t *testing.T) {
dt := []byte(`
var dt = []byte(`
services:
external:
image: "verycooldb:1337"
webapp:
build: ./db
`)
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
require.Equal(t, 1, len(c.Groups))
require.Equal(t, 1, len(c.Targets))
}

func TestParseComposeTarget(t *testing.T) {
dt := []byte(`
var dt = []byte(`
services:
db:
build:
@@ -115,7 +97,7 @@ services:
target: webapp
`)

c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)

require.Equal(t, 2, len(c.Targets))
@@ -129,7 +111,7 @@ services:
}

func TestComposeBuildWithoutContext(t *testing.T) {
dt := []byte(`
var dt = []byte(`
services:
db:
build:
@@ -140,7 +122,7 @@ services:
target: webapp
`)

c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
require.Equal(t, 2, len(c.Targets))
sort.Slice(c.Targets, func(i, j int) bool {
@@ -153,7 +135,7 @@ services:
}

func TestBuildArgEnvCompose(t *testing.T) {
dt := []byte(`
var dt = []byte(`
version: "3.8"
services:
example:
@@ -171,7 +153,7 @@ services:
t.Setenv("BAR", "foo")
t.Setenv("ZZZ_BAR", "zzz_foo")

c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, sliceToMap(os.Environ()))
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, sliceToMap(os.Environ()))
require.NoError(t, err)
require.Equal(t, ptrstr("bar"), c.Targets[0].Args["FOO"])
require.Equal(t, ptrstr("zzz_foo"), c.Targets[0].Args["BAR"])
@@ -179,18 +161,18 @@ services:
}

func TestInconsistentComposeFile(t *testing.T) {
dt := []byte(`
var dt = []byte(`
services:
webapp:
entrypoint: echo 1
`)

_, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
_, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
require.Error(t, err)
}

func TestAdvancedNetwork(t *testing.T) {
dt := []byte(`
var dt = []byte(`
services:
db:
networks:
@@ -210,12 +192,12 @@ networks:
gateway: 10.5.0.254
`)

_, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
_, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
}

func TestTags(t *testing.T) {
dt := []byte(`
var dt = []byte(`
services:
example:
image: example
@@ -227,13 +209,13 @@ services:
- bar
`)

c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
require.Equal(t, []string{"foo", "bar"}, c.Targets[0].Tags)
}

func TestDependsOnList(t *testing.T) {
dt := []byte(`
var dt = []byte(`
version: "3.8"

services:
@@ -264,12 +246,12 @@ networks:
name: test-net
`)

_, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
_, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
}

func TestComposeExt(t *testing.T) {
dt := []byte(`
var dt = []byte(`
services:
addon:
image: ct-addon:bar
@@ -282,8 +264,6 @@ services:
- user/app:cache
tags:
- ct-addon:baz
ssh:
key: /path/to/key
args:
CT_ECR: foo
CT_TAG: bar
@@ -293,9 +273,6 @@ services:
tags:
- ct-addon:foo
- ct-addon:alp
ssh:
- default
- other=path/to/otherkey
platforms:
- linux/amd64
- linux/arm64
@@ -312,11 +289,6 @@ services:
args:
CT_ECR: foo
CT_TAG: bar
shm_size: 128m
ulimits:
nofile:
soft: 1024
hard: 1024
x-bake:
secret:
- id=mysecret,src=/local/secret
@@ -327,7 +299,7 @@ services:
no-cache: true
`)

c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
require.Equal(t, 2, len(c.Targets))
sort.Slice(c.Targets, func(i, j int) bool {
@@ -336,23 +308,20 @@ services:
require.Equal(t, map[string]*string{"CT_ECR": ptrstr("foo"), "CT_TAG": ptrstr("bar")}, c.Targets[0].Args)
require.Equal(t, []string{"ct-addon:baz", "ct-addon:foo", "ct-addon:alp"}, c.Targets[0].Tags)
require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[0].Platforms)
require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheFrom))
require.Equal(t, []string{"type=local,dest=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheTo))
require.Equal(t, []string{"default", "key=/path/to/key", "other=path/to/otherkey"}, stringify(c.Targets[0].SSH))
require.Equal(t, []string{"user/app:cache", "type=local,src=path/to/cache"}, c.Targets[0].CacheFrom)
require.Equal(t, []string{"user/app:cache", "type=local,dest=path/to/cache"}, c.Targets[0].CacheTo)
require.Equal(t, newBool(true), c.Targets[0].Pull)
require.Equal(t, map[string]string{"alpine": "docker-image://alpine:3.13"}, c.Targets[0].Contexts)
require.Equal(t, []string{"ct-fake-aws:bar"}, c.Targets[1].Tags)
require.Equal(t, []string{"id=mysecret,src=/local/secret", "id=mysecret2,src=/local/secret2"}, stringify(c.Targets[1].Secrets))
require.Equal(t, []string{"default"}, stringify(c.Targets[1].SSH))
require.Equal(t, []string{"id=mysecret,src=/local/secret", "id=mysecret2,src=/local/secret2"}, c.Targets[1].Secrets)
require.Equal(t, []string{"default"}, c.Targets[1].SSH)
require.Equal(t, []string{"linux/arm64"}, c.Targets[1].Platforms)
require.Equal(t, []string{"type=docker"}, stringify(c.Targets[1].Outputs))
require.Equal(t, []string{"type=docker"}, c.Targets[1].Outputs)
require.Equal(t, newBool(true), c.Targets[1].NoCache)
require.Equal(t, ptrstr("128MiB"), c.Targets[1].ShmSize)
require.Equal(t, []string{"nofile=1024:1024"}, c.Targets[1].Ulimits)
}

func TestComposeExtDedup(t *testing.T) {
dt := []byte(`
var dt = []byte(`
services:
webapp:
image: app:bar
@@ -363,8 +332,6 @@ services:
- user/app:cache
tags:
- ct-addon:foo
ssh:
- default
x-bake:
tags:
- ct-addon:foo
@@ -374,18 +341,14 @@ services:
- type=local,src=path/to/cache
cache-to:
- type=local,dest=path/to/cache
ssh:
- default
- key=path/to/key
`)

c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
require.Equal(t, 1, len(c.Targets))
require.Equal(t, []string{"ct-addon:foo", "ct-addon:baz"}, c.Targets[0].Tags)
require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheFrom))
require.Equal(t, []string{"type=local,dest=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheTo))
require.Equal(t, []string{"default", "key=path/to/key"}, stringify(c.Targets[0].SSH))
require.Equal(t, []string{"user/app:cache", "type=local,src=path/to/cache"}, c.Targets[0].CacheFrom)
require.Equal(t, []string{"user/app:cache", "type=local,dest=path/to/cache"}, c.Targets[0].CacheTo)
}

func TestEnv(t *testing.T) {
@@ -396,7 +359,7 @@ func TestEnv(t *testing.T) {
_, err = envf.WriteString("FOO=bsdf -csdf\n")
require.NoError(t, err)

dt := []byte(`
var dt = []byte(`
services:
scratch:
build:
@@ -413,7 +376,7 @@ services:
- ` + envf.Name() + `
`)

c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
require.Equal(t, map[string]*string{"CT_ECR": ptrstr("foo"), "FOO": ptrstr("bsdf -csdf"), "NODE_ENV": ptrstr("test")}, c.Targets[0].Args)
}
@@ -424,7 +387,7 @@ func TestDotEnv(t *testing.T) {
err := os.WriteFile(filepath.Join(tmpdir, ".env"), []byte("FOO=bar"), 0644)
require.NoError(t, err)

dt := []byte(`
var dt = []byte(`
services:
scratch:
build:
@@ -443,7 +406,7 @@ services:
}

func TestPorts(t *testing.T) {
dt := []byte(`
var dt = []byte(`
services:
foo:
build:
@@ -459,7 +422,7 @@ services:
published: "3306"
protocol: tcp
`)
_, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
_, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
}

@@ -505,7 +468,7 @@ func TestServiceName(t *testing.T) {
for _, tt := range cases {
tt := tt
t.Run(tt.svc, func(t *testing.T) {
_, err := ParseCompose([]composetypes.ConfigFile{{Content: []byte(`
_, err := ParseCompose([]compose.ConfigFile{{Content: []byte(`
services:
` + tt.svc + `:
build:
@@ -576,7 +539,7 @@ services:
for _, tt := range cases {
tt := tt
t.Run(tt.name, func(t *testing.T) {
_, err := ParseCompose([]composetypes.ConfigFile{{Content: tt.dt}}, nil)
_, err := ParseCompose([]compose.ConfigFile{{Content: tt.dt}}, nil)
if tt.wantErr {
require.Error(t, err)
} else {
@@ -664,7 +627,7 @@ target "default" {
}

func TestComposeNullArgs(t *testing.T) {
dt := []byte(`
var dt = []byte(`
services:
scratch:
build:
@@ -674,130 +637,11 @@ services:
bar: "baz"
`)

c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
require.Equal(t, map[string]*string{"bar": ptrstr("baz")}, c.Targets[0].Args)
}

func TestDependsOn(t *testing.T) {
dt := []byte(`
services:
foo:
build:
context: .
ports:
- 3306:3306
depends_on:
- bar
bar:
build:
context: .
`)
_, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
}

func TestInclude(t *testing.T) {
tmpdir := t.TempDir()

err := os.WriteFile(filepath.Join(tmpdir, "compose-foo.yml"), []byte(`
services:
foo:
build:
context: .
target: buildfoo
ports:
- 3306:3306
`), 0644)
require.NoError(t, err)

dt := []byte(`
include:
- compose-foo.yml

services:
bar:
build:
context: .
target: buildbar
`)

chdir(t, tmpdir)
c, err := ParseComposeFiles([]File{{
Name: "composetypes.yml",
Data: dt,
}})
require.NoError(t, err)

require.Equal(t, 2, len(c.Targets))
sort.Slice(c.Targets, func(i, j int) bool {
return c.Targets[i].Name < c.Targets[j].Name
})
require.Equal(t, "bar", c.Targets[0].Name)
require.Equal(t, "buildbar", *c.Targets[0].Target)
require.Equal(t, "foo", c.Targets[1].Name)
require.Equal(t, "buildfoo", *c.Targets[1].Target)
}

func TestDevelop(t *testing.T) {
dt := []byte(`
services:
scratch:
build:
context: ./webapp
develop:
watch:
- path: ./webapp/html
action: sync
target: /var/www
ignore:
- node_modules/
`)

_, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
}

func TestCgroup(t *testing.T) {
dt := []byte(`
services:
scratch:
build:
context: ./webapp
cgroup: private
`)

_, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
}

func TestProjectName(t *testing.T) {
dt := []byte(`
services:
scratch:
build:
context: ./webapp
args:
PROJECT_NAME: ${COMPOSE_PROJECT_NAME}
`)

t.Run("default", func(t *testing.T) {
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
require.Len(t, c.Targets, 1)
require.Len(t, c.Targets[0].Args, 1)
require.Equal(t, map[string]*string{"PROJECT_NAME": ptrstr("bake")}, c.Targets[0].Args)
})

t.Run("env", func(t *testing.T) {
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, map[string]string{"COMPOSE_PROJECT_NAME": "foo"})
require.NoError(t, err)
require.Len(t, c.Targets, 1)
require.Len(t, c.Targets[0].Args, 1)
require.Equal(t, map[string]*string{"PROJECT_NAME": ptrstr("foo")}, c.Targets[0].Args)
})
}

// chdir changes the current working directory to the named directory,
// and then restore the original working directory at the end of the test.
func chdir(t *testing.T, dir string) {

@@ -1,601 +0,0 @@
package bake

import (
"bufio"
"cmp"
"context"
"fmt"
"io"
"io/fs"
"os"
"path/filepath"
"slices"
"strconv"
"strings"
"syscall"

"github.com/containerd/console"
"github.com/docker/buildx/build"
"github.com/docker/buildx/util/osutil"
"github.com/moby/buildkit/util/entitlements"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

type EntitlementKey string

const (
EntitlementKeyNetworkHost EntitlementKey = "network.host"
EntitlementKeySecurityInsecure EntitlementKey = "security.insecure"
EntitlementKeyFSRead EntitlementKey = "fs.read"
EntitlementKeyFSWrite EntitlementKey = "fs.write"
EntitlementKeyFS EntitlementKey = "fs"
EntitlementKeyImagePush EntitlementKey = "image.push"
EntitlementKeyImageLoad EntitlementKey = "image.load"
EntitlementKeyImage EntitlementKey = "image"
EntitlementKeySSH EntitlementKey = "ssh"
)

type EntitlementConf struct {
NetworkHost bool
SecurityInsecure bool
FSRead []string
FSWrite []string
ImagePush []string
ImageLoad []string
SSH bool
}

func ParseEntitlements(in []string) (EntitlementConf, error) {
var conf EntitlementConf
for _, e := range in {
switch e {
case string(EntitlementKeyNetworkHost):
conf.NetworkHost = true
case string(EntitlementKeySecurityInsecure):
conf.SecurityInsecure = true
case string(EntitlementKeySSH):
conf.SSH = true
default:
k, v, _ := strings.Cut(e, "=")
switch k {
case string(EntitlementKeyFSRead):
conf.FSRead = append(conf.FSRead, v)
case string(EntitlementKeyFSWrite):
conf.FSWrite = append(conf.FSWrite, v)
case string(EntitlementKeyFS):
conf.FSRead = append(conf.FSRead, v)
conf.FSWrite = append(conf.FSWrite, v)
case string(EntitlementKeyImagePush):
conf.ImagePush = append(conf.ImagePush, v)
case string(EntitlementKeyImageLoad):
conf.ImageLoad = append(conf.ImageLoad, v)
case string(EntitlementKeyImage):
conf.ImagePush = append(conf.ImagePush, v)
conf.ImageLoad = append(conf.ImageLoad, v)
default:
return conf, errors.Errorf("unknown entitlement key %q", k)
}
}
}
return conf, nil
}

func (c EntitlementConf) Validate(m map[string]build.Options) (EntitlementConf, error) {
var expected EntitlementConf

for _, v := range m {
if err := c.check(v, &expected); err != nil {
return EntitlementConf{}, err
}
}

return expected, nil
}

func (c EntitlementConf) check(bo build.Options, expected *EntitlementConf) error {
for _, e := range bo.Allow {
switch e {
case entitlements.EntitlementNetworkHost:
if !c.NetworkHost {
expected.NetworkHost = true
}
case entitlements.EntitlementSecurityInsecure:
if !c.SecurityInsecure {
expected.SecurityInsecure = true
}
}
}

rwPaths := map[string]struct{}{}
roPaths := map[string]struct{}{}

for _, p := range collectLocalPaths(bo.Inputs) {
roPaths[p] = struct{}{}
}

for _, p := range bo.ExportsLocalPathsTemporary {
rwPaths[p] = struct{}{}
}

for _, ce := range bo.CacheTo {
if ce.Type == "local" {
if dest, ok := ce.Attrs["dest"]; ok {
rwPaths[dest] = struct{}{}
}
}
}

for _, ci := range bo.CacheFrom {
if ci.Type == "local" {
if src, ok := ci.Attrs["src"]; ok {
roPaths[src] = struct{}{}
}
}
}

for _, secret := range bo.SecretSpecs {
if secret.FilePath != "" {
roPaths[secret.FilePath] = struct{}{}
}
}

for _, ssh := range bo.SSHSpecs {
for _, p := range ssh.Paths {
roPaths[p] = struct{}{}
}
if len(ssh.Paths) == 0 {
if !c.SSH {
expected.SSH = true
}
}
}

var err error
expected.FSRead, err = findMissingPaths(c.FSRead, roPaths)
if err != nil {
return err
}

expected.FSWrite, err = findMissingPaths(c.FSWrite, rwPaths)
if err != nil {
return err
}

return nil
}

func (c EntitlementConf) Prompt(ctx context.Context, isRemote bool, out io.Writer) error {
var term bool
if _, err := console.ConsoleFromFile(os.Stdin); err == nil {
term = true
}

var msgs []string
var flags []string

// these warnings are currently disabled to give users time to update
var msgsFS []string
var flagsFS []string

if c.NetworkHost {
msgs = append(msgs, " - Running build containers that can access host network")
flags = append(flags, string(EntitlementKeyNetworkHost))
}
if c.SecurityInsecure {
msgs = append(msgs, " - Running privileged containers that can make system changes")
flags = append(flags, string(EntitlementKeySecurityInsecure))
}

if c.SSH {
msgsFS = append(msgsFS, " - Forwarding default SSH agent socket")
flagsFS = append(flagsFS, string(EntitlementKeySSH))
}

roPaths, rwPaths, commonPaths := groupSamePaths(c.FSRead, c.FSWrite)
wd, err := os.Getwd()
if err != nil {
return errors.Wrap(err, "failed to get current working directory")
}
wd, err = filepath.EvalSymlinks(wd)
if err != nil {
return errors.Wrap(err, "failed to evaluate working directory")
}
roPaths = toRelativePaths(roPaths, wd)
rwPaths = toRelativePaths(rwPaths, wd)
commonPaths = toRelativePaths(commonPaths, wd)

if len(commonPaths) > 0 {
for _, p := range commonPaths {
msgsFS = append(msgsFS, fmt.Sprintf(" - Read and write access to path %s", p))
flagsFS = append(flagsFS, string(EntitlementKeyFS)+"="+p)
}
}

if len(roPaths) > 0 {
for _, p := range roPaths {
msgsFS = append(msgsFS, fmt.Sprintf(" - Read access to path %s", p))
flagsFS = append(flagsFS, string(EntitlementKeyFSRead)+"="+p)
}
}

if len(rwPaths) > 0 {
for _, p := range rwPaths {
msgsFS = append(msgsFS, fmt.Sprintf(" - Write access to path %s", p))
flagsFS = append(flagsFS, string(EntitlementKeyFSWrite)+"="+p)
}
}

if len(msgs) == 0 && len(msgsFS) == 0 {
return nil
}

fmt.Fprintf(out, "Your build is requesting privileges for following possibly insecure capabilities:\n\n")
for _, m := range slices.Concat(msgs, msgsFS) {
fmt.Fprintf(out, "%s\n", m)
}

for i, f := range flags {
flags[i] = "--allow=" + f
}
for i, f := range flagsFS {
flagsFS[i] = "--allow=" + f
}

if term {
fmt.Fprintf(out, "\nIn order to not see this message in the future pass %q to grant requested privileges.\n", strings.Join(slices.Concat(flags, flagsFS), " "))
} else {
fmt.Fprintf(out, "\nPass %q to grant requested privileges.\n", strings.Join(slices.Concat(flags, flagsFS), " "))
}

args := append([]string(nil), os.Args...)
if v, ok := os.LookupEnv("DOCKER_CLI_PLUGIN_ORIGINAL_CLI_COMMAND"); ok && v != "" {
args[0] = v
}
idx := slices.Index(args, "bake")

if idx != -1 {
fmt.Fprintf(out, "\nYour full command with requested privileges:\n\n")
fmt.Fprintf(out, "%s %s %s\n\n", strings.Join(args[:idx+1], " "), strings.Join(slices.Concat(flags, flagsFS), " "), strings.Join(args[idx+1:], " "))
}

fsEntitlementsEnabled := true
if isRemote {
if v, ok := os.LookupEnv("BAKE_ALLOW_REMOTE_FS_ACCESS"); ok {
vv, err := strconv.ParseBool(v)
if err != nil {
return errors.Wrapf(err, "failed to parse BAKE_ALLOW_REMOTE_FS_ACCESS value %q", v)
}
fsEntitlementsEnabled = !vv
}
}
v, fsEntitlementsSet := os.LookupEnv("BUILDX_BAKE_ENTITLEMENTS_FS")
if fsEntitlementsSet {
vv, err := strconv.ParseBool(v)
if err != nil {
return errors.Wrapf(err, "failed to parse BUILDX_BAKE_ENTITLEMENTS_FS value %q", v)
}
fsEntitlementsEnabled = vv
}

if !fsEntitlementsEnabled && len(msgs) == 0 {
return nil
}
if fsEntitlementsEnabled && !fsEntitlementsSet && len(msgsFS) != 0 {
fmt.Fprintf(out, "To disable filesystem entitlements checks, you can set BUILDX_BAKE_ENTITLEMENTS_FS=0 .\n\n")
}

if term {
fmt.Fprintf(out, "Do you want to grant requested privileges and continue? [y/N] ")
reader := bufio.NewReader(os.Stdin)
answerCh := make(chan string, 1)
go func() {
answer, _, _ := reader.ReadLine()
answerCh <- string(answer)
close(answerCh)
}()

select {
case <-ctx.Done():
case answer := <-answerCh:
if strings.ToLower(string(answer)) == "y" {
return nil
}
}
}

return errors.Errorf("additional privileges requested")
}

func isParentOrEqualPath(p, parent string) bool {
if p == parent || parent == "/" {
return true
}
if strings.HasPrefix(p, filepath.Clean(parent+string(filepath.Separator))) {
return true
}
return false
}

func findMissingPaths(set []string, paths map[string]struct{}) ([]string, error) {
set, allowAny, err := evaluatePaths(set)
if err != nil {
return nil, err
} else if allowAny {
return nil, nil
}

paths, err = evaluateToExistingPaths(paths)
if err != nil {
return nil, err
}
paths, err = dedupPaths(paths)
if err != nil {
return nil, err
}

out := make([]string, 0, len(paths))
loop0:
for p := range paths {
for _, c := range set {
if isParentOrEqualPath(p, c) {
continue loop0
}
}
out = append(out, p)
}
if len(out) == 0 {
return nil, nil
}

slices.Sort(out)

return out, nil
}

func dedupPaths(in map[string]struct{}) (map[string]struct{}, error) {
arr := make([]string, 0, len(in))
for p := range in {
arr = append(arr, filepath.Clean(p))
}

slices.SortFunc(arr, func(a, b string) int {
return cmp.Compare(len(a), len(b))
})

m := make(map[string]struct{}, len(arr))
loop0:
for _, p := range arr {
for parent := range m {
if strings.HasPrefix(p, parent+string(filepath.Separator)) {
continue loop0
}
}
m[p] = struct{}{}
}
return m, nil
}

func toRelativePaths(in []string, wd string) []string {
out := make([]string, 0, len(in))
for _, p := range in {
rel, err := filepath.Rel(wd, p)
if err == nil {
// allow up to one level of ".." in the path
if !strings.HasPrefix(rel, ".."+string(filepath.Separator)+"..") {
out = append(out, rel)
continue
}
}
out = append(out, p)
}
return out
}

func groupSamePaths(in1, in2 []string) ([]string, []string, []string) {
if in1 == nil || in2 == nil {
return in1, in2, nil
}

slices.Sort(in1)
slices.Sort(in2)

common := []string{}
i, j := 0, 0

for i < len(in1) && j < len(in2) {
switch {
case in1[i] == in2[j]:
common = append(common, in1[i])
i++
j++
case in1[i] < in2[j]:
i++
default:
j++
}
}

in1 = removeCommonPaths(in1, common)
in2 = removeCommonPaths(in2, common)

return in1, in2, common
}

func removeCommonPaths(in, common []string) []string {
filtered := make([]string, 0, len(in))
commonIndex := 0
for _, path := range in {
if commonIndex < len(common) && path == common[commonIndex] {
commonIndex++
continue
}
filtered = append(filtered, path)
}
return filtered
}

func evaluatePaths(in []string) ([]string, bool, error) {
out := make([]string, 0, len(in))
allowAny := false
for _, p := range in {
if p == "*" {
allowAny = true
continue
}
v, err := filepath.Abs(p)
if err != nil {
logrus.Warnf("failed to evaluate entitlement path %q: %v", p, err)
continue
}
v, rest, err := evaluateToExistingPath(v)
if err != nil {
return nil, false, errors.Wrapf(err, "failed to evaluate path %q", p)
}
v, err = osutil.GetLongPathName(v)
if err != nil {
return nil, false, errors.Wrapf(err, "failed to evaluate path %q", p)
}
if rest != "" {
v = filepath.Join(v, rest)
}
out = append(out, v)
}
return out, allowAny, nil
}

func evaluateToExistingPaths(in map[string]struct{}) (map[string]struct{}, error) {
m := make(map[string]struct{}, len(in))
for p := range in {
v, _, err := evaluateToExistingPath(p)
if err != nil {
return nil, errors.Wrapf(err, "failed to evaluate path %q", p)
}
v, err = osutil.GetLongPathName(v)
if err != nil {
return nil, errors.Wrapf(err, "failed to evaluate path %q", p)
}
m[v] = struct{}{}
}
return m, nil
}

func evaluateToExistingPath(in string) (string, string, error) {
in, err := filepath.Abs(in)
if err != nil {
return "", "", err
}

volLen := volumeNameLen(in)
pathSeparator := string(os.PathSeparator)

if volLen < len(in) && os.IsPathSeparator(in[volLen]) {
volLen++
}
vol := in[:volLen]
dest := vol
linksWalked := 0
var end int
for start := volLen; start < len(in); start = end {
for start < len(in) && os.IsPathSeparator(in[start]) {
start++
}
end = start
for end < len(in) && !os.IsPathSeparator(in[end]) {
end++
}

if end == start {
break
} else if in[start:end] == "." {
continue
} else if in[start:end] == ".." {
var r int
for r = len(dest) - 1; r >= volLen; r-- {
if os.IsPathSeparator(dest[r]) {
break
}
}
if r < volLen || dest[r+1:] == ".." {
if len(dest) > volLen {
dest += pathSeparator
}
dest += ".."
} else {
dest = dest[:r]
}
continue
}

if len(dest) > volumeNameLen(dest) && !os.IsPathSeparator(dest[len(dest)-1]) {
dest += pathSeparator
}
dest += in[start:end]

fi, err := os.Lstat(dest)
if err != nil {
// If the component doesn't exist, return the last valid path
if os.IsNotExist(err) {
for r := len(dest) - 1; r >= volLen; r-- {
if os.IsPathSeparator(dest[r]) {
return dest[:r], in[start:], nil
}
}
return vol, in[start:], nil
}
return "", "", err
}

if fi.Mode()&fs.ModeSymlink == 0 {
if !fi.Mode().IsDir() && end < len(in) {
return "", "", syscall.ENOTDIR
}
continue
}

linksWalked++
if linksWalked > 255 {
return "", "", errors.New("too many symlinks")
}

link, err := os.Readlink(dest)
if err != nil {
return "", "", err
}

in = link + in[end:]

v := volumeNameLen(link)
if v > 0 {
if v < len(link) && os.IsPathSeparator(link[v]) {
v++
}
vol = link[:v]
dest = vol
end = len(vol)
} else if len(link) > 0 && os.IsPathSeparator(link[0]) {
dest = link[:1]
end = 1
vol = link[:1]
volLen = 1
} else {
var r int
for r = len(dest) - 1; r >= volLen; r-- {
if os.IsPathSeparator(dest[r]) {
break
}
}
if r < volLen {
dest = vol
} else {
dest = dest[:r]
}
end = 0
}
}
return filepath.Clean(dest), "", nil
}

func volumeNameLen(s string) int {
return len(filepath.VolumeName(s))
}
@@ -1,486 +0,0 @@
package bake

import (
"fmt"
"os"
"path/filepath"
"slices"
"testing"

"github.com/docker/buildx/build"
"github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/util/osutil"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/util/entitlements"
"github.com/stretchr/testify/require"
)

func TestEvaluateToExistingPath(t *testing.T) {
tempDir, err := osutil.GetLongPathName(t.TempDir())
require.NoError(t, err)

// Setup temporary directory structure for testing
existingFile := filepath.Join(tempDir, "existing_file")
require.NoError(t, os.WriteFile(existingFile, []byte("test"), 0644))

existingDir := filepath.Join(tempDir, "existing_dir")
require.NoError(t, os.Mkdir(existingDir, 0755))

symlinkToFile := filepath.Join(tempDir, "symlink_to_file")
require.NoError(t, os.Symlink(existingFile, symlinkToFile))

symlinkToDir := filepath.Join(tempDir, "symlink_to_dir")
require.NoError(t, os.Symlink(existingDir, symlinkToDir))

nonexistentPath := filepath.Join(tempDir, "nonexistent", "path", "file.txt")

tests := []struct {
name string
input string
expected string
expectErr bool
}{
{
name: "Existing file",
input: existingFile,
expected: existingFile,
expectErr: false,
},
{
name: "Existing directory",
input: existingDir,
expected: existingDir,
expectErr: false,
},
{
name: "Symlink to file",
input: symlinkToFile,
expected: existingFile,
expectErr: false,
},
{
name: "Symlink to directory",
input: symlinkToDir,
expected: existingDir,
expectErr: false,
},
{
name: "Non-existent path",
input: nonexistentPath,
expected: tempDir,
expectErr: false,
},
{
name: "Non-existent intermediate path",
input: filepath.Join(tempDir, "nonexistent", "file.txt"),
expected: tempDir,
expectErr: false,
},
{
name: "Root path",
input: "/",
expected: func() string {
root, _ := filepath.Abs("/")
return root
}(),
expectErr: false,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result, _, err := evaluateToExistingPath(tt.input)

if tt.expectErr {
require.Error(t, err)
} else {
require.NoError(t, err)
require.Equal(t, tt.expected, result)
}
})
}
}

func TestDedupePaths(t *testing.T) {
wd := osutil.GetWd()
tcases := []struct {
in map[string]struct{}
out map[string]struct{}
}{
{
in: map[string]struct{}{
"/a/b/c": {},
"/a/b/d": {},
"/a/b/e": {},
},
out: map[string]struct{}{
"/a/b/c": {},
"/a/b/d": {},
"/a/b/e": {},
},
},
{
in: map[string]struct{}{
"/a/b/c": {},
"/a/b/c/d": {},
"/a/b/c/d/e": {},
"/a/b/../b/c": {},
},
out: map[string]struct{}{
"/a/b/c": {},
},
},
{
in: map[string]struct{}{
filepath.Join(wd, "a/b/c"): {},
filepath.Join(wd, "../aa"): {},
filepath.Join(wd, "a/b"): {},
filepath.Join(wd, "a/b/d"): {},
filepath.Join(wd, "../aa/b"): {},
filepath.Join(wd, "../../bb"): {},
},
out: map[string]struct{}{
"a/b": {},
"../aa": {},
filepath.Join(wd, "../../bb"): {},
},
},
}

for i, tc := range tcases {
t.Run(fmt.Sprintf("case%d", i), func(t *testing.T) {
out, err := dedupPaths(tc.in)
if err != nil {
require.NoError(t, err)
}
// convert to relative paths as that is shown to user
arr := make([]string, 0, len(out))
for k := range out {
arr = append(arr, k)
}
require.NoError(t, err)
arr = toRelativePaths(arr, wd)
m := make(map[string]struct{})
for _, v := range arr {
m[filepath.ToSlash(v)] = struct{}{}
}
o := make(map[string]struct{}, len(tc.out))
for k := range tc.out {
o[filepath.ToSlash(k)] = struct{}{}
}
require.Equal(t, o, m)
})
}
}

func TestValidateEntitlements(t *testing.T) {
dir1 := t.TempDir()
dir2 := t.TempDir()

// the paths returned by entitlements validation will have symlinks resolved
expDir1, err := filepath.EvalSymlinks(dir1)
require.NoError(t, err)
expDir2, err := filepath.EvalSymlinks(dir2)
require.NoError(t, err)

escapeLink := filepath.Join(dir1, "escape_link")
require.NoError(t, os.Symlink("../../aa", escapeLink))

wd, err := os.Getwd()
require.NoError(t, err)
expWd, err := filepath.EvalSymlinks(wd)
require.NoError(t, err)

tcases := []struct {
name string
conf EntitlementConf
opt build.Options
expected EntitlementConf
}{
{
name: "No entitlements",
opt: build.Options{
Inputs: build.Inputs{
ContextState: &llb.State{},
},
},
},
{
name: "NetworkHostMissing",
opt: build.Options{
Allow: []entitlements.Entitlement{
entitlements.EntitlementNetworkHost,
},
},
expected: EntitlementConf{
NetworkHost: true,
FSRead: []string{expWd},
},
},
{
name: "NetworkHostSet",
conf: EntitlementConf{
NetworkHost: true,
},
opt: build.Options{
Allow: []entitlements.Entitlement{
entitlements.EntitlementNetworkHost,
},
},
expected: EntitlementConf{
FSRead: []string{expWd},
},
},
{
name: "SecurityAndNetworkHostMissing",
opt: build.Options{
Allow: []entitlements.Entitlement{
entitlements.EntitlementNetworkHost,
entitlements.EntitlementSecurityInsecure,
},
},
expected: EntitlementConf{
NetworkHost: true,
SecurityInsecure: true,
FSRead: []string{expWd},
},
},
{
name: "SecurityMissingAndNetworkHostSet",
conf: EntitlementConf{
NetworkHost: true,
},
opt: build.Options{
Allow: []entitlements.Entitlement{
entitlements.EntitlementNetworkHost,
entitlements.EntitlementSecurityInsecure,
},
},
expected: EntitlementConf{
SecurityInsecure: true,
FSRead: []string{expWd},
},
},
{
name: "SSHMissing",
opt: build.Options{
SSHSpecs: []*pb.SSH{
{
ID: "test",
},
},
},
expected: EntitlementConf{
SSH: true,
FSRead: []string{expWd},
},
},
{
name: "ExportLocal",
opt: build.Options{
ExportsLocalPathsTemporary: []string{
dir1,
filepath.Join(dir1, "subdir"),
dir2,
},
},
expected: EntitlementConf{
FSWrite: func() []string {
exp := []string{expDir1, expDir2}
slices.Sort(exp)
return exp
}(),
FSRead: []string{expWd},
},
},
{
name: "SecretFromSubFile",
opt: build.Options{
SecretSpecs: []*pb.Secret{
{
FilePath: filepath.Join(dir1, "subfile"),
},
},
},
conf: EntitlementConf{
FSRead: []string{wd, dir1},
},
},
{
name: "SecretFromEscapeLink",
opt: build.Options{
SecretSpecs: []*pb.Secret{
{
FilePath: escapeLink,
},
},
},
conf: EntitlementConf{
FSRead: []string{wd, dir1},
},
expected: EntitlementConf{
FSRead: []string{filepath.Join(expDir1, "../..")},
},
},
{
name: "SecretFromEscapeLinkAllowRoot",
opt: build.Options{
SecretSpecs: []*pb.Secret{
{
FilePath: escapeLink,
},
},
},
conf: EntitlementConf{
FSRead: []string{"/"},
},
expected: EntitlementConf{
FSRead: func() []string {
// on windows root (/) is only allowed if it is the same volume as wd
if filepath.VolumeName(wd) == filepath.VolumeName(escapeLink) {
return nil
}
// if not, then escapeLink is not allowed
exp, _, err := evaluateToExistingPath(escapeLink)
require.NoError(t, err)
exp, err = filepath.EvalSymlinks(exp)
require.NoError(t, err)
return []string{exp}
}(),
},
},
{
name: "SecretFromEscapeLinkAllowAny",
opt: build.Options{
SecretSpecs: []*pb.Secret{
{
FilePath: escapeLink,
},
},
},
conf: EntitlementConf{
FSRead: []string{"*"},
},
expected: EntitlementConf{},
},
{
name: "NonExistingAllowedPathSubpath",
opt: build.Options{
ExportsLocalPathsTemporary: []string{
dir1,
},
},
conf: EntitlementConf{
FSRead: []string{wd},
FSWrite: []string{filepath.Join(dir1, "not/exists")},
},
expected: EntitlementConf{
FSWrite: []string{expDir1}, // dir1 is still needed as only subpath was allowed
},
},
{
name: "NonExistingAllowedPathMatches",
opt: build.Options{
ExportsLocalPathsTemporary: []string{
filepath.Join(dir1, "not/exists"),
},
},
conf: EntitlementConf{
FSRead: []string{wd},
FSWrite: []string{filepath.Join(dir1, "not/exists")},
},
expected: EntitlementConf{
FSWrite: []string{expDir1}, // dir1 is still needed as build also needs to write not/exists directory
},
},
{
name: "NonExistingBuildPath",
opt: build.Options{
ExportsLocalPathsTemporary: []string{
filepath.Join(dir1, "not/exists"),
},
},
conf: EntitlementConf{
FSRead: []string{wd},
FSWrite: []string{dir1},
},
},
}

for _, tc := range tcases {
t.Run(tc.name, func(t *testing.T) {
expected, err := tc.conf.Validate(map[string]build.Options{"test": tc.opt})
require.NoError(t, err)
require.Equal(t, tc.expected, expected)
})
}
}

func TestGroupSamePaths(t *testing.T) {
tests := []struct {
name string
in1 []string
in2 []string
expected1 []string
expected2 []string
expectedC []string
}{
{
name: "All common paths",
in1: []string{"/path/a", "/path/b", "/path/c"},
in2: []string{"/path/a", "/path/b", "/path/c"},
expected1: []string{},
expected2: []string{},
expectedC: []string{"/path/a", "/path/b", "/path/c"},
},
{
name: "No common paths",
in1: []string{"/path/a", "/path/b"},
in2: []string{"/path/c", "/path/d"},
expected1: []string{"/path/a", "/path/b"},
expected2: []string{"/path/c", "/path/d"},
expectedC: []string{},
},
{
name: "Some common paths",
in1: []string{"/path/a", "/path/b", "/path/c"},
in2: []string{"/path/b", "/path/c", "/path/d"},
expected1: []string{"/path/a"},
expected2: []string{"/path/d"},
expectedC: []string{"/path/b", "/path/c"},
},
{
name: "Empty inputs",
in1: []string{},
in2: []string{},
expected1: []string{},
expected2: []string{},
expectedC: []string{},
},
{
name: "One empty input",
in1: []string{"/path/a", "/path/b"},
in2: []string{},
expected1: []string{"/path/a", "/path/b"},
expected2: []string{},
expectedC: []string{},
},
{
name: "Unsorted inputs with common paths",
in1: []string{"/path/c", "/path/a", "/path/b"},
in2: []string{"/path/b", "/path/c", "/path/a"},
expected1: []string{},
expected2: []string{},
expectedC: []string{"/path/a", "/path/b", "/path/c"},
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
out1, out2, common := groupSamePaths(tt.in1, tt.in2)
require.Equal(t, tt.expected1, out1, "in1 should match expected1")
require.Equal(t, tt.expected2, out2, "in2 should match expected2")
require.Equal(t, tt.expectedC, common, "common should match expectedC")
})
}
}
@@ -56,7 +56,7 @@ func formatHCLError(err error, files []File) error {
break
}
}
src := &errdefs.Source{
src := errdefs.Source{
Info: &pb.SourceInfo{
Filename: d.Subject.Filename,
Data: dt,
@@ -72,7 +72,7 @@ func formatHCLError(err error, files []File) error {

func toErrRange(in *hcl.Range) *pb.Range {
return &pb.Range{
Start: &pb.Position{Line: int32(in.Start.Line), Character: int32(in.Start.Column)},
End: &pb.Position{Line: int32(in.End.Line), Character: int32(in.End.Column)},
Start: pb.Position{Line: int32(in.Start.Line), Character: int32(in.Start.Column)},
End: pb.Position{Line: int32(in.End.Line), Character: int32(in.End.Column)},
}
}

764 bake/hcl_test.go
File diff suppressed because it is too large
@@ -1,355 +0,0 @@
Copyright (c) 2014 HashiCorp, Inc.

Mozilla Public License, version 2.0

1. Definitions

1.1. “Contributor”

means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.

1.2. “Contributor Version”

means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor’s Contribution.

1.3. “Contribution”

means Covered Software of a particular Contributor.

1.4. “Covered Software”

means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.

1.5. “Incompatible With Secondary Licenses”
means

a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or

b. that the Covered Software was made available under the terms of version
1.1 or earlier of the License, but not also under the terms of a
Secondary License.

1.6. “Executable Form”

means any form of the work other than Source Code Form.

1.7. “Larger Work”

means a work that combines Covered Software with other material, in a separate
file or files, that is not Covered Software.

1.8. “License”

means this document.

1.9. “Licensable”

means having the right to grant, to the maximum extent possible, whether at the
time of the initial grant or subsequently, any and all of the rights conveyed by
this License.

1.10. “Modifications”

means any of the following:

a. any file in Source Code Form that results from an addition to, deletion
from, or modification of the contents of Covered Software; or

b. any new file in Source Code Form that contains any Covered Software.

1.11. “Patent Claims” of a Contributor

means any patent claim(s), including without limitation, method, process,
and apparatus claims, in any patent Licensable by such Contributor that
would be infringed, but for the grant of the License, by the making,
using, selling, offering for sale, having made, import, or transfer of
either its Contributions or its Contributor Version.

1.12. “Secondary License”

means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.

1.13. “Source Code Form”

means the form of the work preferred for making modifications.

1.14. “You” (or “Your”)

means an individual or a legal entity exercising rights under this
License. For legal entities, “You” includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, “control” means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.


2. License Grants and Conditions

2.1. Grants

Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:

a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or as
part of a Larger Work; and

b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its Contributions
or its Contributor Version.

2.2. Effective Date

The licenses granted in Section 2.1 with respect to any Contribution become
effective for each Contribution on the date the Contributor first distributes
such Contribution.

2.3. Limitations on Grant Scope

The licenses granted in this Section 2 are the only rights granted under this
License. No additional rights or licenses will be implied from the distribution
or licensing of Covered Software under this License. Notwithstanding Section
2.1(b) above, no patent license is granted by a Contributor:

a. for any code that a Contributor has removed from Covered Software; or

b. for infringements caused by: (i) Your and any other third party’s
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or

c. under Patent Claims infringed by Covered Software in the absence of its
Contributions.

This License does not grant any rights in the trademarks, service marks, or
logos of any Contributor (except as may be necessary to comply with the
notice requirements in Section 3.4).

2.4. Subsequent Licenses

No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this License
(see Section 10.2) or under the terms of a Secondary License (if permitted
under the terms of Section 3.3).

2.5. Representation

Each Contributor represents that the Contributor believes its Contributions
are its original creation(s) or it has sufficient rights to grant the
rights to its Contributions conveyed by this License.

2.6. Fair Use

This License is not intended to limit any rights You have under applicable
copyright doctrines of fair use, fair dealing, or other equivalents.

2.7. Conditions

Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.


3. Responsibilities

3.1. Distribution of Source Form

All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under the
terms of this License. You must inform recipients that the Source Code Form
of the Covered Software is governed by the terms of this License, and how
they can obtain a copy of this License. You may not attempt to alter or
restrict the recipients’ rights in the Source Code Form.

3.2. Distribution of Executable Form

If You distribute Covered Software in Executable Form then:

a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
|
||||
Executable Form how they can obtain a copy of such Source Code Form by
|
||||
reasonable means in a timely manner, at a charge no more than the cost
|
||||
of distribution to the recipient; and
|
||||
|
||||
b. You may distribute such Executable Form under the terms of this License,
|
||||
or sublicense it under different terms, provided that the license for
|
||||
the Executable Form does not attempt to limit or alter the recipients’
|
||||
rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for the
|
||||
Covered Software. If the Larger Work is a combination of Covered Software
|
||||
with a work governed by one or more Secondary Licenses, and the Covered
|
||||
Software is not Incompatible With Secondary Licenses, this License permits
|
||||
You to additionally distribute such Covered Software under the terms of
|
||||
such Secondary License(s), so that the recipient of the Larger Work may, at
|
||||
their option, further distribute the Covered Software under the terms of
|
||||
either this License or such Secondary License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices (including
|
||||
copyright notices, patent notices, disclaimers of warranty, or limitations
|
||||
of liability) contained within the Source Code Form of the Covered
|
||||
Software, except that You may alter any license notices to the extent
|
||||
required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on behalf
|
||||
of any Contributor. You must make it absolutely clear that any such
|
||||
warranty, support, indemnity, or liability obligation is offered by You
|
||||
alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this License
|
||||
with respect to some or all of the Covered Software due to statute, judicial
|
||||
order, or regulation then You must: (a) comply with the terms of this License
|
||||
to the maximum extent possible; and (b) describe the limitations and the code
|
||||
they affect. Such description must be placed in a text file included with all
|
||||
distributions of the Covered Software under this License. Except to the
|
||||
extent prohibited by statute or regulation, such description must be
|
||||
sufficiently detailed for a recipient of ordinary skill to be able to
|
||||
understand it.
|
||||
|
||||
5. Termination
|
||||
|
||||
5.1. The rights granted under this License will terminate automatically if You
|
||||
fail to comply with any of its terms. However, if You become compliant,
|
||||
then the rights granted under this License from a particular Contributor
|
||||
are reinstated (a) provisionally, unless and until such Contributor
|
||||
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
|
||||
if such Contributor fails to notify You of the non-compliance by some
|
||||
reasonable means prior to 60 days after You have come back into compliance.
|
||||
Moreover, Your grants from a particular Contributor are reinstated on an
|
||||
ongoing basis if such Contributor notifies You of the non-compliance by
|
||||
some reasonable means, this is the first time You have received notice of
|
||||
non-compliance with this License from such Contributor, and You become
|
||||
compliant prior to 30 days after Your receipt of the notice.
|
||||
|
||||
5.2. If You initiate litigation against any entity by asserting a patent
|
||||
infringement claim (excluding declaratory judgment actions, counter-claims,
|
||||
and cross-claims) alleging that a Contributor Version directly or
|
||||
indirectly infringes any patent, then the rights granted to You by any and
|
||||
all Contributors for the Covered Software under Section 2.1 of this License
|
||||
shall terminate.
|
||||
|
||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||
license agreements (excluding distributors and resellers) which have been
|
||||
validly granted by You or Your distributors under this License prior to
|
||||
termination shall survive termination.
|
||||
|
||||
6. Disclaimer of Warranty
|
||||
|
||||
Covered Software is provided under this License on an “as is” basis, without
|
||||
warranty of any kind, either expressed, implied, or statutory, including,
|
||||
without limitation, warranties that the Covered Software is free of defects,
|
||||
merchantable, fit for a particular purpose or non-infringing. The entire
|
||||
risk as to the quality and performance of the Covered Software is with You.
|
||||
Should any Covered Software prove defective in any respect, You (not any
|
||||
Contributor) assume the cost of any necessary servicing, repair, or
|
||||
correction. This disclaimer of warranty constitutes an essential part of this
|
||||
License. No use of any Covered Software is authorized under this License
|
||||
except under this disclaimer.
|
||||
|
||||
7. Limitation of Liability
|
||||
|
||||
Under no circumstances and under no legal theory, whether tort (including
|
||||
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||
distributes Covered Software as permitted above, be liable to You for any
|
||||
direct, indirect, special, incidental, or consequential damages of any
|
||||
character including, without limitation, damages for lost profits, loss of
|
||||
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses, even if such party shall have been
|
||||
informed of the possibility of such damages. This limitation of liability
|
||||
shall not apply to liability for death or personal injury resulting from such
|
||||
party’s negligence to the extent applicable law prohibits such limitation.
|
||||
Some jurisdictions do not allow the exclusion or limitation of incidental or
|
||||
consequential damages, so this exclusion and limitation may not apply to You.
|
||||
|
||||
8. Litigation
|
||||
|
||||
Any litigation relating to this License may be brought only in the courts of
|
||||
a jurisdiction where the defendant maintains its principal place of business
|
||||
and such litigation shall be governed by laws of that jurisdiction, without
|
||||
reference to its conflict-of-law provisions. Nothing in this Section shall
|
||||
prevent a party’s ability to bring cross-claims or counter-claims.
|
||||
|
||||
9. Miscellaneous
|
||||
|
||||
This License represents the complete agreement concerning the subject matter
|
||||
hereof. If any provision of this License is held to be unenforceable, such
|
||||
provision shall be reformed only to the extent necessary to make it
|
||||
enforceable. Any law or regulation which provides that the language of a
|
||||
contract shall be construed against the drafter shall not be used to construe
|
||||
this License against a Contributor.
|
||||
|
||||
|
||||
10. Versions of the License
|
||||
|
||||
10.1. New Versions
|
||||
|
||||
Mozilla Foundation is the license steward. Except as provided in Section
|
||||
10.3, no one other than the license steward has the right to modify or
|
||||
publish new versions of this License. Each version will be given a
|
||||
distinguishing version number.
|
||||
|
||||
10.2. Effect of New Versions
|
||||
|
||||
You may distribute the Covered Software under the terms of the version of
|
||||
the License under which You originally received the Covered Software, or
|
||||
under the terms of any subsequent version published by the license
|
||||
steward.
|
||||
|
||||
10.3. Modified Versions
|
||||
|
||||
If you create software not governed by this License, and you want to
|
||||
create a new license for such software, you may create and use a modified
|
||||
version of this License if you rename the license and remove any
|
||||
references to the name of the license steward (except to note that such
|
||||
modified license differs from this License).
|
||||
|
||||
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
|
||||
If You choose to distribute Source Code Form that is Incompatible With
|
||||
Secondary Licenses under the terms of this version of the License, the
|
||||
notice described in Exhibit B of this License must be attached.
|
||||
|
||||
Exhibit A - Source Code Form License Notice
|
||||
|
||||
This Source Code Form is subject to the
|
||||
terms of the Mozilla Public License, v.
|
||||
2.0. If a copy of the MPL was not
|
||||
distributed with this file, You can
|
||||
obtain one at
|
||||
http://mozilla.org/MPL/2.0/.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular file, then
|
||||
You may include the notice in a location (such as a LICENSE file in a relevant
|
||||
directory) where a recipient would be likely to look for such a notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
Exhibit B - “Incompatible With Secondary Licenses” Notice
|
||||
|
||||
This Source Code Form is “Incompatible
|
||||
With Secondary Licenses”, as defined by
|
||||
the Mozilla Public License, v. 2.0.
|
||||
@@ -14,7 +14,15 @@ func funcCalls(exp hcl.Expression) ([]string, hcl.Diagnostics) {
 	if !ok {
 		fns, err := jsonFuncCallsRecursive(exp)
 		if err != nil {
-			return nil, wrapErrorDiagnostic("Invalid expression", err, exp.Range().Ptr(), exp.Range().Ptr())
+			return nil, hcl.Diagnostics{
+				&hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Invalid expression",
+					Detail:   err.Error(),
+					Subject:  exp.Range().Ptr(),
+					Context:  exp.Range().Ptr(),
+				},
+			}
 		}
 		return fns, nil
 	}
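The hunk above replaces the `wrapErrorDiagnostic` helper with an inline `hcl.Diagnostics` literal. For readers unfamiliar with the HCL API, here is a minimal, self-contained sketch of constructing and rendering such a diagnostic; the filename and detail string are illustrative stand-ins, not part of the change:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
)

func main() {
	// Build a diagnostic the same way the new code in the hunk does.
	rng := hcl.Range{Filename: "docker-bake.hcl", Start: hcl.InitialPos, End: hcl.InitialPos}
	diags := hcl.Diagnostics{
		&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid expression",
			Detail:   "unexpected token", // stands in for err.Error()
			Subject:  rng.Ptr(),
			Context:  rng.Ptr(),
		},
	}
	fmt.Println(diags.HasErrors()) // true
	fmt.Println(diags.Error())     // renders roughly "docker-bake.hcl:1,1-1: Invalid expression; unexpected token"
}
```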
@@ -1,348 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package gohcl

import (
	"fmt"
	"reflect"

	"github.com/hashicorp/hcl/v2"
	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
	"github.com/zclconf/go-cty/cty/gocty"
)

// DecodeOptions allows customizing sections of the decoding process.
type DecodeOptions struct {
	ImpliedType func(gv interface{}) (cty.Type, error)
	Convert     func(in cty.Value, want cty.Type) (cty.Value, error)
}

func (o DecodeOptions) DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
	o = o.withDefaults()

	rv := reflect.ValueOf(val)
	if rv.Kind() != reflect.Ptr {
		panic(fmt.Sprintf("target value must be a pointer, not %s", rv.Type().String()))
	}

	return o.decodeBodyToValue(body, ctx, rv.Elem())
}

// DecodeBody extracts the configuration within the given body into the given
// value. This value must be a non-nil pointer to either a struct or
// a map, where in the former case the configuration will be decoded using
// struct tags and in the latter case only attributes are allowed and their
// values are decoded into the map.
//
// The given EvalContext is used to resolve any variables or functions in
// expressions encountered while decoding. This may be nil to require only
// constant values, for simple applications that do not support variables or
// functions.
//
// The returned diagnostics should be inspected with its HasErrors method to
// determine if the populated value is valid and complete. If error diagnostics
// are returned then the given value may have been partially-populated but
// may still be accessed by a careful caller for static analysis and editor
// integration use-cases.
func DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
	return DecodeOptions{}.DecodeBody(body, ctx, val)
}

func (o DecodeOptions) decodeBodyToValue(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
	et := val.Type()
	switch et.Kind() {
	case reflect.Struct:
		return o.decodeBodyToStruct(body, ctx, val)
	case reflect.Map:
		return o.decodeBodyToMap(body, ctx, val)
	default:
		panic(fmt.Sprintf("target value must be pointer to struct or map, not %s", et.String()))
	}
}

func (o DecodeOptions) decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
	schema, partial := ImpliedBodySchema(val.Interface())

	var content *hcl.BodyContent
	var leftovers hcl.Body
	var diags hcl.Diagnostics
	if partial {
		content, leftovers, diags = body.PartialContent(schema)
	} else {
		content, diags = body.Content(schema)
	}
	if content == nil {
		return diags
	}

	tags := getFieldTags(val.Type())

	if tags.Body != nil {
		fieldIdx := *tags.Body
		field := val.Type().Field(fieldIdx)
		fieldV := val.Field(fieldIdx)
		switch {
		case bodyType.AssignableTo(field.Type):
			fieldV.Set(reflect.ValueOf(body))

		default:
			diags = append(diags, o.decodeBodyToValue(body, ctx, fieldV)...)
		}
	}

	if tags.Remain != nil {
		fieldIdx := *tags.Remain
		field := val.Type().Field(fieldIdx)
		fieldV := val.Field(fieldIdx)
		switch {
		case bodyType.AssignableTo(field.Type):
			fieldV.Set(reflect.ValueOf(leftovers))
		case attrsType.AssignableTo(field.Type):
			attrs, attrsDiags := leftovers.JustAttributes()
			if len(attrsDiags) > 0 {
				diags = append(diags, attrsDiags...)
			}
			fieldV.Set(reflect.ValueOf(attrs))
		default:
			diags = append(diags, o.decodeBodyToValue(leftovers, ctx, fieldV)...)
		}
	}

	for name, fieldIdx := range tags.Attributes {
		attr := content.Attributes[name]
		field := val.Type().Field(fieldIdx)
		fieldV := val.Field(fieldIdx)

		if attr == nil {
			if !exprType.AssignableTo(field.Type) {
				continue
			}

			// As a special case, if the target is of type hcl.Expression then
			// we'll assign an actual expression that evaluates to a cty null,
			// so the caller can deal with it within the cty realm rather
			// than within the Go realm.
			synthExpr := hcl.StaticExpr(cty.NullVal(cty.DynamicPseudoType), body.MissingItemRange())
			fieldV.Set(reflect.ValueOf(synthExpr))
			continue
		}

		switch {
		case attrType.AssignableTo(field.Type):
			fieldV.Set(reflect.ValueOf(attr))
		case exprType.AssignableTo(field.Type):
			fieldV.Set(reflect.ValueOf(attr.Expr))
		default:
			diags = append(diags, o.DecodeExpression(
				attr.Expr, ctx, fieldV.Addr().Interface(),
			)...)
		}
	}

	blocksByType := content.Blocks.ByType()

	for typeName, fieldIdx := range tags.Blocks {
		blocks := blocksByType[typeName]
		field := val.Type().Field(fieldIdx)

		ty := field.Type
		isSlice := false
		isPtr := false
		if ty.Kind() == reflect.Slice {
			isSlice = true
			ty = ty.Elem()
		}
		if ty.Kind() == reflect.Ptr {
			isPtr = true
			ty = ty.Elem()
		}

		if len(blocks) > 1 && !isSlice {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  fmt.Sprintf("Duplicate %s block", typeName),
				Detail: fmt.Sprintf(
					"Only one %s block is allowed. Another was defined at %s.",
					typeName, blocks[0].DefRange.String(),
				),
				Subject: &blocks[1].DefRange,
			})
			continue
		}

		if len(blocks) == 0 {
			if isSlice || isPtr {
				if val.Field(fieldIdx).IsNil() {
					val.Field(fieldIdx).Set(reflect.Zero(field.Type))
				}
			} else {
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  fmt.Sprintf("Missing %s block", typeName),
					Detail:   fmt.Sprintf("A %s block is required.", typeName),
					Subject:  body.MissingItemRange().Ptr(),
				})
			}
			continue
		}

		switch {
		case isSlice:
			elemType := ty
			if isPtr {
				elemType = reflect.PointerTo(ty)
			}
			sli := val.Field(fieldIdx)
			if sli.IsNil() {
				sli = reflect.MakeSlice(reflect.SliceOf(elemType), len(blocks), len(blocks))
			}

			for i, block := range blocks {
				if isPtr {
					if i >= sli.Len() {
						sli = reflect.Append(sli, reflect.New(ty))
					}
					v := sli.Index(i)
					if v.IsNil() {
						v = reflect.New(ty)
					}
					diags = append(diags, o.decodeBlockToValue(block, ctx, v.Elem())...)
					sli.Index(i).Set(v)
				} else {
					if i >= sli.Len() {
						sli = reflect.Append(sli, reflect.Indirect(reflect.New(ty)))
					}
					diags = append(diags, o.decodeBlockToValue(block, ctx, sli.Index(i))...)
				}
			}

			if sli.Len() > len(blocks) {
				sli.SetLen(len(blocks))
			}

			val.Field(fieldIdx).Set(sli)

		default:
			block := blocks[0]
			if isPtr {
				v := val.Field(fieldIdx)
				if v.IsNil() {
					v = reflect.New(ty)
				}
				diags = append(diags, o.decodeBlockToValue(block, ctx, v.Elem())...)
				val.Field(fieldIdx).Set(v)
			} else {
				diags = append(diags, o.decodeBlockToValue(block, ctx, val.Field(fieldIdx))...)
			}
		}
	}

	return diags
}

func (o DecodeOptions) decodeBodyToMap(body hcl.Body, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics {
	attrs, diags := body.JustAttributes()
	if attrs == nil {
		return diags
	}

	mv := reflect.MakeMap(v.Type())

	for k, attr := range attrs {
		switch {
		case attrType.AssignableTo(v.Type().Elem()):
			mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr))
		case exprType.AssignableTo(v.Type().Elem()):
			mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr.Expr))
		default:
			ev := reflect.New(v.Type().Elem())
			diags = append(diags, o.DecodeExpression(attr.Expr, ctx, ev.Interface())...)
			mv.SetMapIndex(reflect.ValueOf(k), ev.Elem())
		}
	}

	v.Set(mv)

	return diags
}

func (o DecodeOptions) decodeBlockToValue(block *hcl.Block, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics {
	diags := o.decodeBodyToValue(block.Body, ctx, v)

	if len(block.Labels) > 0 {
		blockTags := getFieldTags(v.Type())
		for li, lv := range block.Labels {
			lfieldIdx := blockTags.Labels[li].FieldIndex
			v.Field(lfieldIdx).Set(reflect.ValueOf(lv))
		}
	}

	return diags
}

func (o DecodeOptions) DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
	o = o.withDefaults()

	srcVal, diags := expr.Value(ctx)

	convTy, err := o.ImpliedType(val)
	if err != nil {
		panic(fmt.Sprintf("unsuitable DecodeExpression target: %s", err))
	}

	srcVal, err = o.Convert(srcVal, convTy)
	if err != nil {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Unsuitable value type",
			Detail:   fmt.Sprintf("Unsuitable value: %s", err.Error()),
			Subject:  expr.StartRange().Ptr(),
			Context:  expr.Range().Ptr(),
		})
		return diags
	}

	err = gocty.FromCtyValue(srcVal, val)
	if err != nil {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Unsuitable value type",
			Detail:   fmt.Sprintf("Unsuitable value: %s", err.Error()),
			Subject:  expr.StartRange().Ptr(),
			Context:  expr.Range().Ptr(),
		})
	}

	return diags
}

// DecodeExpression extracts the value of the given expression into the given
// value. This value must be something that gocty is able to decode into,
// since the final decoding is delegated to that package.
//
// The given EvalContext is used to resolve any variables or functions in
// expressions encountered while decoding. This may be nil to require only
// constant values, for simple applications that do not support variables or
// functions.
//
// The returned diagnostics should be inspected with its HasErrors method to
// determine if the populated value is valid and complete. If error diagnostics
// are returned then the given value may have been partially-populated but
// may still be accessed by a careful caller for static analysis and editor
// integration use-cases.
func DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
	return DecodeOptions{}.DecodeExpression(expr, ctx, val)
}

func (o DecodeOptions) withDefaults() DecodeOptions {
	if o.ImpliedType == nil {
		o.ImpliedType = gocty.ImpliedType
	}

	if o.Convert == nil {
		o.Convert = convert.Convert
	}
	return o
}
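For context on how this decoder is typically consumed, here is a minimal, self-contained sketch of decoding an HCL body with `DecodeBody`; the `Config` struct and the file contents are hypothetical, invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/gohcl"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

// Config is a hypothetical target struct using the gohcl tag scheme.
type Config struct {
	Name string   `hcl:"name"`
	Tags []string `hcl:"tags,optional"`
}

func main() {
	src := `
name = "web"
tags = ["latest", "dev"]
`
	f, diags := hclsyntax.ParseConfig([]byte(src), "example.hcl", hcl.InitialPos)
	if diags.HasErrors() {
		panic(diags.Error())
	}

	var cfg Config
	// A nil EvalContext means only constant expressions are allowed.
	if diags := gohcl.DecodeBody(f.Body, nil, &cfg); diags.HasErrors() {
		panic(diags.Error())
	}
	fmt.Printf("%+v\n", cfg) // {Name:web Tags:[latest dev]}
}
```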
@@ -1,806 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package gohcl

import (
	"encoding/json"
	"fmt"
	"reflect"
	"testing"

	"github.com/davecgh/go-spew/spew"
	"github.com/hashicorp/hcl/v2"
	hclJSON "github.com/hashicorp/hcl/v2/json"
	"github.com/zclconf/go-cty/cty"
)

func TestDecodeBody(t *testing.T) {
	deepEquals := func(other interface{}) func(v interface{}) bool {
		return func(v interface{}) bool {
			return reflect.DeepEqual(v, other)
		}
	}

	type withNameExpression struct {
		Name hcl.Expression `hcl:"name"`
	}

	type withTwoAttributes struct {
		A string `hcl:"a,optional"`
		B string `hcl:"b,optional"`
	}

	type withNestedBlock struct {
		Plain  string             `hcl:"plain,optional"`
		Nested *withTwoAttributes `hcl:"nested,block"`
	}

	type withListofNestedBlocks struct {
		Nested []*withTwoAttributes `hcl:"nested,block"`
	}

	type withListofNestedBlocksNoPointers struct {
		Nested []withTwoAttributes `hcl:"nested,block"`
	}

	tests := []struct {
		Body      map[string]interface{}
		Target    func() interface{}
		Check     func(v interface{}) bool
		DiagCount int
	}{
		{
			map[string]interface{}{},
			makeInstantiateType(struct{}{}),
			deepEquals(struct{}{}),
			0,
		},
		{
			map[string]interface{}{},
			makeInstantiateType(struct {
				Name string `hcl:"name"`
			}{}),
			deepEquals(struct {
				Name string `hcl:"name"`
			}{}),
			1, // name is required
		},
		{
			map[string]interface{}{},
			makeInstantiateType(struct {
				Name *string `hcl:"name"`
			}{}),
			deepEquals(struct {
				Name *string `hcl:"name"`
			}{}),
			0,
		}, // name nil
		{
			map[string]interface{}{},
			makeInstantiateType(struct {
				Name string `hcl:"name,optional"`
			}{}),
			deepEquals(struct {
				Name string `hcl:"name,optional"`
			}{}),
			0,
		}, // name optional
		{
			map[string]interface{}{},
			makeInstantiateType(withNameExpression{}),
			func(v interface{}) bool {
				if v == nil {
					return false
				}

				wne, valid := v.(withNameExpression)
				if !valid {
					return false
				}

				if wne.Name == nil {
					return false
				}

				nameVal, _ := wne.Name.Value(nil)
				return nameVal.IsNull()
			},
			0,
		},
		{
			map[string]interface{}{
				"name": "Ermintrude",
			},
			makeInstantiateType(withNameExpression{}),
			func(v interface{}) bool {
				if v == nil {
					return false
				}

				wne, valid := v.(withNameExpression)
				if !valid {
					return false
				}

				if wne.Name == nil {
					return false
				}

				nameVal, _ := wne.Name.Value(nil)
				return nameVal.Equals(cty.StringVal("Ermintrude")).True()
			},
			0,
		},
		{
			map[string]interface{}{
				"name": "Ermintrude",
			},
			makeInstantiateType(struct {
				Name string `hcl:"name"`
			}{}),
			deepEquals(struct {
				Name string `hcl:"name"`
			}{"Ermintrude"}),
			0,
		},
		{
			map[string]interface{}{
				"name": "Ermintrude",
				"age":  23,
			},
			makeInstantiateType(struct {
				Name string `hcl:"name"`
			}{}),
			deepEquals(struct {
				Name string `hcl:"name"`
			}{"Ermintrude"}),
			1, // Extraneous "age" property
		},
		{
			map[string]interface{}{
				"name": "Ermintrude",
				"age":  50,
			},
			makeInstantiateType(struct {
				Name  string         `hcl:"name"`
				Attrs hcl.Attributes `hcl:",remain"`
			}{}),
			func(gotI interface{}) bool {
				got := gotI.(struct {
					Name  string         `hcl:"name"`
					Attrs hcl.Attributes `hcl:",remain"`
				})
				return got.Name == "Ermintrude" && len(got.Attrs) == 1 && got.Attrs["age"] != nil
			},
			0,
		},
		{
			map[string]interface{}{
				"name": "Ermintrude",
				"age":  50,
			},
			makeInstantiateType(struct {
				Name   string   `hcl:"name"`
				Remain hcl.Body `hcl:",remain"`
			}{}),
			func(gotI interface{}) bool {
				got := gotI.(struct {
					Name   string   `hcl:"name"`
					Remain hcl.Body `hcl:",remain"`
				})

				attrs, _ := got.Remain.JustAttributes()

				return got.Name == "Ermintrude" && len(attrs) == 1 && attrs["age"] != nil
			},
			0,
		},
		{
			map[string]interface{}{
				"name":   "Ermintrude",
				"living": true,
			},
			makeInstantiateType(struct {
				Name   string               `hcl:"name"`
				Remain map[string]cty.Value `hcl:",remain"`
			}{}),
			deepEquals(struct {
				Name   string               `hcl:"name"`
				Remain map[string]cty.Value `hcl:",remain"`
			}{
				Name: "Ermintrude",
				Remain: map[string]cty.Value{
					"living": cty.True,
				},
			}),
			0,
		},
		{
			map[string]interface{}{
				"name": "Ermintrude",
				"age":  50,
			},
			makeInstantiateType(struct {
				Name   string   `hcl:"name"`
				Body   hcl.Body `hcl:",body"`
				Remain hcl.Body `hcl:",remain"`
			}{}),
			func(gotI interface{}) bool {
				got := gotI.(struct {
					Name   string   `hcl:"name"`
					Body   hcl.Body `hcl:",body"`
					Remain hcl.Body `hcl:",remain"`
				})

				attrs, _ := got.Body.JustAttributes()

				return got.Name == "Ermintrude" && len(attrs) == 2 &&
					attrs["name"] != nil && attrs["age"] != nil
			},
			0,
		},
		{
			map[string]interface{}{
				"noodle": map[string]interface{}{},
			},
			makeInstantiateType(struct {
				Noodle struct{} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				// Generating no diagnostics is good enough for this one.
				return true
			},
			0,
		},
		{
			map[string]interface{}{
				"noodle": []map[string]interface{}{{}},
			},
			makeInstantiateType(struct {
				Noodle struct{} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				// Generating no diagnostics is good enough for this one.
				return true
			},
			0,
		},
		{
			map[string]interface{}{
				"noodle": []map[string]interface{}{{}, {}},
			},
			makeInstantiateType(struct {
				Noodle struct{} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				// Generating one diagnostic is good enough for this one.
				return true
			},
			1,
		},
		{
			map[string]interface{}{},
			makeInstantiateType(struct {
				Noodle struct{} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				// Generating one diagnostic is good enough for this one.
				return true
			},
			1,
		},
		{
			map[string]interface{}{
				"noodle": []map[string]interface{}{},
			},
			makeInstantiateType(struct {
				Noodle struct{} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				// Generating one diagnostic is good enough for this one.
				return true
			},
			1,
		},
		{
			map[string]interface{}{
				"noodle": map[string]interface{}{},
			},
			makeInstantiateType(struct {
				Noodle *struct{} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				return gotI.(struct {
					Noodle *struct{} `hcl:"noodle,block"`
				}).Noodle != nil
			},
			0,
		},
		{
			map[string]interface{}{
				"noodle": []map[string]interface{}{{}},
			},
			makeInstantiateType(struct {
				Noodle *struct{} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				return gotI.(struct {
					Noodle *struct{} `hcl:"noodle,block"`
				}).Noodle != nil
			},
			0,
		},
		{
			map[string]interface{}{
				"noodle": []map[string]interface{}{},
			},
			makeInstantiateType(struct {
				Noodle *struct{} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				return gotI.(struct {
					Noodle *struct{} `hcl:"noodle,block"`
				}).Noodle == nil
			},
			0,
		},
		{
			map[string]interface{}{
				"noodle": []map[string]interface{}{{}, {}},
			},
			makeInstantiateType(struct {
				Noodle *struct{} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				// Generating one diagnostic is good enough for this one.
				return true
			},
			1,
		},
		{
			map[string]interface{}{
				"noodle": []map[string]interface{}{},
			},
			makeInstantiateType(struct {
				Noodle []struct{} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				noodle := gotI.(struct {
					Noodle []struct{} `hcl:"noodle,block"`
				}).Noodle
				return len(noodle) == 0
			},
			0,
		},
		{
			map[string]interface{}{
				"noodle": []map[string]interface{}{{}},
			},
			makeInstantiateType(struct {
				Noodle []struct{} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				noodle := gotI.(struct {
					Noodle []struct{} `hcl:"noodle,block"`
				}).Noodle
				return len(noodle) == 1
			},
			0,
		},
		{
			map[string]interface{}{
				"noodle": []map[string]interface{}{{}, {}},
			},
			makeInstantiateType(struct {
				Noodle []struct{} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				noodle := gotI.(struct {
					Noodle []struct{} `hcl:"noodle,block"`
				}).Noodle
				return len(noodle) == 2
			},
			0,
		},
		{
			map[string]interface{}{
				"noodle": map[string]interface{}{},
			},
			makeInstantiateType(struct {
				Noodle struct {
					Name string `hcl:"name,label"`
				} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				//nolint:misspell
				// Generating two diagnostics is good enough for this one.
				// (one for the missing noodle block and the other for
				// the JSON serialization detecting the missing level of
				// heirarchy for the label.)
				return true
			},
			2,
		},
		{
			map[string]interface{}{
				"noodle": map[string]interface{}{
					"foo_foo": map[string]interface{}{},
				},
			},
			makeInstantiateType(struct {
				Noodle struct {
					Name string `hcl:"name,label"`
				} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				noodle := gotI.(struct {
					Noodle struct {
						Name string `hcl:"name,label"`
					} `hcl:"noodle,block"`
				}).Noodle
				return noodle.Name == "foo_foo"
			},
			0,
		},
		{
			map[string]interface{}{
				"noodle": map[string]interface{}{
					"foo_foo": map[string]interface{}{},
					"bar_baz": map[string]interface{}{},
				},
			},
			makeInstantiateType(struct {
				Noodle struct {
					Name string `hcl:"name,label"`
				} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				// One diagnostic is enough for this one.
				return true
			},
			1,
		},
		{
			map[string]interface{}{
				"noodle": map[string]interface{}{
					"foo_foo": map[string]interface{}{},
					"bar_baz": map[string]interface{}{},
				},
			},
			makeInstantiateType(struct {
				Noodles []struct {
					Name string `hcl:"name,label"`
				} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				noodles := gotI.(struct {
					Noodles []struct {
						Name string `hcl:"name,label"`
					} `hcl:"noodle,block"`
				}).Noodles
				return len(noodles) == 2 && (noodles[0].Name == "foo_foo" || noodles[0].Name == "bar_baz") && (noodles[1].Name == "foo_foo" || noodles[1].Name == "bar_baz") && noodles[0].Name != noodles[1].Name
			},
			0,
		},
		{
			map[string]interface{}{
				"noodle": map[string]interface{}{
					"foo_foo": map[string]interface{}{
						"type": "rice",
					},
				},
			},
			makeInstantiateType(struct {
				Noodle struct {
					Name string `hcl:"name,label"`
					Type string `hcl:"type"`
				} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				noodle := gotI.(struct {
					Noodle struct {
						Name string `hcl:"name,label"`
						Type string `hcl:"type"`
					} `hcl:"noodle,block"`
				}).Noodle
				return noodle.Name == "foo_foo" && noodle.Type == "rice"
			},
			0,
		},

		{
			map[string]interface{}{
				"name": "Ermintrude",
				"age":  34,
			},
			makeInstantiateType(map[string]string(nil)),
			deepEquals(map[string]string{
				"name": "Ermintrude",
				"age":  "34",
			}),
			0,
		},
		{
			map[string]interface{}{
				"name": "Ermintrude",
				"age":  89,
			},
			makeInstantiateType(map[string]*hcl.Attribute(nil)),
			func(gotI interface{}) bool {
				got := gotI.(map[string]*hcl.Attribute)
				return len(got) == 2 && got["name"] != nil && got["age"] != nil
			},
			0,
		},
		{
			map[string]interface{}{
				"name": "Ermintrude",
				"age":  13,
			},
			makeInstantiateType(map[string]hcl.Expression(nil)),
			func(gotI interface{}) bool {
				got := gotI.(map[string]hcl.Expression)
				return len(got) == 2 && got["name"] != nil && got["age"] != nil
			},
			0,
		},
		{
			map[string]interface{}{
				"name":   "Ermintrude",
				"living": true,
			},
			makeInstantiateType(map[string]cty.Value(nil)),
			deepEquals(map[string]cty.Value{
				"name":   cty.StringVal("Ermintrude"),
				"living": cty.True,
			}),
			0,
		},
		{
			// Retain "nested" block while decoding
			map[string]interface{}{
				"plain": "foo",
			},
			func() interface{} {
				return &withNestedBlock{
					Plain: "bar",
					Nested: &withTwoAttributes{
						A: "bar",
					},
				}
			},
			func(gotI interface{}) bool {
				foo := gotI.(withNestedBlock)
				return foo.Plain == "foo" && foo.Nested != nil && foo.Nested.A == "bar"
			},
			0,
		},
		{
			// Retain values in "nested" block while decoding
			map[string]interface{}{
				"nested": map[string]interface{}{
					"a": "foo",
				},
			},
			func() interface{} {
				return &withNestedBlock{
					Nested: &withTwoAttributes{
						B: "bar",
					},
				}
			},
			func(gotI interface{}) bool {
				foo := gotI.(withNestedBlock)
				return foo.Nested.A == "foo" && foo.Nested.B == "bar"
			},
			0,
		},
		{
			// Retain values in "nested" block list while decoding
			map[string]interface{}{
				"nested": []map[string]interface{}{
					{
						"a": "foo",
					},
				},
			},
			func() interface{} {
				return &withListofNestedBlocks{
					Nested: []*withTwoAttributes{
						{
							B: "bar",
						},
					},
				}
			},
			func(gotI interface{}) bool {
				n := gotI.(withListofNestedBlocks)
				return n.Nested[0].A == "foo" && n.Nested[0].B == "bar"
			},
			0,
		},
		{
			// Remove additional elements from the list while decoding nested blocks
			map[string]interface{}{
				"nested": []map[string]interface{}{
					{
						"a": "foo",
					},
				},
			},
			func() interface{} {
				return &withListofNestedBlocks{
					Nested: []*withTwoAttributes{
						{
							B: "bar",
						},
						{
							B: "bar",
						},
					},
				}
			},
			func(gotI interface{}) bool {
				n := gotI.(withListofNestedBlocks)
				return len(n.Nested) == 1
			},
			0,
		},
		{
			// Make sure decoding value slices works the same as pointer slices.
			map[string]interface{}{
				"nested": []map[string]interface{}{
					{
						"b": "bar",
					},
					{
						"b": "baz",
					},
				},
			},
			func() interface{} {
				return &withListofNestedBlocksNoPointers{
					Nested: []withTwoAttributes{
						{
							B: "foo",
						},
					},
				}
			},
			func(gotI interface{}) bool {
				n := gotI.(withListofNestedBlocksNoPointers)
				return n.Nested[0].B == "bar" && len(n.Nested) == 2
			},
			0,
		},
	}

	for i, test := range tests {
		// For convenience here we're going to use the JSON parser
		// to process the given body.
		buf, err := json.Marshal(test.Body)
		if err != nil {
			t.Fatalf("error JSON-encoding body for test %d: %s", i, err)
		}

		t.Run(string(buf), func(t *testing.T) {
			file, diags := hclJSON.Parse(buf, "test.json")
			if len(diags) != 0 {
				t.Fatalf("diagnostics while parsing: %s", diags.Error())
			}

			targetVal := reflect.ValueOf(test.Target())

			diags = DecodeBody(file.Body, nil, targetVal.Interface())
			if len(diags) != test.DiagCount {
				t.Errorf("wrong number of diagnostics %d; want %d", len(diags), test.DiagCount)
				for _, diag := range diags {
					t.Logf(" - %s", diag.Error())
				}
			}
			got := targetVal.Elem().Interface()
			if !test.Check(got) {
				t.Errorf("wrong result\ngot: %s", spew.Sdump(got))
			}
		})
	}
}

func TestDecodeExpression(t *testing.T) {
	tests := []struct {
		Value     cty.Value
		Target    interface{}
		Want      interface{}
		DiagCount int
	}{
		{
			cty.StringVal("hello"),
			"",
			"hello",
			0,
		},
		{
			cty.StringVal("hello"),
			cty.NilVal,
			cty.StringVal("hello"),
			0,
		},
		{
			cty.NumberIntVal(2),
			"",
			"2",
			0,
		},
		{
			cty.StringVal("true"),
			false,
			true,
			0,
		},
		{
			cty.NullVal(cty.String),
			"",
			"",
			1, // null value is not allowed
		},
		{
			cty.UnknownVal(cty.String),
			"",
			"",
			1, // value must be known
		},
		{
			cty.ListVal([]cty.Value{cty.True}),
			false,
			false,
			1, // bool required
		},
	}

	for i, test := range tests {
		t.Run(fmt.Sprintf("%02d", i), func(t *testing.T) {
			expr := &fixedExpression{test.Value}

			targetVal := reflect.New(reflect.TypeOf(test.Target))

			diags := DecodeExpression(expr, nil, targetVal.Interface())
			if len(diags) != test.DiagCount {
				t.Errorf("wrong number of diagnostics %d; want %d", len(diags), test.DiagCount)
				for _, diag := range diags {
					t.Logf(" - %s", diag.Error())
				}
			}
			got := targetVal.Elem().Interface()
			if !reflect.DeepEqual(got, test.Want) {
				t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want)
			}
		})
	}
}

type fixedExpression struct {
	val cty.Value
}

func (e *fixedExpression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
	return e.val, nil
}

func (e *fixedExpression) Range() (r hcl.Range) {
	return
}

func (e *fixedExpression) StartRange() (r hcl.Range) {
	return
}

func (e *fixedExpression) Variables() []hcl.Traversal {
	return nil
}

func makeInstantiateType(target interface{}) func() interface{} {
	return func() interface{} {
		return reflect.New(reflect.TypeOf(target)).Interface()
	}
}
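As a quick orientation to the conversion behavior the table above exercises (for example, `cty.NumberIntVal(2)` into a string target yielding `"2"`), here is a minimal, hypothetical sketch using the upstream `gohcl.DecodeExpression`; the static expression stands in for one parsed from real configuration:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/gohcl"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// A static expression standing in for one parsed from configuration.
	expr := hcl.StaticExpr(cty.NumberIntVal(2), hcl.Range{})

	// gocty converts the cty number to the target Go type; a string
	// target yields "2", matching the conversion table in the tests.
	var s string
	if diags := gohcl.DecodeExpression(expr, nil, &s); diags.HasErrors() {
		panic(diags.Error())
	}
	fmt.Println(s) // "2"
}
```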
@@ -1,65 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

// Package gohcl allows decoding HCL configurations into Go data structures.
//
// It provides a convenient and concise way of describing the schema for
// configuration and then accessing the resulting data via native Go
// types.
//
// A struct field tag scheme is used, similar to other decoding and
// unmarshalling libraries. The tags are formatted as in the following example:
//
//	ThingType string `hcl:"thing_type,attr"`
//
// Within each tag there are two comma-separated tokens. The first is the
// name of the corresponding construct in configuration, while the second
// is a keyword giving the kind of construct expected. The following
// kind keywords are supported:
//
//	attr (the default) indicates that the value is to be populated from an attribute
//	block indicates that the value is to be populated from a block
//	label indicates that the value is to be populated from a block label
//	optional is the same as attr, but the field is optional
//	remain indicates that the value is to be populated from the remaining body after populating other fields
//
// "attr" fields may either be of type *hcl.Expression, in which case the raw
// expression is assigned, or of any type accepted by gocty, in which case
// gocty will be used to assign the value to a native Go type.
//
// "block" fields may be a struct that recursively uses the same tags, or a
// slice of such structs, in which case multiple blocks of the corresponding
// type are decoded into the slice.
//
// "body" can be placed on a single field of type hcl.Body to capture
// the full hcl.Body that was decoded for a block. This does not allow leftover
// values like "remain", so a decoding error will still be returned if leftover
// fields are given. If you want to capture the decoding body PLUS leftover
// fields, you must specify a "remain" field as well to prevent errors. The
// body field and the remain field will both contain the leftover fields.
//
// "label" fields are considered only in a struct used as the type of a field
// marked as "block", and are used sequentially to capture the labels of
// the blocks being decoded. In this case, the name token is used only as
// an identifier for the label in diagnostic messages.
//
// "optional" fields behave like "attr" fields, but they are optional
// and will not give parsing errors if they are missing.
//
// "remain" can be placed on a single field that may be either of type
// hcl.Body or hcl.Attributes, in which case any remaining body content is
// placed into this field for delayed processing. If no "remain" field is
// present then any attributes or blocks not matched by another valid tag
// will cause an error diagnostic.
//
// Only a subset of this tagging/typing vocabulary is supported for the
// "Encode" family of functions. See the EncodeIntoBody docs for full details
// on the constraints there.
//
// Broadly-speaking this package deals with two types of error. The first is
// errors in the configuration itself, which are returned as diagnostics
// written with the configuration author as the target audience. The second
// is bugs in the calling program, such as invalid struct tags, which are
// surfaced via panics since there can be no useful runtime handling of such
// errors and they should certainly not be returned to the user as diagnostics.
package gohcl
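The package comment above is the best summary of the tag vocabulary; a minimal, hypothetical schema tying the keywords together could look like this (the block and field names are invented for illustration):

```go
package example

import "github.com/hashicorp/hcl/v2"

// Service exercises the "label" keyword and the default "attr" kind.
type Service struct {
	Name string `hcl:"name,label"` // first block label
	Exe  string `hcl:"executable"` // required attribute ("attr" is the default)
}

// Config exercises "optional", "block", and "remain".
type Config struct {
	Region   string         `hcl:"region,optional"` // optional attribute
	Services []Service      `hcl:"service,block"`   // zero or more service blocks
	Remain   hcl.Attributes `hcl:",remain"`         // unmatched attributes, kept for later processing
}
```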
@@ -1,192 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package gohcl
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
|
||||
"github.com/hashicorp/hcl/v2/hclwrite"
|
||||
"github.com/zclconf/go-cty/cty/gocty"
|
||||
)
|
||||
|
||||
// EncodeIntoBody replaces the contents of the given hclwrite Body with
|
||||
// attributes and blocks derived from the given value, which must be a
|
||||
// struct value or a pointer to a struct value with the struct tags defined
|
||||
// in this package.
|
||||
//
|
||||
// This function can work only with fully-decoded data. It will ignore any
|
||||
// fields tagged as "remain", any fields that decode attributes into either
|
||||
// hcl.Attribute or hcl.Expression values, and any fields that decode blocks
|
||||
// into hcl.Attributes values. This function does not have enough information
|
||||
// to complete the decoding of these types.
|
||||
//
|
||||
// Any fields tagged as "label" are ignored by this function. Use EncodeAsBlock
|
||||
// to produce a whole hclwrite.Block including block labels.
|
||||
//
|
||||
// As long as a suitable value is given to encode and the destination body
|
||||
// is non-nil, this function will always complete. It will panic in case of
|
||||
// any errors in the calling program, such as passing an inappropriate type
|
||||
// or a nil body.
|
||||
//
|
||||
// The layout of the resulting HCL source is derived from the ordering of
|
||||
// the struct fields, with blank lines around nested blocks of different types.
|
||||
// Fields representing attributes should usually precede those representing
|
||||
// blocks so that the attributes can group togather in the result. For more
|
||||
// control, use the hclwrite API directly.
|
||||
func EncodeIntoBody(val interface{}, dst *hclwrite.Body) {
|
||||
rv := reflect.ValueOf(val)
|
||||
ty := rv.Type()
|
||||
if ty.Kind() == reflect.Ptr {
|
||||
rv = rv.Elem()
|
||||
ty = rv.Type()
|
||||
}
|
||||
if ty.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("value is %s, not struct", ty.Kind()))
|
||||
}
|
||||
|
||||
tags := getFieldTags(ty)
|
||||
populateBody(rv, ty, tags, dst)
|
||||
}
|
||||
|
||||
// EncodeAsBlock creates a new hclwrite.Block populated with the data from
|
||||
// the given value, which must be a struct or pointer to struct with the
|
||||
// struct tags defined in this package.
|
||||
//
|
||||
// If the given struct type has fields tagged with "label" tags then they
|
||||
// will be used in order to annotate the created block with labels.
|
||||
//
|
||||
// This function has the same constraints as EncodeIntoBody and will panic
|
||||
// if they are violated.
|
||||
func EncodeAsBlock(val interface{}, blockType string) *hclwrite.Block {
|
||||
rv := reflect.ValueOf(val)
|
||||
ty := rv.Type()
|
||||
if ty.Kind() == reflect.Ptr {
|
||||
rv = rv.Elem()
|
||||
ty = rv.Type()
|
||||
}
|
||||
if ty.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("value is %s, not struct", ty.Kind()))
|
||||
}
|
||||
|
||||
tags := getFieldTags(ty)
|
||||
labels := make([]string, len(tags.Labels))
|
||||
for i, lf := range tags.Labels {
|
||||
		lv := rv.Field(lf.FieldIndex)
		// We just stringify whatever we find. It should always be a string
		// but if not then we'll still do something reasonable.
		labels[i] = fmt.Sprintf("%s", lv.Interface())
	}

	block := hclwrite.NewBlock(blockType, labels)
	populateBody(rv, ty, tags, block.Body())
	return block
}

func populateBody(rv reflect.Value, ty reflect.Type, tags *fieldTags, dst *hclwrite.Body) {
	nameIdxs := make(map[string]int, len(tags.Attributes)+len(tags.Blocks))
	namesOrder := make([]string, 0, len(tags.Attributes)+len(tags.Blocks))
	for n, i := range tags.Attributes {
		nameIdxs[n] = i
		namesOrder = append(namesOrder, n)
	}
	for n, i := range tags.Blocks {
		nameIdxs[n] = i
		namesOrder = append(namesOrder, n)
	}
	sort.SliceStable(namesOrder, func(i, j int) bool {
		ni, nj := namesOrder[i], namesOrder[j]
		return nameIdxs[ni] < nameIdxs[nj]
	})

	dst.Clear()

	prevWasBlock := false
	for _, name := range namesOrder {
		fieldIdx := nameIdxs[name]
		field := ty.Field(fieldIdx)
		fieldTy := field.Type
		fieldVal := rv.Field(fieldIdx)

		if fieldTy.Kind() == reflect.Ptr {
			fieldTy = fieldTy.Elem()
			fieldVal = fieldVal.Elem()
		}

		if _, isAttr := tags.Attributes[name]; isAttr {
			if exprType.AssignableTo(fieldTy) || attrType.AssignableTo(fieldTy) {
				continue // ignore undecoded fields
			}
			if !fieldVal.IsValid() {
				continue // ignore (field value is nil pointer)
			}
			if fieldTy.Kind() == reflect.Ptr && fieldVal.IsNil() {
				continue // ignore
			}
			if prevWasBlock {
				dst.AppendNewline()
				prevWasBlock = false
			}

			valTy, err := gocty.ImpliedType(fieldVal.Interface())
			if err != nil {
				panic(fmt.Sprintf("cannot encode %T as HCL expression: %s", fieldVal.Interface(), err))
			}

			val, err := gocty.ToCtyValue(fieldVal.Interface(), valTy)
			if err != nil {
				// This should never happen, since we should always be able
				// to decode into the implied type.
				panic(fmt.Sprintf("failed to encode %T as %#v: %s", fieldVal.Interface(), valTy, err))
			}

			dst.SetAttributeValue(name, val)
		} else { // must be a block, then
			elemTy := fieldTy
			isSeq := false
			if elemTy.Kind() == reflect.Slice || elemTy.Kind() == reflect.Array {
				isSeq = true
				elemTy = elemTy.Elem()
			}

			if bodyType.AssignableTo(elemTy) || attrsType.AssignableTo(elemTy) {
				continue // ignore undecoded fields
			}
			prevWasBlock = false

			if isSeq {
				l := fieldVal.Len()
				for i := 0; i < l; i++ {
					elemVal := fieldVal.Index(i)
					if !elemVal.IsValid() {
						continue // ignore (elem value is nil pointer)
					}
					if elemTy.Kind() == reflect.Ptr && elemVal.IsNil() {
						continue // ignore
					}
					block := EncodeAsBlock(elemVal.Interface(), name)
					if !prevWasBlock {
						dst.AppendNewline()
						prevWasBlock = true
					}
					dst.AppendBlock(block)
				}
			} else {
				if !fieldVal.IsValid() {
					continue // ignore (field value is nil pointer)
				}
				if elemTy.Kind() == reflect.Ptr && fieldVal.IsNil() {
					continue // ignore
				}
				block := EncodeAsBlock(fieldVal.Interface(), name)
				if !prevWasBlock {
					dst.AppendNewline()
					prevWasBlock = true
				}
				dst.AppendBlock(block)
			}
		}
	}
}
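For orientation on what the encoder above emits, here is a minimal sketch that drives it through the public gohcl API; the `Service` struct is hypothetical and the printed output is approximate:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/gohcl"
	"github.com/hashicorp/hcl/v2/hclwrite"
)

// Service is a hypothetical struct: the ",label" tag becomes the block
// label, and the remaining tags become attributes inside the block body.
type Service struct {
	Name string   `hcl:"name,label"`
	Exe  []string `hcl:"executable"`
}

func main() {
	f := hclwrite.NewEmptyFile()
	block := gohcl.EncodeAsBlock(&Service{
		Name: "web",
		Exe:  []string{"./web"},
	}, "service")
	f.Body().AppendBlock(block)
	fmt.Printf("%s", f.Bytes())
	// Prints roughly:
	// service "web" {
	//   executable = ["./web"]
	// }
}
```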
@@ -1,67 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package gohcl_test

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/gohcl"
	"github.com/hashicorp/hcl/v2/hclwrite"
)

func ExampleEncodeIntoBody() {
	type Service struct {
		Name string   `hcl:"name,label"`
		Exe  []string `hcl:"executable"`
	}
	type Constraints struct {
		OS   string `hcl:"os"`
		Arch string `hcl:"arch"`
	}
	type App struct {
		Name        string       `hcl:"name"`
		Desc        string       `hcl:"description"`
		Constraints *Constraints `hcl:"constraints,block"`
		Services    []Service    `hcl:"service,block"`
	}

	app := App{
		Name: "awesome-app",
		Desc: "Such an awesome application",
		Constraints: &Constraints{
			OS:   "linux",
			Arch: "amd64",
		},
		Services: []Service{
			{
				Name: "web",
				Exe:  []string{"./web", "--listen=:8080"},
			},
			{
				Name: "worker",
				Exe:  []string{"./worker"},
			},
		},
	}

	f := hclwrite.NewEmptyFile()
	gohcl.EncodeIntoBody(&app, f.Body())
	fmt.Printf("%s", f.Bytes())

	// Output:
	// name = "awesome-app"
	// description = "Such an awesome application"
	//
	// constraints {
	//   os = "linux"
	//   arch = "amd64"
	// }
	//
	// service "web" {
	//   executable = ["./web", "--listen=:8080"]
	// }
	// service "worker" {
	//   executable = ["./worker"]
	// }
}
@@ -1,185 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package gohcl

import (
	"fmt"
	"reflect"
	"sort"
	"strings"

	"github.com/hashicorp/hcl/v2"
)

// ImpliedBodySchema produces a hcl.BodySchema derived from the type of the
// given value, which must be a struct value or a pointer to one. If an
// inappropriate value is passed, this function will panic.
//
// The second return argument indicates whether the given struct includes
// a "remain" field, and thus the returned schema is non-exhaustive.
//
// This uses the tags on the fields of the struct to discover how each
// field's value should be expressed within configuration. If an invalid
// mapping is attempted, this function will panic.
func ImpliedBodySchema(val interface{}) (schema *hcl.BodySchema, partial bool) {
	ty := reflect.TypeOf(val)

	if ty.Kind() == reflect.Ptr {
		ty = ty.Elem()
	}

	if ty.Kind() != reflect.Struct {
		panic(fmt.Sprintf("given value must be struct, not %T", val))
	}

	var attrSchemas []hcl.AttributeSchema
	var blockSchemas []hcl.BlockHeaderSchema

	tags := getFieldTags(ty)

	attrNames := make([]string, 0, len(tags.Attributes))
	for n := range tags.Attributes {
		attrNames = append(attrNames, n)
	}
	sort.Strings(attrNames)
	for _, n := range attrNames {
		idx := tags.Attributes[n]
		optional := tags.Optional[n]
		field := ty.Field(idx)

		var required bool

		switch {
		case field.Type.AssignableTo(exprType):
			//nolint:misspell
			// If we're decoding to hcl.Expression then absense can be
			// indicated via a null value, so we don't specify that
			// the field is required during decoding.
			required = false
		case field.Type.Kind() != reflect.Ptr && !optional:
			required = true
		default:
			required = false
		}

		attrSchemas = append(attrSchemas, hcl.AttributeSchema{
			Name:     n,
			Required: required,
		})
	}

	blockNames := make([]string, 0, len(tags.Blocks))
	for n := range tags.Blocks {
		blockNames = append(blockNames, n)
	}
	sort.Strings(blockNames)
	for _, n := range blockNames {
		idx := tags.Blocks[n]
		field := ty.Field(idx)
		fty := field.Type
		if fty.Kind() == reflect.Slice {
			fty = fty.Elem()
		}
		if fty.Kind() == reflect.Ptr {
			fty = fty.Elem()
		}
		if fty.Kind() != reflect.Struct {
			panic(fmt.Sprintf(
				"hcl 'block' tag kind cannot be applied to %s field %s: struct required", field.Type.String(), field.Name,
			))
		}
		ftags := getFieldTags(fty)
		var labelNames []string
		if len(ftags.Labels) > 0 {
			labelNames = make([]string, len(ftags.Labels))
			for i, l := range ftags.Labels {
				labelNames[i] = l.Name
			}
		}

		blockSchemas = append(blockSchemas, hcl.BlockHeaderSchema{
			Type:       n,
			LabelNames: labelNames,
		})
	}

	partial = tags.Remain != nil
	schema = &hcl.BodySchema{
		Attributes: attrSchemas,
		Blocks:     blockSchemas,
	}
	return schema, partial
}

type fieldTags struct {
	Attributes map[string]int
	Blocks     map[string]int
	Labels     []labelField
	Remain     *int
	Body       *int
	Optional   map[string]bool
}

type labelField struct {
	FieldIndex int
	Name       string
}

func getFieldTags(ty reflect.Type) *fieldTags {
	ret := &fieldTags{
		Attributes: map[string]int{},
		Blocks:     map[string]int{},
		Optional:   map[string]bool{},
	}

	ct := ty.NumField()
	for i := 0; i < ct; i++ {
		field := ty.Field(i)
		tag := field.Tag.Get("hcl")
		if tag == "" {
			continue
		}

		comma := strings.Index(tag, ",")
		var name, kind string
		if comma != -1 {
			name = tag[:comma]
			kind = tag[comma+1:]
		} else {
			name = tag
			kind = "attr"
		}

		switch kind {
		case "attr":
			ret.Attributes[name] = i
		case "block":
			ret.Blocks[name] = i
		case "label":
			ret.Labels = append(ret.Labels, labelField{
				FieldIndex: i,
				Name:       name,
			})
		case "remain":
			if ret.Remain != nil {
				panic("only one 'remain' tag is permitted")
			}
			idx := i // copy, because this loop will continue assigning to i
			ret.Remain = &idx
		case "body":
			if ret.Body != nil {
				panic("only one 'body' tag is permitted")
			}
			idx := i // copy, because this loop will continue assigning to i
			ret.Body = &idx
		case "optional":
			ret.Attributes[name] = i
			ret.Optional[name] = true
		default:
			panic(fmt.Sprintf("invalid hcl field tag kind %q on %s %q", kind, field.Type.String(), field.Name))
		}
	}

	return ret
}
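A short sketch of how the tag-to-schema mapping above behaves in practice; the `Config` struct here is hypothetical:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/gohcl"
)

// Config demonstrates the three tag kinds that matter for the schema:
// a plain tag yields a required attribute, ",optional" an optional one,
// and ",remain" makes the returned schema partial.
type Config struct {
	Name  string   `hcl:"name"`
	Debug bool     `hcl:"debug,optional"`
	Rest  hcl.Body `hcl:",remain"`
}

func main() {
	schema, partial := gohcl.ImpliedBodySchema(&Config{})
	fmt.Println(partial) // true, because of the ",remain" tag
	for _, a := range schema.Attributes {
		fmt.Printf("%s required=%v\n", a.Name, a.Required)
	}
	// debug required=false
	// name required=true
}
```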
@@ -1,233 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package gohcl

import (
	"fmt"
	"reflect"
	"testing"

	"github.com/davecgh/go-spew/spew"
	"github.com/hashicorp/hcl/v2"
)

func TestImpliedBodySchema(t *testing.T) {
	tests := []struct {
		val         interface{}
		wantSchema  *hcl.BodySchema
		wantPartial bool
	}{
		{
			struct{}{},
			&hcl.BodySchema{},
			false,
		},
		{
			struct {
				Ignored bool
			}{},
			&hcl.BodySchema{},
			false,
		},
		{
			struct {
				Attr1 bool `hcl:"attr1"`
				Attr2 bool `hcl:"attr2"`
			}{},
			&hcl.BodySchema{
				Attributes: []hcl.AttributeSchema{
					{
						Name:     "attr1",
						Required: true,
					},
					{
						Name:     "attr2",
						Required: true,
					},
				},
			},
			false,
		},
		{
			struct {
				Attr *bool `hcl:"attr,attr"`
			}{},
			&hcl.BodySchema{
				Attributes: []hcl.AttributeSchema{
					{
						Name:     "attr",
						Required: false,
					},
				},
			},
			false,
		},
		{
			struct {
				Thing struct{} `hcl:"thing,block"`
			}{},
			&hcl.BodySchema{
				Blocks: []hcl.BlockHeaderSchema{
					{
						Type: "thing",
					},
				},
			},
			false,
		},
		{
			struct {
				Thing struct {
					Type string `hcl:"type,label"`
					Name string `hcl:"name,label"`
				} `hcl:"thing,block"`
			}{},
			&hcl.BodySchema{
				Blocks: []hcl.BlockHeaderSchema{
					{
						Type:       "thing",
						LabelNames: []string{"type", "name"},
					},
				},
			},
			false,
		},
		{
			struct {
				Thing []struct {
					Type string `hcl:"type,label"`
					Name string `hcl:"name,label"`
				} `hcl:"thing,block"`
			}{},
			&hcl.BodySchema{
				Blocks: []hcl.BlockHeaderSchema{
					{
						Type:       "thing",
						LabelNames: []string{"type", "name"},
					},
				},
			},
			false,
		},
		{
			struct {
				Thing *struct {
					Type string `hcl:"type,label"`
					Name string `hcl:"name,label"`
				} `hcl:"thing,block"`
			}{},
			&hcl.BodySchema{
				Blocks: []hcl.BlockHeaderSchema{
					{
						Type:       "thing",
						LabelNames: []string{"type", "name"},
					},
				},
			},
			false,
		},
		{
			struct {
				Thing struct {
					Name      string `hcl:"name,label"`
					Something string `hcl:"something"`
				} `hcl:"thing,block"`
			}{},
			&hcl.BodySchema{
				Blocks: []hcl.BlockHeaderSchema{
					{
						Type:       "thing",
						LabelNames: []string{"name"},
					},
				},
			},
			false,
		},
		{
			struct {
				Doodad string `hcl:"doodad"`
				Thing  struct {
					Name string `hcl:"name,label"`
				} `hcl:"thing,block"`
			}{},
			&hcl.BodySchema{
				Attributes: []hcl.AttributeSchema{
					{
						Name:     "doodad",
						Required: true,
					},
				},
				Blocks: []hcl.BlockHeaderSchema{
					{
						Type:       "thing",
						LabelNames: []string{"name"},
					},
				},
			},
			false,
		},
		{
			struct {
				Doodad string `hcl:"doodad"`
				Config string `hcl:",remain"`
			}{},
			&hcl.BodySchema{
				Attributes: []hcl.AttributeSchema{
					{
						Name:     "doodad",
						Required: true,
					},
				},
			},
			true,
		},
		{
			struct {
				Expr hcl.Expression `hcl:"expr"`
			}{},
			&hcl.BodySchema{
				Attributes: []hcl.AttributeSchema{
					{
						Name:     "expr",
						Required: false,
					},
				},
			},
			false,
		},
		{
			struct {
				Meh string `hcl:"meh,optional"`
			}{},
			&hcl.BodySchema{
				Attributes: []hcl.AttributeSchema{
					{
						Name:     "meh",
						Required: false,
					},
				},
			},
			false,
		},
	}

	for _, test := range tests {
		t.Run(fmt.Sprintf("%#v", test.val), func(t *testing.T) {
			schema, partial := ImpliedBodySchema(test.val)
			if !reflect.DeepEqual(schema, test.wantSchema) {
				t.Errorf(
					"wrong schema\ngot:  %s\nwant: %s",
					spew.Sdump(schema), spew.Sdump(test.wantSchema),
				)
			}

			if partial != test.wantPartial {
				t.Errorf(
					"wrong partial flag\ngot:  %#v\nwant: %#v",
					partial, test.wantPartial,
				)
			}
		})
	}
}
@@ -1,19 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package gohcl

import (
	"reflect"

	"github.com/hashicorp/hcl/v2"
)

var victimExpr hcl.Expression
var victimBody hcl.Body

var exprType = reflect.TypeOf(&victimExpr).Elem()
var bodyType = reflect.TypeOf(&victimBody).Elem()
var blockType = reflect.TypeOf((*hcl.Block)(nil)) //nolint:unused
var attrType = reflect.TypeOf((*hcl.Attribute)(nil))
var attrsType = reflect.TypeOf(hcl.Attributes(nil))
File diff suppressed because it is too large
@@ -1,228 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

// Forked from https://github.com/hashicorp/hcl/blob/4679383728fe331fc8a6b46036a27b8f818d9bc0/merged.go

package hclparser

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
)

// MergeFiles combines the given files to produce a single body that contains
// configuration from all of the given files.
//
// The ordering of the given files decides the order in which contained
// elements will be returned. If any top-level attributes are defined with
// the same name across multiple files, a diagnostic will be produced from
// the Content and PartialContent methods describing this error in a
// user-friendly way.
func MergeFiles(files []*hcl.File) hcl.Body {
	var bodies []hcl.Body
	for _, file := range files {
		bodies = append(bodies, file.Body)
	}
	return MergeBodies(bodies)
}

// MergeBodies is like MergeFiles except it deals directly with bodies, rather
// than with entire files.
func MergeBodies(bodies []hcl.Body) hcl.Body {
	if len(bodies) == 0 {
		// Swap out for our singleton empty body, to reduce the number of
		// empty slices we have hanging around.
		return emptyBody
	}

	// If any of the given bodies are already merged bodies, we'll unpack
	// to flatten to a single mergedBodies, since that's conceptually simpler.
	// This also, as a side-effect, eliminates any empty bodies, since
	// empties are merged bodies with no inner bodies.
	var newLen int
	var flatten bool
	for _, body := range bodies {
		if children, merged := body.(mergedBodies); merged {
			newLen += len(children)
			flatten = true
		} else {
			newLen++
		}
	}

	if !flatten { // not just newLen == len, because we might have mergedBodies with single bodies inside
		return mergedBodies(bodies)
	}

	if newLen == 0 {
		// Don't allocate a new empty when we already have one
		return emptyBody
	}

	n := make([]hcl.Body, 0, newLen)
	for _, body := range bodies {
		if children, merged := body.(mergedBodies); merged {
			n = append(n, children...)
		} else {
			n = append(n, body)
		}
	}
	return mergedBodies(n)
}

var emptyBody = mergedBodies([]hcl.Body{})

// EmptyBody returns a body with no content. This body can be used as a
// placeholder when a body is required but no body content is available.
func EmptyBody() hcl.Body {
	return emptyBody
}

type mergedBodies []hcl.Body

// Content returns the content produced by applying the given schema to all
// of the merged bodies and merging the result.
//
// Although required attributes _are_ supported, they should be used sparingly
// with merged bodies since in this case there is no contextual information
// with which to return good diagnostics. Applications working with merged
// bodies may wish to mark all attributes as optional and then check for
// required attributes afterwards, to produce better diagnostics.
func (mb mergedBodies) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
	// the returned body will always be empty in this case, because mergedContent
	// will only ever call Content on the child bodies.
	content, _, diags := mb.mergedContent(schema, false)
	return content, diags
}

func (mb mergedBodies) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
	return mb.mergedContent(schema, true)
}

func (mb mergedBodies) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
	attrs := make(map[string]*hcl.Attribute)
	var diags hcl.Diagnostics

	for _, body := range mb {
		thisAttrs, thisDiags := body.JustAttributes()

		if len(thisDiags) != 0 {
			diags = append(diags, thisDiags...)
		}

		for name, attr := range thisAttrs {
			if existing := attrs[name]; existing != nil {
				diags = diags.Append(&hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Duplicate argument",
					Detail: fmt.Sprintf(
						"Argument %q was already set at %s",
						name, existing.NameRange.String(),
					),
					Subject: thisAttrs[name].NameRange.Ptr(),
				})
			}
			attrs[name] = attr
		}
	}

	return attrs, diags
}

func (mb mergedBodies) MissingItemRange() hcl.Range {
	if len(mb) == 0 {
		// Nothing useful to return here, so we'll return some garbage.
		return hcl.Range{
			Filename: "<empty>",
		}
	}

	// arbitrarily use the first body's missing item range
	return mb[0].MissingItemRange()
}

func (mb mergedBodies) mergedContent(schema *hcl.BodySchema, partial bool) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
	// We need to produce a new schema with none of the attributes marked as
	// required, since _any one_ of our bodies can contribute an attribute value.
	// We'll separately check that all required attributes are present at
	// the end.
	mergedSchema := &hcl.BodySchema{
		Blocks: schema.Blocks,
	}
	for _, attrS := range schema.Attributes {
		mergedAttrS := attrS
		mergedAttrS.Required = false
		mergedSchema.Attributes = append(mergedSchema.Attributes, mergedAttrS)
	}

	var mergedLeftovers []hcl.Body
	content := &hcl.BodyContent{
		Attributes: map[string]*hcl.Attribute{},
	}

	var diags hcl.Diagnostics
	for _, body := range mb {
		var thisContent *hcl.BodyContent
		var thisLeftovers hcl.Body
		var thisDiags hcl.Diagnostics

		if partial {
			thisContent, thisLeftovers, thisDiags = body.PartialContent(mergedSchema)
		} else {
			thisContent, thisDiags = body.Content(mergedSchema)
		}

		if thisLeftovers != nil {
			mergedLeftovers = append(mergedLeftovers, thisLeftovers)
		}
		if len(thisDiags) != 0 {
			diags = append(diags, thisDiags...)
		}

		if thisContent.Attributes != nil {
			for name, attr := range thisContent.Attributes {
				if existing := content.Attributes[name]; existing != nil {
					diags = diags.Append(&hcl.Diagnostic{
						Severity: hcl.DiagError,
						Summary:  "Duplicate argument",
						Detail: fmt.Sprintf(
							"Argument %q was already set at %s",
							name, existing.NameRange.String(),
						),
						Subject: thisContent.Attributes[name].NameRange.Ptr(),
					})
				}
				content.Attributes[name] = attr
			}
		}

		if len(thisContent.Blocks) != 0 {
			content.Blocks = append(content.Blocks, thisContent.Blocks...)
		}
	}

	// Finally, we check for required attributes.
	for _, attrS := range schema.Attributes {
		if !attrS.Required {
			continue
		}

		if content.Attributes[attrS.Name] == nil {
			// We don't have any context here to produce a good diagnostic,
			// which is why we warn in the Content docstring to minimize the
			// use of required attributes on merged bodies.
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Missing required argument",
				Detail: fmt.Sprintf(
					"The argument %q is required, but was not set.",
					attrS.Name,
				),
			})
		}
	}

	leftoverBody := MergeBodies(mergedLeftovers)
	return content, leftoverBody, diags
}
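The forked merger mirrors upstream `hcl.MergeFiles`, so a minimal usage sketch against the upstream API looks like this (the file contents are hypothetical):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclparse"
)

func main() {
	p := hclparse.NewParser()
	f1, _ := p.ParseHCL([]byte(`a = 1`), "one.hcl")
	f2, _ := p.ParseHCL([]byte(`b = 2`), "two.hcl")

	// Attributes from both files are visible through one merged body;
	// defining the same attribute twice yields a "Duplicate argument"
	// diagnostic, as in the forked implementation above.
	body := hcl.MergeFiles([]*hcl.File{f1, f2})
	attrs, diags := body.JustAttributes()
	fmt.Println(len(attrs), diags.HasErrors()) // 2 false
}
```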
@@ -1,9 +1,6 @@
package hclparser

import (
	"errors"
	"path"
	"strings"
	"time"

	"github.com/hashicorp/go-cty-funcs/cidr"
@@ -17,245 +14,113 @@ import (
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

type funcDef struct {
	name    string
	fn      function.Function
	factory func() function.Function
}

var stdlibFunctions = []funcDef{
	{name: "absolute", fn: stdlib.AbsoluteFunc},
	{name: "add", fn: stdlib.AddFunc},
	{name: "and", fn: stdlib.AndFunc},
	{name: "base64decode", fn: encoding.Base64DecodeFunc},
	{name: "base64encode", fn: encoding.Base64EncodeFunc},
	{name: "basename", factory: basenameFunc},
	{name: "bcrypt", fn: crypto.BcryptFunc},
	{name: "byteslen", fn: stdlib.BytesLenFunc},
	{name: "bytesslice", fn: stdlib.BytesSliceFunc},
	{name: "can", fn: tryfunc.CanFunc},
	{name: "ceil", fn: stdlib.CeilFunc},
	{name: "chomp", fn: stdlib.ChompFunc},
	{name: "chunklist", fn: stdlib.ChunklistFunc},
	{name: "cidrhost", fn: cidr.HostFunc},
	{name: "cidrnetmask", fn: cidr.NetmaskFunc},
	{name: "cidrsubnet", fn: cidr.SubnetFunc},
	{name: "cidrsubnets", fn: cidr.SubnetsFunc},
	{name: "coalesce", fn: stdlib.CoalesceFunc},
	{name: "coalescelist", fn: stdlib.CoalesceListFunc},
	{name: "compact", fn: stdlib.CompactFunc},
	{name: "concat", fn: stdlib.ConcatFunc},
	{name: "contains", fn: stdlib.ContainsFunc},
	{name: "convert", fn: typeexpr.ConvertFunc},
	{name: "csvdecode", fn: stdlib.CSVDecodeFunc},
	{name: "dirname", factory: dirnameFunc},
	{name: "distinct", fn: stdlib.DistinctFunc},
	{name: "divide", fn: stdlib.DivideFunc},
	{name: "element", fn: stdlib.ElementFunc},
	{name: "equal", fn: stdlib.EqualFunc},
	{name: "flatten", fn: stdlib.FlattenFunc},
	{name: "floor", fn: stdlib.FloorFunc},
	{name: "format", fn: stdlib.FormatFunc},
	{name: "formatdate", fn: stdlib.FormatDateFunc},
	{name: "formatlist", fn: stdlib.FormatListFunc},
	{name: "greaterthan", fn: stdlib.GreaterThanFunc},
	{name: "greaterthanorequalto", fn: stdlib.GreaterThanOrEqualToFunc},
	{name: "hasindex", fn: stdlib.HasIndexFunc},
	{name: "indent", fn: stdlib.IndentFunc},
	{name: "index", fn: stdlib.IndexFunc},
	{name: "indexof", factory: indexOfFunc},
	{name: "int", fn: stdlib.IntFunc},
	{name: "join", fn: stdlib.JoinFunc},
	{name: "jsondecode", fn: stdlib.JSONDecodeFunc},
	{name: "jsonencode", fn: stdlib.JSONEncodeFunc},
	{name: "keys", fn: stdlib.KeysFunc},
	{name: "length", fn: stdlib.LengthFunc},
	{name: "lessthan", fn: stdlib.LessThanFunc},
	{name: "lessthanorequalto", fn: stdlib.LessThanOrEqualToFunc},
	{name: "log", fn: stdlib.LogFunc},
	{name: "lookup", fn: stdlib.LookupFunc},
	{name: "lower", fn: stdlib.LowerFunc},
	{name: "max", fn: stdlib.MaxFunc},
	{name: "md5", fn: crypto.Md5Func},
	{name: "merge", fn: stdlib.MergeFunc},
	{name: "min", fn: stdlib.MinFunc},
	{name: "modulo", fn: stdlib.ModuloFunc},
	{name: "multiply", fn: stdlib.MultiplyFunc},
	{name: "negate", fn: stdlib.NegateFunc},
	{name: "not", fn: stdlib.NotFunc},
	{name: "notequal", fn: stdlib.NotEqualFunc},
	{name: "or", fn: stdlib.OrFunc},
	{name: "parseint", fn: stdlib.ParseIntFunc},
	{name: "pow", fn: stdlib.PowFunc},
	{name: "range", fn: stdlib.RangeFunc},
	{name: "regex_replace", fn: stdlib.RegexReplaceFunc},
	{name: "regex", fn: stdlib.RegexFunc},
	{name: "regexall", fn: stdlib.RegexAllFunc},
	{name: "replace", fn: stdlib.ReplaceFunc},
	{name: "reverse", fn: stdlib.ReverseFunc},
	{name: "reverselist", fn: stdlib.ReverseListFunc},
	{name: "rsadecrypt", fn: crypto.RsaDecryptFunc},
	{name: "sanitize", factory: sanitizeFunc},
	{name: "sethaselement", fn: stdlib.SetHasElementFunc},
	{name: "setintersection", fn: stdlib.SetIntersectionFunc},
	{name: "setproduct", fn: stdlib.SetProductFunc},
	{name: "setsubtract", fn: stdlib.SetSubtractFunc},
	{name: "setsymmetricdifference", fn: stdlib.SetSymmetricDifferenceFunc},
	{name: "setunion", fn: stdlib.SetUnionFunc},
	{name: "sha1", fn: crypto.Sha1Func},
	{name: "sha256", fn: crypto.Sha256Func},
	{name: "sha512", fn: crypto.Sha512Func},
	{name: "signum", fn: stdlib.SignumFunc},
	{name: "slice", fn: stdlib.SliceFunc},
	{name: "sort", fn: stdlib.SortFunc},
	{name: "split", fn: stdlib.SplitFunc},
	{name: "strlen", fn: stdlib.StrlenFunc},
	{name: "substr", fn: stdlib.SubstrFunc},
	{name: "subtract", fn: stdlib.SubtractFunc},
	{name: "timeadd", fn: stdlib.TimeAddFunc},
	{name: "timestamp", factory: timestampFunc},
	{name: "title", fn: stdlib.TitleFunc},
	{name: "trim", fn: stdlib.TrimFunc},
	{name: "trimprefix", fn: stdlib.TrimPrefixFunc},
	{name: "trimspace", fn: stdlib.TrimSpaceFunc},
	{name: "trimsuffix", fn: stdlib.TrimSuffixFunc},
	{name: "try", fn: tryfunc.TryFunc},
	{name: "upper", fn: stdlib.UpperFunc},
	{name: "urlencode", fn: encoding.URLEncodeFunc},
	{name: "uuidv4", fn: uuid.V4Func},
	{name: "uuidv5", fn: uuid.V5Func},
	{name: "values", fn: stdlib.ValuesFunc},
	{name: "zipmap", fn: stdlib.ZipmapFunc},
}

// indexOfFunc constructs a function that finds the element index for a given
// value in a list.
func indexOfFunc() function.Function {
	return function.New(&function.Spec{
		Params: []function.Parameter{
			{
				Name: "list",
				Type: cty.DynamicPseudoType,
			},
			{
				Name: "value",
				Type: cty.DynamicPseudoType,
			},
		},
		Type: function.StaticReturnType(cty.Number),
		Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
			if !(args[0].Type().IsListType() || args[0].Type().IsTupleType()) {
				return cty.NilVal, errors.New("argument must be a list or tuple")
			}

			if !args[0].IsKnown() {
				return cty.UnknownVal(cty.Number), nil
			}

			if args[0].LengthInt() == 0 { // Easy path
				return cty.NilVal, errors.New("cannot search an empty list")
			}

			for it := args[0].ElementIterator(); it.Next(); {
				i, v := it.Element()
				eq, err := stdlib.Equal(v, args[1])
				if err != nil {
					return cty.NilVal, err
				}
				if !eq.IsKnown() {
					return cty.UnknownVal(cty.Number), nil
				}
				if eq.True() {
					return i, nil
				}
			}
			return cty.NilVal, errors.New("item not found")
		},
	})
}

// basenameFunc constructs a function that returns the last element of a path.
func basenameFunc() function.Function {
	return function.New(&function.Spec{
		Params: []function.Parameter{
			{
				Name: "path",
				Type: cty.String,
			},
		},
		Type: function.StaticReturnType(cty.String),
		Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
			in := args[0].AsString()
			return cty.StringVal(path.Base(in)), nil
		},
	})
}

// dirnameFunc constructs a function that returns the directory of a path.
func dirnameFunc() function.Function {
	return function.New(&function.Spec{
		Params: []function.Parameter{
			{
				Name: "path",
				Type: cty.String,
			},
		},
		Type: function.StaticReturnType(cty.String),
		Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
			in := args[0].AsString()
			return cty.StringVal(path.Dir(in)), nil
		},
	})
}

// sanitizeFunc constructs a function that replaces all non-alphanumeric characters with an underscore,
// leaving only characters that are valid for a Bake target name.
func sanitizeFunc() function.Function {
	return function.New(&function.Spec{
		Params: []function.Parameter{
			{
				Name: "name",
				Type: cty.String,
			},
		},
		Type: function.StaticReturnType(cty.String),
		Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
			in := args[0].AsString()
			// only [a-zA-Z0-9_-]+ is allowed
			var b strings.Builder
			for _, r := range in {
				if r >= 'a' && r <= 'z' || r >= 'A' && r <= 'Z' || r >= '0' && r <= '9' || r == '_' || r == '-' {
					b.WriteRune(r)
				} else {
					b.WriteRune('_')
				}
			}
			return cty.StringVal(b.String()), nil
		},
	})
var stdlibFunctions = map[string]function.Function{
	"absolute":               stdlib.AbsoluteFunc,
	"add":                    stdlib.AddFunc,
	"and":                    stdlib.AndFunc,
	"base64decode":           encoding.Base64DecodeFunc,
	"base64encode":           encoding.Base64EncodeFunc,
	"bcrypt":                 crypto.BcryptFunc,
	"byteslen":               stdlib.BytesLenFunc,
	"bytesslice":             stdlib.BytesSliceFunc,
	"can":                    tryfunc.CanFunc,
	"ceil":                   stdlib.CeilFunc,
	"chomp":                  stdlib.ChompFunc,
	"chunklist":              stdlib.ChunklistFunc,
	"cidrhost":               cidr.HostFunc,
	"cidrnetmask":            cidr.NetmaskFunc,
	"cidrsubnet":             cidr.SubnetFunc,
	"cidrsubnets":            cidr.SubnetsFunc,
	"csvdecode":              stdlib.CSVDecodeFunc,
	"coalesce":               stdlib.CoalesceFunc,
	"coalescelist":           stdlib.CoalesceListFunc,
	"compact":                stdlib.CompactFunc,
	"concat":                 stdlib.ConcatFunc,
	"contains":               stdlib.ContainsFunc,
	"convert":                typeexpr.ConvertFunc,
	"distinct":               stdlib.DistinctFunc,
	"divide":                 stdlib.DivideFunc,
	"element":                stdlib.ElementFunc,
	"equal":                  stdlib.EqualFunc,
	"flatten":                stdlib.FlattenFunc,
	"floor":                  stdlib.FloorFunc,
	"formatdate":             stdlib.FormatDateFunc,
	"format":                 stdlib.FormatFunc,
	"formatlist":             stdlib.FormatListFunc,
	"greaterthan":            stdlib.GreaterThanFunc,
	"greaterthanorequalto":   stdlib.GreaterThanOrEqualToFunc,
	"hasindex":               stdlib.HasIndexFunc,
	"indent":                 stdlib.IndentFunc,
	"index":                  stdlib.IndexFunc,
	"int":                    stdlib.IntFunc,
	"jsondecode":             stdlib.JSONDecodeFunc,
	"jsonencode":             stdlib.JSONEncodeFunc,
	"keys":                   stdlib.KeysFunc,
	"join":                   stdlib.JoinFunc,
	"length":                 stdlib.LengthFunc,
	"lessthan":               stdlib.LessThanFunc,
	"lessthanorequalto":      stdlib.LessThanOrEqualToFunc,
	"log":                    stdlib.LogFunc,
	"lookup":                 stdlib.LookupFunc,
	"lower":                  stdlib.LowerFunc,
	"max":                    stdlib.MaxFunc,
	"md5":                    crypto.Md5Func,
	"merge":                  stdlib.MergeFunc,
	"min":                    stdlib.MinFunc,
	"modulo":                 stdlib.ModuloFunc,
	"multiply":               stdlib.MultiplyFunc,
	"negate":                 stdlib.NegateFunc,
	"notequal":               stdlib.NotEqualFunc,
	"not":                    stdlib.NotFunc,
	"or":                     stdlib.OrFunc,
	"parseint":               stdlib.ParseIntFunc,
	"pow":                    stdlib.PowFunc,
	"range":                  stdlib.RangeFunc,
	"regexall":               stdlib.RegexAllFunc,
	"regex":                  stdlib.RegexFunc,
	"regex_replace":          stdlib.RegexReplaceFunc,
	"reverse":                stdlib.ReverseFunc,
	"reverselist":            stdlib.ReverseListFunc,
	"rsadecrypt":             crypto.RsaDecryptFunc,
	"sethaselement":          stdlib.SetHasElementFunc,
	"setintersection":        stdlib.SetIntersectionFunc,
	"setproduct":             stdlib.SetProductFunc,
	"setsubtract":            stdlib.SetSubtractFunc,
	"setsymmetricdifference": stdlib.SetSymmetricDifferenceFunc,
	"setunion":               stdlib.SetUnionFunc,
	"sha1":                   crypto.Sha1Func,
	"sha256":                 crypto.Sha256Func,
	"sha512":                 crypto.Sha512Func,
	"signum":                 stdlib.SignumFunc,
	"slice":                  stdlib.SliceFunc,
	"sort":                   stdlib.SortFunc,
	"split":                  stdlib.SplitFunc,
	"strlen":                 stdlib.StrlenFunc,
	"substr":                 stdlib.SubstrFunc,
	"subtract":               stdlib.SubtractFunc,
	"timeadd":                stdlib.TimeAddFunc,
	"timestamp":              timestampFunc,
	"title":                  stdlib.TitleFunc,
	"trim":                   stdlib.TrimFunc,
	"trimprefix":             stdlib.TrimPrefixFunc,
	"trimspace":              stdlib.TrimSpaceFunc,
	"trimsuffix":             stdlib.TrimSuffixFunc,
	"try":                    tryfunc.TryFunc,
	"upper":                  stdlib.UpperFunc,
	"urlencode":              encoding.URLEncodeFunc,
	"uuidv4":                 uuid.V4Func,
	"uuidv5":                 uuid.V5Func,
	"values":                 stdlib.ValuesFunc,
	"zipmap":                 stdlib.ZipmapFunc,
}

// timestampFunc constructs a function that returns a string representation of the current date and time.
//
// This function was imported from terraform's datetime utilities.
func timestampFunc() function.Function {
	return function.New(&function.Spec{
		Params: []function.Parameter{},
		Type:   function.StaticReturnType(cty.String),
		Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
			return cty.StringVal(time.Now().UTC().Format(time.RFC3339)), nil
		},
	})
}

func Stdlib() map[string]function.Function {
	funcs := make(map[string]function.Function, len(stdlibFunctions))
	for _, v := range stdlibFunctions {
		if v.factory != nil {
			funcs[v.name] = v.factory()
		} else {
			funcs[v.name] = v.fn
		}
	}
	return funcs
}
var timestampFunc = function.New(&function.Spec{
	Params: []function.Parameter{},
	Type:   function.StaticReturnType(cty.String),
	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
		return cty.StringVal(time.Now().UTC().Format(time.RFC3339)), nil
	},
})
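To see how a table like `stdlibFunctions` is consumed, here is a minimal sketch that exposes two of these cty functions to an HCL expression; the expression and the function subset are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/zclconf/go-cty/cty/function"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	// A small stand-in for the full Stdlib() table above: any functions
	// placed in the EvalContext become callable from HCL expressions.
	ctx := &hcl.EvalContext{
		Functions: map[string]function.Function{
			"upper": stdlib.UpperFunc,
			"lower": stdlib.LowerFunc,
		},
	}

	expr, diags := hclsyntax.ParseExpression([]byte(`upper("bake")`), "inline", hcl.InitialPos)
	if diags.HasErrors() {
		panic(diags)
	}
	v, diags := expr.Value(ctx)
	if diags.HasErrors() {
		panic(diags)
	}
	fmt.Println(v.AsString()) // BAKE
}
```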
@@ -1,199 +0,0 @@
package hclparser

import (
	"testing"

	"github.com/stretchr/testify/require"
	"github.com/zclconf/go-cty/cty"
)

func TestIndexOf(t *testing.T) {
	type testCase struct {
		input   cty.Value
		key     cty.Value
		want    cty.Value
		wantErr bool
	}
	tests := map[string]testCase{
		"index 0": {
			input: cty.TupleVal([]cty.Value{cty.StringVal("one"), cty.NumberIntVal(2.0), cty.NumberIntVal(3), cty.StringVal("four")}),
			key:   cty.StringVal("one"),
			want:  cty.NumberIntVal(0),
		},
		"index 3": {
			input: cty.TupleVal([]cty.Value{cty.StringVal("one"), cty.NumberIntVal(2.0), cty.NumberIntVal(3), cty.StringVal("four")}),
			key:   cty.StringVal("four"),
			want:  cty.NumberIntVal(3),
		},
		"index -1": {
			input:   cty.TupleVal([]cty.Value{cty.StringVal("one"), cty.NumberIntVal(2.0), cty.NumberIntVal(3), cty.StringVal("four")}),
			key:     cty.StringVal("3"),
			wantErr: true,
		},
	}

	for name, test := range tests {
		name, test := name, test
		t.Run(name, func(t *testing.T) {
			got, err := indexOfFunc().Call([]cty.Value{test.input, test.key})
			if test.wantErr {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
				require.Equal(t, test.want, got)
			}
		})
	}
}

func TestBasename(t *testing.T) {
	type testCase struct {
		input   cty.Value
		want    cty.Value
		wantErr bool
	}
	tests := map[string]testCase{
		"empty": {
			input: cty.StringVal(""),
			want:  cty.StringVal("."),
		},
		"slash": {
			input: cty.StringVal("/"),
			want:  cty.StringVal("/"),
		},
		"simple": {
			input: cty.StringVal("/foo/bar"),
			want:  cty.StringVal("bar"),
		},
		"simple no slash": {
			input: cty.StringVal("foo/bar"),
			want:  cty.StringVal("bar"),
		},
		"dot": {
			input: cty.StringVal("/foo/bar."),
			want:  cty.StringVal("bar."),
		},
		"dotdot": {
			input: cty.StringVal("/foo/bar.."),
			want:  cty.StringVal("bar.."),
		},
		"dotdotdot": {
			input: cty.StringVal("/foo/bar..."),
			want:  cty.StringVal("bar..."),
		},
	}

	for name, test := range tests {
		name, test := name, test
		t.Run(name, func(t *testing.T) {
			got, err := basenameFunc().Call([]cty.Value{test.input})
			if test.wantErr {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
				require.Equal(t, test.want, got)
			}
		})
	}
}

func TestDirname(t *testing.T) {
	type testCase struct {
		input   cty.Value
		want    cty.Value
		wantErr bool
	}
	tests := map[string]testCase{
		"empty": {
			input: cty.StringVal(""),
			want:  cty.StringVal("."),
		},
		"slash": {
			input: cty.StringVal("/"),
			want:  cty.StringVal("/"),
		},
		"simple": {
			input: cty.StringVal("/foo/bar"),
			want:  cty.StringVal("/foo"),
		},
		"simple no slash": {
			input: cty.StringVal("foo/bar"),
			want:  cty.StringVal("foo"),
		},
		"dot": {
			input: cty.StringVal("/foo/bar."),
			want:  cty.StringVal("/foo"),
		},
		"dotdot": {
			input: cty.StringVal("/foo/bar.."),
			want:  cty.StringVal("/foo"),
		},
		"dotdotdot": {
			input: cty.StringVal("/foo/bar..."),
			want:  cty.StringVal("/foo"),
		},
	}

	for name, test := range tests {
		name, test := name, test
		t.Run(name, func(t *testing.T) {
			got, err := dirnameFunc().Call([]cty.Value{test.input})
			if test.wantErr {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
				require.Equal(t, test.want, got)
			}
		})
	}
}

func TestSanitize(t *testing.T) {
	type testCase struct {
		input cty.Value
		want  cty.Value
	}
	tests := map[string]testCase{
		"empty": {
			input: cty.StringVal(""),
			want:  cty.StringVal(""),
		},
		"simple": {
			input: cty.StringVal("foo/bar"),
			want:  cty.StringVal("foo_bar"),
		},
		"simple no slash": {
			input: cty.StringVal("foobar"),
			want:  cty.StringVal("foobar"),
		},
		"dot": {
			input: cty.StringVal("foo/bar."),
			want:  cty.StringVal("foo_bar_"),
		},
		"dotdot": {
			input: cty.StringVal("foo/bar.."),
			want:  cty.StringVal("foo_bar__"),
		},
		"dotdotdot": {
			input: cty.StringVal("foo/bar..."),
			want:  cty.StringVal("foo_bar___"),
		},
		"utf8": {
			input: cty.StringVal("foo/🍕bar"),
			want:  cty.StringVal("foo__bar"),
		},
		"symbols": {
			input: cty.StringVal("foo/bar!@(ba+z)"),
			want:  cty.StringVal("foo_bar___ba_z_"),
		},
	}

	for name, test := range tests {
		name, test := name, test
		t.Run(name, func(t *testing.T) {
			got, err := sanitizeFunc().Call([]cty.Value{test.input})
			require.NoError(t, err)
			require.Equal(t, test.want, got)
		})
	}
}
@@ -1,160 +0,0 @@
// MIT License
//
// Copyright (c) 2017-2018 Martin Atkins
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.

package hclparser

import (
	"reflect"

	"github.com/zclconf/go-cty/cty"
)

// ImpliedType takes an arbitrary Go value (as an interface{}) and attempts
// to find a suitable cty.Type instance that could be used for a conversion
// with ToCtyValue.
//
// This allows -- for simple situations at least -- types to be defined just
// once in Go and the cty types derived from the Go types, but in the process
// it makes some assumptions that may be undesirable so applications are
// encouraged to build their cty types directly if exacting control is
// required.
//
// Not all Go types can be represented as cty types, so an error may be
// returned which is usually considered to be a bug in the calling program.
// In particular, ImpliedType will never use capsule types in its returned
// type, because it cannot know the capsule types supported by the calling
// program.
func ImpliedType(gv interface{}) (cty.Type, error) {
	rt := reflect.TypeOf(gv)
	var path cty.Path
	return impliedType(rt, path)
}

func impliedType(rt reflect.Type, path cty.Path) (cty.Type, error) {
	if ety, err := impliedTypeExt(rt, path); err == nil {
		return ety, nil
	}

	switch rt.Kind() {
	case reflect.Ptr:
		return impliedType(rt.Elem(), path)

	// Primitive types
	case reflect.Bool:
		return cty.Bool, nil
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return cty.Number, nil
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return cty.Number, nil
	case reflect.Float32, reflect.Float64:
		return cty.Number, nil
	case reflect.String:
		return cty.String, nil

	// Collection types
	case reflect.Slice:
		path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.Number)})
		ety, err := impliedType(rt.Elem(), path)
		if err != nil {
			return cty.NilType, err
		}
		return cty.List(ety), nil
	case reflect.Map:
		if !stringType.AssignableTo(rt.Key()) {
			return cty.NilType, path.NewErrorf("no cty.Type for %s (must have string keys)", rt)
		}
		path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.String)})
		ety, err := impliedType(rt.Elem(), path)
		if err != nil {
			return cty.NilType, err
		}
		return cty.Map(ety), nil

	// Structural types
	case reflect.Struct:
		return impliedStructType(rt, path)

	default:
		return cty.NilType, path.NewErrorf("no cty.Type for %s", rt)
	}
}

func impliedStructType(rt reflect.Type, path cty.Path) (cty.Type, error) {
	if valueType.AssignableTo(rt) {
		// Special case: cty.Value represents cty.DynamicPseudoType, for
		// type conformance checking.
		return cty.DynamicPseudoType, nil
	}

	fieldIdxs := structTagIndices(rt)
	if len(fieldIdxs) == 0 {
		return cty.NilType, path.NewErrorf("no cty.Type for %s (no cty field tags)", rt)
	}

	atys := make(map[string]cty.Type, len(fieldIdxs))

	{
		// Temporary extension of path for attributes
		path := append(path, nil)

		for k, fi := range fieldIdxs {
			path[len(path)-1] = cty.GetAttrStep{Name: k}

			ft := rt.Field(fi).Type
			aty, err := impliedType(ft, path)
			if err != nil {
				return cty.NilType, err
			}

			atys[k] = aty
		}
	}

	return cty.Object(atys), nil
}

var (
	valueType  = reflect.TypeOf(cty.Value{})
	stringType = reflect.TypeOf("")
)

// structTagIndices interrogates the fields of the given type (which must
// be a struct type, or we'll panic) and returns a map from the cty
// attribute names declared via struct tags to the indices of the
// fields holding those tags.
//
// This function will panic if two fields within the struct are tagged with
// the same cty attribute name.
func structTagIndices(st reflect.Type) map[string]int {
	ct := st.NumField()
	ret := make(map[string]int, ct)

	for i := 0; i < ct; i++ {
		field := st.Field(i)
		attrName := field.Tag.Get("cty")
		if attrName != "" {
			ret[attrName] = i
		}
	}

	return ret
}
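A small sketch of the upstream `gocty` API that this forked file mirrors; the `Point` struct is hypothetical, and `impliedTypeExt` above only adds a capsule-type fast path in front of this behavior:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty/gocty"
)

// Point carries cty tags, so gocty can derive an object type from it and
// then convert concrete values into that type.
type Point struct {
	X int `cty:"x"`
	Y int `cty:"y"`
}

func main() {
	ty, err := gocty.ImpliedType(Point{})
	if err != nil {
		panic(err)
	}
	val, err := gocty.ToCtyValue(Point{X: 1, Y: 2}, ty)
	if err != nil {
		panic(err)
	}
	fmt.Println(ty.FriendlyName())          // object type with x and y
	fmt.Println(val.GetAttr("x").GoString()) // cty.NumberIntVal(1)
}
```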
@@ -1,166 +0,0 @@
package hclparser

import (
	"reflect"
	"sync"

	"github.com/containerd/errdefs"
	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
	"github.com/zclconf/go-cty/cty/gocty"
)

type ToCtyValueConverter interface {
	// ToCtyValue will convert this capsule value into a native
	// cty.Value. This should not return a capsule type.
	ToCtyValue() cty.Value
}

type FromCtyValueConverter interface {
	// FromCtyValue will initialize this value using a cty.Value.
	FromCtyValue(in cty.Value, path cty.Path) error
}

type extensionType int

const (
	unwrapCapsuleValueExtension extensionType = iota
)

func impliedTypeExt(rt reflect.Type, _ cty.Path) (cty.Type, error) {
	if rt.Kind() != reflect.Pointer {
		rt = reflect.PointerTo(rt)
	}

	if isCapsuleType(rt) {
		return capsuleValueCapsuleType(rt), nil
	}
	return cty.NilType, errdefs.ErrNotImplemented
}

func isCapsuleType(rt reflect.Type) bool {
	fromCtyValueType := reflect.TypeFor[FromCtyValueConverter]()
	toCtyValueType := reflect.TypeFor[ToCtyValueConverter]()
	return rt.Implements(fromCtyValueType) && rt.Implements(toCtyValueType)
}

var capsuleValueTypes sync.Map

func capsuleValueCapsuleType(rt reflect.Type) cty.Type {
	if rt.Kind() != reflect.Pointer {
		panic("capsule value must be a pointer")
	}

	elem := rt.Elem()
	if val, loaded := capsuleValueTypes.Load(elem); loaded {
		return val.(cty.Type)
	}

	toCtyValueType := reflect.TypeFor[ToCtyValueConverter]()

	// First time used. Initialize new capsule ops.
	ops := &cty.CapsuleOps{
		ConversionTo: func(_ cty.Type) func(cty.Value, cty.Path) (any, error) {
			return func(in cty.Value, p cty.Path) (any, error) {
				rv := reflect.New(elem).Interface()
				if err := rv.(FromCtyValueConverter).FromCtyValue(in, p); err != nil {
					return nil, err
				}
				return rv, nil
			}
		},
		ConversionFrom: func(want cty.Type) func(any, cty.Path) (cty.Value, error) {
			return func(in any, _ cty.Path) (cty.Value, error) {
				rv := reflect.ValueOf(in).Convert(toCtyValueType)
				v := rv.Interface().(ToCtyValueConverter).ToCtyValue()
				return convert.Convert(v, want)
			}
		},
		ExtensionData: func(key any) any {
			switch key {
			case unwrapCapsuleValueExtension:
				zero := reflect.Zero(elem).Interface()
				if conv, ok := zero.(ToCtyValueConverter); ok {
					return conv.ToCtyValue().Type()
				}

				zero = reflect.Zero(rt).Interface()
				if conv, ok := zero.(ToCtyValueConverter); ok {
					return conv.ToCtyValue().Type()
				}
			}
			return nil
		},
	}

	// Attempt to store the new type. Use whichever was loaded first in the case
	// of a race condition.
	ety := cty.CapsuleWithOps(elem.Name(), elem, ops)
	val, _ := capsuleValueTypes.LoadOrStore(elem, ety)
	return val.(cty.Type)
}

// UnwrapCtyValue will unwrap capsule type values into their native cty value
// equivalents if possible.
func UnwrapCtyValue(in cty.Value) cty.Value {
	want := toCtyValueType(in.Type())
	if in.Type().Equals(want) {
		return in
	} else if out, err := convert.Convert(in, want); err == nil {
		return out
	}
	return cty.NullVal(want)
}

func toCtyValueType(in cty.Type) cty.Type {
	if et := in.MapElementType(); et != nil {
		return cty.Map(toCtyValueType(*et))
	}

	if et := in.SetElementType(); et != nil {
		return cty.Set(toCtyValueType(*et))
	}

	if et := in.ListElementType(); et != nil {
		return cty.List(toCtyValueType(*et))
	}

	if in.IsObjectType() {
		var optional []string
		inAttrTypes := in.AttributeTypes()
		outAttrTypes := make(map[string]cty.Type, len(inAttrTypes))
		for name, typ := range inAttrTypes {
			outAttrTypes[name] = toCtyValueType(typ)
			if in.AttributeOptional(name) {
				optional = append(optional, name)
			}
		}
		return cty.ObjectWithOptionalAttrs(outAttrTypes, optional)
	}

	if in.IsTupleType() {
		inTypes := in.TupleElementTypes()
		outTypes := make([]cty.Type, len(inTypes))
		for i, typ := range inTypes {
			outTypes[i] = toCtyValueType(typ)
		}
		return cty.Tuple(outTypes)
	}

	if in.IsCapsuleType() {
		if out := in.CapsuleExtensionData(unwrapCapsuleValueExtension); out != nil {
			return out.(cty.Type)
		}
		return cty.DynamicPseudoType
	}

	return in
}

func ToCtyValue(val any, ty cty.Type) (cty.Value, error) {
	out, err := gocty.ToCtyValue(val, ty)
	if err != nil {
		return out, err
	}
	return UnwrapCtyValue(out), nil
}
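For illustration, a hypothetical capsule value satisfying both converter interfaces above; a type like this would be mapped to a capsule type by `impliedTypeExt` and flattened back to a plain cty value by `UnwrapCtyValue`:

```go
package hclparser

import (
	"github.com/zclconf/go-cty/cty"
)

// Duration is a hypothetical capsule value: *Duration implements both
// FromCtyValueConverter and ToCtyValueConverter, which is exactly the
// check performed by isCapsuleType above. Error handling is elided.
type Duration struct {
	raw string
}

// FromCtyValue initializes the value from a cty string.
func (d *Duration) FromCtyValue(in cty.Value, p cty.Path) error {
	d.raw = in.AsString()
	return nil
}

// ToCtyValue converts the value back into a native (non-capsule) cty value.
func (d *Duration) ToCtyValue() cty.Value {
	return cty.StringVal(d.raw)
}
```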
@@ -4,61 +4,27 @@ import (
	"archive/tar"
	"bytes"
	"context"
	"os"
	"strings"

	"github.com/docker/buildx/builder"
	controllerapi "github.com/docker/buildx/controller/pb"
	"github.com/docker/buildx/driver"
	"github.com/docker/buildx/util/progress"
	"github.com/docker/go-units"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/client/llb"
	"github.com/moby/buildkit/frontend/dockerui"
	gwclient "github.com/moby/buildkit/frontend/gateway/client"
	"github.com/moby/buildkit/session"
	"github.com/pkg/errors"
)

const maxBakeDefinitionSize = 2 * 1024 * 1024 // 2 MB

type Input struct {
	State *llb.State
	URL   string
}

func ReadRemoteFiles(ctx context.Context, nodes []builder.Node, url string, names []string, pw progress.Writer) ([]File, *Input, error) {
	var sessions []session.Attachable
	var filename string

	st, ok := dockerui.DetectGitContext(url, false)
	if ok {
		if ssh, err := controllerapi.CreateSSH([]*controllerapi.SSH{{
			ID:    "default",
			Paths: strings.Split(os.Getenv("BUILDX_BAKE_GIT_SSH"), ","),
		}}); err == nil {
			sessions = append(sessions, ssh)
		}
		var gitAuthSecrets []*controllerapi.Secret
		if _, ok := os.LookupEnv("BUILDX_BAKE_GIT_AUTH_TOKEN"); ok {
			gitAuthSecrets = append(gitAuthSecrets, &controllerapi.Secret{
				ID:  llb.GitAuthTokenKey,
				Env: "BUILDX_BAKE_GIT_AUTH_TOKEN",
			})
		}
		if _, ok := os.LookupEnv("BUILDX_BAKE_GIT_AUTH_HEADER"); ok {
			gitAuthSecrets = append(gitAuthSecrets, &controllerapi.Secret{
				ID:  llb.GitAuthHeaderKey,
				Env: "BUILDX_BAKE_GIT_AUTH_HEADER",
			})
		}
		if len(gitAuthSecrets) > 0 {
			if secrets, err := controllerapi.CreateSecrets(gitAuthSecrets); err == nil {
				sessions = append(sessions, secrets)
			}
		}
	} else {
		st, filename, ok = dockerui.DetectHTTPContext(url)
	st, ok := detectGitContext(url)
	if !ok {
		st, filename, ok = detectHTTPContext(url)
		if !ok {
			return nil, nil, errors.Errorf("not url context")
		}
@@ -68,9 +34,9 @@ func ReadRemoteFiles(ctx context.Context, nodes []builder.Node, url string, name
	var files []File

	var node *builder.Node
	for i, n := range nodes {
	for _, n := range nodes {
		if n.Err == nil {
			node = &nodes[i]
			node = &n
			continue
		}
	}
@@ -85,7 +51,7 @@ func ReadRemoteFiles(ctx context.Context, nodes []builder.Node, url string, name

	ch, done := progress.NewChannel(pw)
	defer func() { <-done }()
	_, err = c.Build(ctx, client.SolveOpt{Session: sessions, Internal: true}, "buildx", func(ctx context.Context, c gwclient.Client) (*gwclient.Result, error) {
	_, err = c.Build(ctx, client.SolveOpt{}, "buildx", func(ctx context.Context, c gwclient.Client) (*gwclient.Result, error) {
		def, err := st.Marshal(ctx)
		if err != nil {
			return nil, err
@@ -109,6 +75,7 @@ func ReadRemoteFiles(ctx context.Context, nodes []builder.Node, url string, name
		}
		return nil, err
	}, ch)

	if err != nil {
		return nil, nil, err
	}
@@ -116,6 +83,51 @@ func ReadRemoteFiles(ctx context.Context, nodes []builder.Node, url string, name
	return files, inp, nil
}

func IsRemoteURL(url string) bool {
	if _, _, ok := detectHTTPContext(url); ok {
		return true
	}
	if _, ok := detectGitContext(url); ok {
		return true
	}
	return false
}

func detectHTTPContext(url string) (*llb.State, string, bool) {
	if httpPrefix.MatchString(url) {
		httpContext := llb.HTTP(url, llb.Filename("context"), llb.WithCustomName("[internal] load remote build context"))
		return &httpContext, "context", true
	}
	return nil, "", false
}

func detectGitContext(ref string) (*llb.State, bool) {
	found := false
	if httpPrefix.MatchString(ref) && gitURLPathWithFragmentSuffix.MatchString(ref) {
		found = true
	}

	for _, prefix := range []string{"git://", "github.com/", "git@"} {
		if strings.HasPrefix(ref, prefix) {
			found = true
			break
		}
	}
	if !found {
		return nil, false
	}

	parts := strings.SplitN(ref, "#", 2)
	branch := ""
	if len(parts) > 1 {
		branch = parts[1]
	}
	gitOpts := []llb.GitOption{llb.WithCustomName("[internal] load git source " + ref)}

	st := llb.Git(parts[0], branch, gitOpts...)
	return &st, true
}

func isArchive(header []byte) bool {
	for _, m := range [][]byte{
		{0x42, 0x5A, 0x68}, // bzip2
@@ -180,9 +192,9 @@ func filesFromURLRef(ctx context.Context, c gwclient.Client, ref gwclient.Refere
	name := inp.URL
	inp.URL = ""

	if int64(len(dt)) > stat.Size {
		if stat.Size > maxBakeDefinitionSize {
			return nil, errors.Errorf("non-archive definition URL bigger than maximum allowed size (%s)", units.HumanSize(maxBakeDefinitionSize))
	if len(dt) > stat.Size() {
		if stat.Size() > 1024*512 {
			return nil, errors.Errorf("non-archive definition URL bigger than maximum allowed size")
		}

		dt, err = ref.ReadFile(ctx, gwclient.ReadRequest{
build/build.go (1668 lines changed): file diff suppressed because it is too large
@@ -1,62 +0,0 @@
package build

import (
"context"
stderrors "errors"
"net"

"github.com/containerd/platforms"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/util/progress"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)

func Dial(ctx context.Context, nodes []builder.Node, pw progress.Writer, platform *v1.Platform) (net.Conn, error) {
nodes, err := filterAvailableNodes(nodes)
if err != nil {
return nil, err
}

if len(nodes) == 0 {
return nil, errors.New("no nodes available")
}

var pls []v1.Platform
if platform != nil {
pls = []v1.Platform{*platform}
}

opts := map[string]Options{"default": {Platforms: pls}}
resolved, err := resolveDrivers(ctx, nodes, opts, pw)
if err != nil {
return nil, err
}

var dialError error
for _, ls := range resolved {
for _, rn := range ls {
if platform != nil {
p := *platform
var found bool
for _, pp := range rn.platforms {
if platforms.Only(p).Match(pp) {
found = true
break
}
}
if !found {
continue
}
}

conn, err := nodes[rn.driverIndex].Driver.Dial(ctx)
if err == nil {
return conn, nil
}
dialError = stderrors.Join(err)
}
}

return nil, errors.Wrap(dialError, "no nodes available")
}
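The removed `Dial` helper walks the resolved nodes in order and returns the first connection that succeeds, accumulating failures otherwise. A self-contained sketch of that first-success pattern, with a hypothetical `dialFunc` standing in for a driver's `Dial` method (here joining every failure, for clarity):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"net"
)

// dialFunc is a hypothetical stand-in for a node driver's Dial method.
type dialFunc func(ctx context.Context) (net.Conn, error)

// dialFirst returns the first successful connection, or all failures
// joined into one error, mirroring the loop in the removed Dial above.
func dialFirst(ctx context.Context, dialers []dialFunc) (net.Conn, error) {
	var dialError error
	for _, d := range dialers {
		conn, err := d(ctx)
		if err == nil {
			return conn, nil
		}
		dialError = errors.Join(dialError, err)
	}
	return nil, fmt.Errorf("no nodes available: %w", dialError)
}

func main() {
	_, err := dialFirst(context.Background(), []dialFunc{
		func(context.Context) (net.Conn, error) { return nil, errors.New("node offline") },
	})
	fmt.Println(err)
}
```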
352 build/driver.go
@@ -1,352 +0,0 @@
package build

import (
"context"
"fmt"
"sync"

"github.com/containerd/platforms"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/driver"
"github.com/docker/buildx/util/progress"
"github.com/moby/buildkit/client"
gateway "github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/util/flightcontrol"
"github.com/moby/buildkit/util/tracing"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"go.opentelemetry.io/otel/trace"
"golang.org/x/sync/errgroup"
)

type resolvedNode struct {
resolver *nodeResolver
driverIndex int
platforms []specs.Platform
}

func (dp resolvedNode) Node() builder.Node {
return dp.resolver.nodes[dp.driverIndex]
}

func (dp resolvedNode) Client(ctx context.Context) (*client.Client, error) {
clients, err := dp.resolver.boot(ctx, []int{dp.driverIndex}, nil)
if err != nil {
return nil, err
}
return clients[0], nil
}

func (dp resolvedNode) BuildOpts(ctx context.Context) (gateway.BuildOpts, error) {
opts, err := dp.resolver.opts(ctx, []int{dp.driverIndex}, nil)
if err != nil {
return gateway.BuildOpts{}, err
}
return opts[0], nil
}

type matchMaker func(specs.Platform) platforms.MatchComparer

type cachedGroup[T any] struct {
g flightcontrol.Group[T]
cache map[int]T
cacheMu sync.Mutex
}

func newCachedGroup[T any]() cachedGroup[T] {
return cachedGroup[T]{
cache: map[int]T{},
}
}

type nodeResolver struct {
nodes []builder.Node
clients cachedGroup[*client.Client]
buildOpts cachedGroup[gateway.BuildOpts]
}

func resolveDrivers(ctx context.Context, nodes []builder.Node, opt map[string]Options, pw progress.Writer) (map[string][]*resolvedNode, error) {
driverRes := newDriverResolver(nodes)
drivers, err := driverRes.Resolve(ctx, opt, pw)
if err != nil {
return nil, err
}
return drivers, err
}

func newDriverResolver(nodes []builder.Node) *nodeResolver {
r := &nodeResolver{
nodes: nodes,
clients: newCachedGroup[*client.Client](),
buildOpts: newCachedGroup[gateway.BuildOpts](),
}
return r
}

func (r *nodeResolver) Resolve(ctx context.Context, opt map[string]Options, pw progress.Writer) (map[string][]*resolvedNode, error) {
if len(r.nodes) == 0 {
return nil, nil
}

nodes := map[string][]*resolvedNode{}
for k, opt := range opt {
node, perfect, err := r.resolve(ctx, opt.Platforms, pw, platforms.OnlyStrict, nil)
if err != nil {
return nil, err
}
if !perfect {
break
}
nodes[k] = node
}
if len(nodes) != len(opt) {
// if we didn't get a perfect match, we need to boot all drivers
allIndexes := make([]int, len(r.nodes))
for i := range allIndexes {
allIndexes[i] = i
}

clients, err := r.boot(ctx, allIndexes, pw)
if err != nil {
return nil, err
}
eg, egCtx := errgroup.WithContext(ctx)
workers := make([][]specs.Platform, len(clients))
for i, c := range clients {
i, c := i, c
if c == nil {
continue
}
eg.Go(func() error {
ww, err := c.ListWorkers(egCtx)
if err != nil {
return errors.Wrap(err, "listing workers")
}

ps := make(map[string]specs.Platform, len(ww))
for _, w := range ww {
for _, p := range w.Platforms {
pk := platforms.Format(platforms.Normalize(p))
ps[pk] = p
}
}
for _, p := range ps {
workers[i] = append(workers[i], p)
}
return nil
})
}
if err := eg.Wait(); err != nil {
return nil, err
}

// then we can attempt to match against all the available platforms
// (this time we don't care about imperfect matches)
nodes = map[string][]*resolvedNode{}
for k, opt := range opt {
node, _, err := r.resolve(ctx, opt.Platforms, pw, platforms.Only, func(idx int, n builder.Node) []specs.Platform {
return workers[idx]
})
if err != nil {
return nil, err
}
nodes[k] = node
}
}

idxs := make([]int, 0, len(r.nodes))
for _, nodes := range nodes {
for _, node := range nodes {
idxs = append(idxs, node.driverIndex)
}
}

// preload capabilities
span, ctx := tracing.StartSpan(ctx, "load buildkit capabilities", trace.WithSpanKind(trace.SpanKindInternal))
_, err := r.opts(ctx, idxs, pw)
tracing.FinishWithError(span, err)
if err != nil {
return nil, err
}

return nodes, nil
}

func (r *nodeResolver) resolve(ctx context.Context, ps []specs.Platform, pw progress.Writer, matcher matchMaker, additional func(idx int, n builder.Node) []specs.Platform) ([]*resolvedNode, bool, error) {
if len(r.nodes) == 0 {
return nil, true, nil
}

perfect := true
nodeIdxs := make([]int, 0)
for _, p := range ps {
idx := r.get(p, matcher, additional)
if idx == -1 {
idx = 0
perfect = false
}
nodeIdxs = append(nodeIdxs, idx)
}

var nodes []*resolvedNode
if len(nodeIdxs) == 0 {
nodes = append(nodes, &resolvedNode{
resolver: r,
driverIndex: 0,
})
nodeIdxs = append(nodeIdxs, 0)
} else {
for i, idx := range nodeIdxs {
node := &resolvedNode{
resolver: r,
driverIndex: idx,
}
if len(ps) > 0 {
node.platforms = []specs.Platform{ps[i]}
}
nodes = append(nodes, node)
}
}

nodes = recombineNodes(nodes)
if _, err := r.boot(ctx, nodeIdxs, pw); err != nil {
return nil, false, err
}
return nodes, perfect, nil
}

func (r *nodeResolver) get(p specs.Platform, matcher matchMaker, additionalPlatforms func(int, builder.Node) []specs.Platform) int {
best := -1
bestPlatform := specs.Platform{}
for i, node := range r.nodes {
platforms := node.Platforms
if additionalPlatforms != nil {
platforms = append([]specs.Platform{}, platforms...)
platforms = append(platforms, additionalPlatforms(i, node)...)
}
for _, p2 := range platforms {
m := matcher(p2)
if !m.Match(p) {
continue
}

if best == -1 {
best = i
bestPlatform = p2
continue
}
if matcher(p2).Less(p, bestPlatform) {
best = i
bestPlatform = p2
}
}
}
return best
}
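`get` above scans each node's platform list and keeps the candidate the containerd matcher ranks best. A small standalone sketch of the same selection using `platforms.Only` directly; the expected output assumes the matcher prefers the exact variant, which is what the `TestSelectNodePreferExact` case below asserts:

```go
package main

import (
	"fmt"

	"github.com/containerd/platforms"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
)

// bestMatch returns the index of the candidate that best satisfies the
// requested platform, or -1 when none matches, mirroring (*nodeResolver).get.
func bestMatch(want specs.Platform, candidates []specs.Platform) int {
	best := -1
	var bestPlatform specs.Platform
	for i, c := range candidates {
		m := platforms.Only(c)
		if !m.Match(want) {
			continue
		}
		if best == -1 || m.Less(want, bestPlatform) {
			best, bestPlatform = i, c
		}
	}
	return best
}

func main() {
	want := platforms.MustParse("linux/arm/v7")
	cands := []specs.Platform{
		platforms.MustParse("linux/amd64"),
		platforms.MustParse("linux/arm64"),
		platforms.MustParse("linux/arm/v7"),
	}
	fmt.Println(bestMatch(want, cands)) // 2: the exact arm/v7 candidate wins
}
```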
func (r *nodeResolver) boot(ctx context.Context, idxs []int, pw progress.Writer) ([]*client.Client, error) {
clients := make([]*client.Client, len(idxs))

baseCtx := ctx
eg, ctx := errgroup.WithContext(ctx)

for i, idx := range idxs {
i, idx := i, idx
eg.Go(func() error {
c, err := r.clients.g.Do(ctx, fmt.Sprint(idx), func(ctx context.Context) (*client.Client, error) {
if r.nodes[idx].Driver == nil {
return nil, nil
}
r.clients.cacheMu.Lock()
c, ok := r.clients.cache[idx]
r.clients.cacheMu.Unlock()
if ok {
return c, nil
}
c, err := driver.Boot(ctx, baseCtx, r.nodes[idx].Driver, pw)
if err != nil {
return nil, err
}
r.clients.cacheMu.Lock()
r.clients.cache[idx] = c
r.clients.cacheMu.Unlock()
return c, nil
})
if err != nil {
return err
}
clients[i] = c
return nil
})
}
if err := eg.Wait(); err != nil {
return nil, err
}

return clients, nil
}

func (r *nodeResolver) opts(ctx context.Context, idxs []int, pw progress.Writer) ([]gateway.BuildOpts, error) {
clients, err := r.boot(ctx, idxs, pw)
if err != nil {
return nil, err
}

bopts := make([]gateway.BuildOpts, len(clients))
eg, ctx := errgroup.WithContext(ctx)
for i, idxs := range idxs {
i, idx := i, idxs
c := clients[i]
if c == nil {
continue
}
eg.Go(func() error {
opt, err := r.buildOpts.g.Do(ctx, fmt.Sprint(idx), func(ctx context.Context) (gateway.BuildOpts, error) {
r.buildOpts.cacheMu.Lock()
opt, ok := r.buildOpts.cache[idx]
r.buildOpts.cacheMu.Unlock()
if ok {
return opt, nil
}
_, err := c.Build(ctx, client.SolveOpt{
Internal: true,
}, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
opt = c.BuildOpts()
return nil, nil
}, nil)
if err != nil {
return gateway.BuildOpts{}, err
}
r.buildOpts.cacheMu.Lock()
r.buildOpts.cache[idx] = opt
r.buildOpts.cacheMu.Unlock()
return opt, err
})
if err != nil {
return err
}
bopts[i] = opt
return nil
})
}
if err := eg.Wait(); err != nil {
return nil, err
}
return bopts, nil
}

// recombineDriverPairs recombines resolved nodes that are on the same driver
// back together into a single node.
func recombineNodes(nodes []*resolvedNode) []*resolvedNode {
result := make([]*resolvedNode, 0, len(nodes))
lookup := map[int]int{}
for _, node := range nodes {
if idx, ok := lookup[node.driverIndex]; ok {
result[idx].platforms = append(result[idx].platforms, node.platforms...)
} else {
lookup[node.driverIndex] = len(result)
result = append(result, node)
}
}
return result
}
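`recombineNodes` above merges resolved nodes that share a `driverIndex` while preserving first-seen order. A minimal sketch of that merge over plain values:

```go
package main

import "fmt"

type resolved struct {
	driverIndex int
	platforms   []string
}

// recombine merges entries that share a driverIndex, preserving first-seen
// order, the same shape as recombineNodes above.
func recombine(nodes []resolved) []resolved {
	result := make([]resolved, 0, len(nodes))
	lookup := map[int]int{}
	for _, n := range nodes {
		if idx, ok := lookup[n.driverIndex]; ok {
			result[idx].platforms = append(result[idx].platforms, n.platforms...)
			continue
		}
		lookup[n.driverIndex] = len(result)
		result = append(result, n)
	}
	return result
}

func main() {
	merged := recombine([]resolved{
		{0, []string{"linux/amd64"}},
		{1, []string{"linux/riscv64"}},
		{0, []string{"linux/arm64"}},
	})
	fmt.Println(merged) // [{0 [linux/amd64 linux/arm64]} {1 [linux/riscv64]}]
}
```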
@@ -1,315 +0,0 @@
package build

import (
"context"
"sort"
"testing"

"github.com/containerd/platforms"
"github.com/docker/buildx/builder"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/stretchr/testify/require"
)

func TestFindDriverSanity(t *testing.T) {
r := makeTestResolver(map[string][]specs.Platform{
"aaa": {platforms.DefaultSpec()},
})

res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.DefaultSpec()}, nil, platforms.OnlyStrict, nil)
require.NoError(t, err)
require.True(t, perfect)
require.Len(t, res, 1)
require.Equal(t, 0, res[0].driverIndex)
require.Equal(t, "aaa", res[0].Node().Builder)
require.Equal(t, []specs.Platform{platforms.DefaultSpec()}, res[0].platforms)
}

func TestFindDriverEmpty(t *testing.T) {
r := makeTestResolver(nil)

res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.DefaultSpec()}, nil, platforms.Only, nil)
require.NoError(t, err)
require.True(t, perfect)
require.Nil(t, res)
}

func TestFindDriverWeirdName(t *testing.T) {
r := makeTestResolver(map[string][]specs.Platform{
"aaa": {platforms.MustParse("linux/amd64")},
"bbb": {platforms.MustParse("linux/foobar")},
})

// find first platform
res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/foobar")}, nil, platforms.Only, nil)
require.NoError(t, err)
require.True(t, perfect)
require.Len(t, res, 1)
require.Equal(t, 1, res[0].driverIndex)
require.Equal(t, "bbb", res[0].Node().Builder)
}

func TestFindDriverUnknown(t *testing.T) {
r := makeTestResolver(map[string][]specs.Platform{
"aaa": {platforms.MustParse("linux/amd64")},
})

res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/riscv64")}, nil, platforms.Only, nil)
require.NoError(t, err)
require.False(t, perfect)
require.Len(t, res, 1)
require.Equal(t, 0, res[0].driverIndex)
require.Equal(t, "aaa", res[0].Node().Builder)
}

func TestSelectNodeSinglePlatform(t *testing.T) {
r := makeTestResolver(map[string][]specs.Platform{
"aaa": {platforms.MustParse("linux/amd64")},
"bbb": {platforms.MustParse("linux/riscv64")},
})

// find first platform
res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/amd64")}, nil, platforms.Only, nil)
require.NoError(t, err)
require.True(t, perfect)
require.Len(t, res, 1)
require.Equal(t, 0, res[0].driverIndex)
require.Equal(t, "aaa", res[0].Node().Builder)

// find second platform
res, perfect, err = r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/riscv64")}, nil, platforms.Only, nil)
require.NoError(t, err)
require.True(t, perfect)
require.Len(t, res, 1)
require.Equal(t, 1, res[0].driverIndex)
require.Equal(t, "bbb", res[0].Node().Builder)

// find an unknown platform, should match the first driver
res, perfect, err = r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/s390x")}, nil, platforms.Only, nil)
require.NoError(t, err)
require.False(t, perfect)
require.Len(t, res, 1)
require.Equal(t, 0, res[0].driverIndex)
require.Equal(t, "aaa", res[0].Node().Builder)
}

func TestSelectNodeMultiPlatform(t *testing.T) {
r := makeTestResolver(map[string][]specs.Platform{
"aaa": {platforms.MustParse("linux/amd64"), platforms.MustParse("linux/arm64")},
"bbb": {platforms.MustParse("linux/riscv64")},
})

res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/amd64")}, nil, platforms.Only, nil)
require.NoError(t, err)
require.True(t, perfect)
require.Len(t, res, 1)
require.Equal(t, 0, res[0].driverIndex)
require.Equal(t, "aaa", res[0].Node().Builder)

res, perfect, err = r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm64")}, nil, platforms.Only, nil)
require.NoError(t, err)
require.True(t, perfect)
require.Len(t, res, 1)
require.Equal(t, 0, res[0].driverIndex)
require.Equal(t, "aaa", res[0].Node().Builder)

res, perfect, err = r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/riscv64")}, nil, platforms.Only, nil)
require.NoError(t, err)
require.True(t, perfect)
require.Len(t, res, 1)
require.Equal(t, 1, res[0].driverIndex)
require.Equal(t, "bbb", res[0].Node().Builder)
}

func TestSelectNodeNonStrict(t *testing.T) {
r := makeTestResolver(map[string][]specs.Platform{
"aaa": {platforms.MustParse("linux/amd64")},
"bbb": {platforms.MustParse("linux/arm64")},
})

// arm64 should match itself
res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm64")}, nil, platforms.Only, nil)
require.NoError(t, err)
require.True(t, perfect)
require.Len(t, res, 1)
require.Equal(t, "bbb", res[0].Node().Builder)

// arm64 may support arm/v8
res, perfect, err = r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm/v8")}, nil, platforms.Only, nil)
require.NoError(t, err)
require.True(t, perfect)
require.Len(t, res, 1)
require.Equal(t, "bbb", res[0].Node().Builder)

// arm64 may support arm/v7
res, perfect, err = r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm/v7")}, nil, platforms.Only, nil)
require.NoError(t, err)
require.True(t, perfect)
require.Len(t, res, 1)
require.Equal(t, "bbb", res[0].Node().Builder)
}

func TestSelectNodeNonStrictARM(t *testing.T) {
r := makeTestResolver(map[string][]specs.Platform{
"aaa": {platforms.MustParse("linux/amd64")},
"bbb": {platforms.MustParse("linux/arm64")},
"ccc": {platforms.MustParse("linux/arm/v8")},
})

res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm/v8")}, nil, platforms.Only, nil)
require.NoError(t, err)
require.True(t, perfect)
require.Len(t, res, 1)
require.Equal(t, "ccc", res[0].Node().Builder)

res, perfect, err = r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm/v7")}, nil, platforms.Only, nil)
require.NoError(t, err)
require.True(t, perfect)
require.Len(t, res, 1)
require.Equal(t, "ccc", res[0].Node().Builder)
}

func TestSelectNodeNonStrictLower(t *testing.T) {
r := makeTestResolver(map[string][]specs.Platform{
"aaa": {platforms.MustParse("linux/amd64")},
"bbb": {platforms.MustParse("linux/arm/v7")},
})

// v8 can't be built on v7 (so we should select the default)...
res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm/v8")}, nil, platforms.Only, nil)
require.NoError(t, err)
require.False(t, perfect)
require.Len(t, res, 1)
require.Equal(t, "aaa", res[0].Node().Builder)

// ...but v6 can be built on v8
res, perfect, err = r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm/v6")}, nil, platforms.Only, nil)
require.NoError(t, err)
require.True(t, perfect)
require.Len(t, res, 1)
require.Equal(t, "bbb", res[0].Node().Builder)
}

func TestSelectNodePreferStart(t *testing.T) {
r := makeTestResolver(map[string][]specs.Platform{
"aaa": {platforms.MustParse("linux/amd64")},
"bbb": {platforms.MustParse("linux/riscv64")},
"ccc": {platforms.MustParse("linux/riscv64")},
})

res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/riscv64")}, nil, platforms.Only, nil)
require.NoError(t, err)
require.True(t, perfect)
require.Len(t, res, 1)
require.Equal(t, "bbb", res[0].Node().Builder)
}

func TestSelectNodePreferExact(t *testing.T) {
r := makeTestResolver(map[string][]specs.Platform{
"aaa": {platforms.MustParse("linux/arm/v8")},
"bbb": {platforms.MustParse("linux/arm/v7")},
})

res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm/v7")}, nil, platforms.Only, nil)
require.NoError(t, err)
require.True(t, perfect)
require.Len(t, res, 1)
require.Equal(t, "bbb", res[0].Node().Builder)
}

func TestSelectNodeNoPlatform(t *testing.T) {
r := makeTestResolver(map[string][]specs.Platform{
"aaa": {platforms.MustParse("linux/foobar")},
"bbb": {platforms.DefaultSpec()},
})

res, perfect, err := r.resolve(context.TODO(), []specs.Platform{}, nil, platforms.Only, nil)
require.NoError(t, err)
require.True(t, perfect)
require.Len(t, res, 1)
require.Equal(t, "aaa", res[0].Node().Builder)
require.Empty(t, res[0].platforms)
}

func TestSelectNodeAdditionalPlatforms(t *testing.T) {
r := makeTestResolver(map[string][]specs.Platform{
"aaa": {platforms.MustParse("linux/amd64")},
"bbb": {platforms.MustParse("linux/arm/v8")},
})

res, perfect, err := r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm/v7")}, nil, platforms.Only, nil)
require.NoError(t, err)
require.True(t, perfect)
require.Len(t, res, 1)
require.Equal(t, "bbb", res[0].Node().Builder)

res, perfect, err = r.resolve(context.TODO(), []specs.Platform{platforms.MustParse("linux/arm/v7")}, nil, platforms.Only, func(idx int, n builder.Node) []specs.Platform {
if n.Builder == "aaa" {
return []specs.Platform{platforms.MustParse("linux/arm/v7")}
}
return nil
})
require.NoError(t, err)
require.True(t, perfect)
require.Len(t, res, 1)
require.Equal(t, "aaa", res[0].Node().Builder)
}

func TestSplitNodeMultiPlatform(t *testing.T) {
r := makeTestResolver(map[string][]specs.Platform{
"aaa": {platforms.MustParse("linux/amd64"), platforms.MustParse("linux/arm64")},
"bbb": {platforms.MustParse("linux/riscv64")},
})

res, perfect, err := r.resolve(context.TODO(), []specs.Platform{
platforms.MustParse("linux/amd64"),
platforms.MustParse("linux/arm64"),
}, nil, platforms.Only, nil)
require.NoError(t, err)
require.True(t, perfect)
require.Len(t, res, 1)
require.Equal(t, "aaa", res[0].Node().Builder)

res, perfect, err = r.resolve(context.TODO(), []specs.Platform{
platforms.MustParse("linux/amd64"),
platforms.MustParse("linux/riscv64"),
}, nil, platforms.Only, nil)
require.NoError(t, err)
require.True(t, perfect)
require.Len(t, res, 2)
require.Equal(t, "aaa", res[0].Node().Builder)
require.Equal(t, "bbb", res[1].Node().Builder)
}

func TestSplitNodeMultiPlatformNoUnify(t *testing.T) {
r := makeTestResolver(map[string][]specs.Platform{
"aaa": {platforms.MustParse("linux/amd64")},
"bbb": {platforms.MustParse("linux/amd64"), platforms.MustParse("linux/riscv64")},
})

// the "best" choice would be the node with both platforms, but we're using
// a naive algorithm that doesn't try to unify the platforms
res, perfect, err := r.resolve(context.TODO(), []specs.Platform{
platforms.MustParse("linux/amd64"),
platforms.MustParse("linux/riscv64"),
}, nil, platforms.Only, nil)
require.NoError(t, err)
require.True(t, perfect)
require.Len(t, res, 2)
require.Equal(t, "aaa", res[0].Node().Builder)
require.Equal(t, "bbb", res[1].Node().Builder)
}

func makeTestResolver(nodes map[string][]specs.Platform) *nodeResolver {
var ns []builder.Node
for name, platforms := range nodes {
ns = append(ns, builder.Node{
Builder: name,
Platforms: platforms,
})
}
sort.Slice(ns, func(i, j int) bool {
return ns[i].Builder < ns[j].Builder
})
return newDriverResolver(ns)
}
111 build/git.go
@@ -9,27 +9,16 @@ import (
"strings"

"github.com/docker/buildx/util/gitutil"
"github.com/docker/buildx/util/osutil"
"github.com/moby/buildkit/client"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)

const DockerfileLabel = "com.docker.image.source.entrypoint"

type gitAttrsAppendFunc func(so *client.SolveOpt)

func gitAppendNoneFunc(_ *client.SolveOpt) {}

func getGitAttributes(ctx context.Context, contextPath, dockerfilePath string) (f gitAttrsAppendFunc, err error) {
defer func() {
if f == nil {
f = gitAppendNoneFunc
}
}()

func getGitAttributes(ctx context.Context, contextPath string, dockerfilePath string) (res map[string]string, _ error) {
res = make(map[string]string)
if contextPath == "" {
return nil, nil
return
}

setGitLabels := false
@@ -48,7 +37,7 @@ func getGitAttributes(ctx context.Context, contextPath, dockerfilePath string) (
}

if !setGitLabels && !setGitInfo {
return nil, nil
return
}

// figure out in which directory the git command needs to run in
@@ -56,42 +45,29 @@ func getGitAttributes(ctx context.Context, contextPath, dockerfilePath string) (
if filepath.IsAbs(contextPath) {
wd = contextPath
} else {
wd, _ = filepath.Abs(filepath.Join(osutil.GetWd(), contextPath))
cwd, _ := os.Getwd()
wd, _ = filepath.Abs(filepath.Join(cwd, contextPath))
}
wd = osutil.SanitizePath(wd)

gitc, err := gitutil.New(gitutil.WithContext(ctx), gitutil.WithWorkingDir(wd))
if err != nil {
if st, err1 := os.Stat(path.Join(wd, ".git")); err1 == nil && st.IsDir() {
return nil, errors.Wrap(err, "git was not found in the system")
if st, err := os.Stat(path.Join(wd, ".git")); err == nil && st.IsDir() {
return res, errors.New("buildx: git was not found in the system. Current commit information was not captured by the build")
}
return nil, nil
return
}

if !gitc.IsInsideWorkTree() {
if st, err := os.Stat(path.Join(wd, ".git")); err == nil && st.IsDir() {
return nil, errors.New("failed to read current commit information with git rev-parse --is-inside-work-tree")
return res, errors.New("buildx: failed to read current commit information with git rev-parse --is-inside-work-tree")
}
return nil, nil
return res, nil
}

root, err := gitc.RootDir()
if err != nil {
return nil, errors.Wrap(err, "failed to get git root dir")
}

res := make(map[string]string)

if sha, err := gitc.FullCommit(); err != nil && !gitutil.IsUnknownRevision(err) {
return nil, errors.Wrap(err, "failed to get git commit")
if sha, err := gitc.FullCommit(); err != nil {
return res, errors.Wrapf(err, "buildx: failed to get git commit")
} else if sha != "" {
checkDirty := false
if v, ok := os.LookupEnv("BUILDX_GIT_CHECK_DIRTY"); ok {
if v, err := strconv.ParseBool(v); err == nil {
checkDirty = v
}
}
if checkDirty && gitc.IsDirty() {
if gitc.IsDirty() {
sha += "-dirty"
}
if setGitLabels {
@@ -111,50 +87,23 @@ func getGitAttributes(ctx context.Context, contextPath, dockerfilePath string) (
}
}

if setGitLabels && root != "" {
if dockerfilePath == "" {
dockerfilePath = filepath.Join(wd, "Dockerfile")
}
if !filepath.IsAbs(dockerfilePath) {
dockerfilePath = filepath.Join(osutil.GetWd(), dockerfilePath)
}
if r, err := filepath.Rel(root, dockerfilePath); err == nil && !strings.HasPrefix(r, "..") {
res["label:"+DockerfileLabel] = r
if setGitLabels {
if root, err := gitc.RootDir(); err != nil {
return res, errors.Wrapf(err, "buildx: failed to get git root dir")
} else if root != "" {
if dockerfilePath == "" {
dockerfilePath = filepath.Join(wd, "Dockerfile")
}
if !filepath.IsAbs(dockerfilePath) {
cwd, _ := os.Getwd()
dockerfilePath = filepath.Join(cwd, dockerfilePath)
}
dockerfilePath, _ = filepath.Rel(root, dockerfilePath)
if !strings.HasPrefix(dockerfilePath, "..") {
res["label:"+DockerfileLabel] = dockerfilePath
}
}
}

return func(so *client.SolveOpt) {
if so.FrontendAttrs == nil {
so.FrontendAttrs = make(map[string]string)
}
for k, v := range res {
so.FrontendAttrs[k] = v
}

if !setGitInfo || root == "" {
return
}

for key, mount := range so.LocalMounts {
fs, ok := mount.(*fs)
if !ok {
continue
}
dir, err := filepath.EvalSymlinks(fs.dir) // keep same behavior as fsutil.NewFS
if err != nil {
continue
}
dir, err = filepath.Abs(dir)
if err != nil {
continue
}
if lp, err := osutil.GetLongPathName(dir); err == nil {
dir = lp
}
dir = osutil.SanitizePath(dir)
if r, err := filepath.Rel(root, dir); err == nil && !strings.HasPrefix(r, "..") {
so.FrontendAttrs["vcs:localdir:"+key] = r
}
}
}, nil
return
}
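These hunks change `getGitAttributes` from returning a plain attribute map to returning an append function that is applied to the `SolveOpt` later, once local mounts are known. A standalone sketch of that deferred-append shape (the types and attribute values here are illustrative only):

```go
package main

import "fmt"

type solveOpt struct {
	FrontendAttrs map[string]string
}

type attrsAppendFunc func(*solveOpt)

// collectAttrs gathers attributes up front but defers mutation of the
// SolveOpt, matching the gitAttrsAppendFunc shape introduced above.
func collectAttrs() attrsAppendFunc {
	res := map[string]string{"vcs:source": "git@github.com:docker/buildx.git"}
	return func(so *solveOpt) {
		if so.FrontendAttrs == nil {
			so.FrontendAttrs = make(map[string]string)
		}
		for k, v := range res {
			so.FrontendAttrs[k] = v
		}
	}
}

func main() {
	addGitAttrs := collectAttrs()
	var so solveOpt
	addGitAttrs(&so) // applied late, after mounts are set in the real code
	fmt.Println(so.FrontendAttrs)
}
```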
@@ -9,7 +9,6 @@ import (
"testing"

"github.com/docker/buildx/util/gitutil"
"github.com/moby/buildkit/client"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -23,7 +22,7 @@ func setupTest(tb testing.TB) {
gitutil.GitInit(c, tb)

df := []byte("FROM alpine:latest\n")
require.NoError(tb, os.WriteFile("Dockerfile", df, 0644))
assert.NoError(tb, os.WriteFile("Dockerfile", df, 0644))

gitutil.GitAdd(c, tb, "Dockerfile")
gitutil.GitCommit(c, tb, "initial commit")
@@ -32,7 +31,7 @@ func setupTest(tb testing.TB) {

func TestGetGitAttributesNotGitRepo(t *testing.T) {
_, err := getGitAttributes(context.Background(), t.TempDir(), "Dockerfile")
require.NoError(t, err)
assert.NoError(t, err)
}

func TestGetGitAttributesBadGitRepo(t *testing.T) {
@@ -46,11 +45,9 @@ func TestGetGitAttributesBadGitRepo(t *testing.T) {
func TestGetGitAttributesNoContext(t *testing.T) {
setupTest(t)

addGitAttrs, err := getGitAttributes(context.Background(), "", "Dockerfile")
require.NoError(t, err)
var so client.SolveOpt
addGitAttrs(&so)
assert.Empty(t, so.FrontendAttrs)
gitattrs, err := getGitAttributes(context.Background(), "", "Dockerfile")
assert.NoError(t, err)
assert.Empty(t, gitattrs)
}

func TestGetGitAttributes(t *testing.T) {
@@ -117,17 +114,15 @@ func TestGetGitAttributes(t *testing.T) {
if tt.envGitInfo != "" {
t.Setenv("BUILDX_GIT_INFO", tt.envGitInfo)
}
addGitAttrs, err := getGitAttributes(context.Background(), ".", "Dockerfile")
gitattrs, err := getGitAttributes(context.Background(), ".", "Dockerfile")
require.NoError(t, err)
var so client.SolveOpt
addGitAttrs(&so)
for _, e := range tt.expected {
assert.Contains(t, so.FrontendAttrs, e)
assert.NotEmpty(t, so.FrontendAttrs[e])
assert.Contains(t, gitattrs, e)
assert.NotEmpty(t, gitattrs[e])
if e == "label:"+DockerfileLabel {
assert.Equal(t, "Dockerfile", so.FrontendAttrs[e])
assert.Equal(t, "Dockerfile", gitattrs[e])
} else if e == "label:"+specs.AnnotationSource || e == "vcs:source" {
assert.Equal(t, "git@github.com:docker/buildx.git", so.FrontendAttrs[e])
assert.Equal(t, "git@github.com:docker/buildx.git", gitattrs[e])
}
}
})
@@ -136,7 +131,6 @@ func TestGetGitAttributes(t *testing.T) {

func TestGetGitAttributesDirty(t *testing.T) {
setupTest(t)
t.Setenv("BUILDX_GIT_CHECK_DIRTY", "true")

// make a change to test dirty flag
df := []byte("FROM alpine:edge\n")
@@ -144,78 +138,18 @@ func TestGetGitAttributesDirty(t *testing.T) {
require.NoError(t, os.WriteFile(filepath.Join("dir", "Dockerfile"), df, 0644))

t.Setenv("BUILDX_GIT_LABELS", "true")
addGitAttrs, err := getGitAttributes(context.Background(), ".", "Dockerfile")
require.NoError(t, err)
gitattrs, _ := getGitAttributes(context.Background(), ".", "Dockerfile")
assert.Equal(t, 5, len(gitattrs))

var so client.SolveOpt
addGitAttrs(&so)
assert.Contains(t, gitattrs, "label:"+DockerfileLabel)
assert.Equal(t, "Dockerfile", gitattrs["label:"+DockerfileLabel])
assert.Contains(t, gitattrs, "label:"+specs.AnnotationSource)
assert.Equal(t, "git@github.com:docker/buildx.git", gitattrs["label:"+specs.AnnotationSource])
assert.Contains(t, gitattrs, "label:"+specs.AnnotationRevision)
assert.True(t, strings.HasSuffix(gitattrs["label:"+specs.AnnotationRevision], "-dirty"))

assert.Equal(t, 5, len(so.FrontendAttrs))

assert.Contains(t, so.FrontendAttrs, "label:"+DockerfileLabel)
assert.Equal(t, "Dockerfile", so.FrontendAttrs["label:"+DockerfileLabel])
assert.Contains(t, so.FrontendAttrs, "label:"+specs.AnnotationSource)
assert.Equal(t, "git@github.com:docker/buildx.git", so.FrontendAttrs["label:"+specs.AnnotationSource])
assert.Contains(t, so.FrontendAttrs, "label:"+specs.AnnotationRevision)
assert.True(t, strings.HasSuffix(so.FrontendAttrs["label:"+specs.AnnotationRevision], "-dirty"))

assert.Contains(t, so.FrontendAttrs, "vcs:source")
assert.Equal(t, "git@github.com:docker/buildx.git", so.FrontendAttrs["vcs:source"])
assert.Contains(t, so.FrontendAttrs, "vcs:revision")
assert.True(t, strings.HasSuffix(so.FrontendAttrs["vcs:revision"], "-dirty"))
}

func TestLocalDirs(t *testing.T) {
setupTest(t)

so := &client.SolveOpt{
FrontendAttrs: map[string]string{},
}

addGitAttrs, err := getGitAttributes(context.Background(), ".", "Dockerfile")
require.NoError(t, err)

require.NoError(t, setLocalMount("context", ".", so))
require.NoError(t, setLocalMount("dockerfile", ".", so))

addGitAttrs(so)

require.Contains(t, so.FrontendAttrs, "vcs:localdir:context")
assert.Equal(t, ".", so.FrontendAttrs["vcs:localdir:context"])

require.Contains(t, so.FrontendAttrs, "vcs:localdir:dockerfile")
assert.Equal(t, ".", so.FrontendAttrs["vcs:localdir:dockerfile"])
}

func TestLocalDirsSub(t *testing.T) {
gitutil.Mktmp(t)

c, err := gitutil.New()
require.NoError(t, err)
gitutil.GitInit(c, t)

df := []byte("FROM alpine:latest\n")
require.NoError(t, os.MkdirAll("app", 0755))
require.NoError(t, os.WriteFile("app/Dockerfile", df, 0644))

gitutil.GitAdd(c, t, "app/Dockerfile")
gitutil.GitCommit(c, t, "initial commit")
gitutil.GitSetRemote(c, t, "origin", "git@github.com:docker/buildx.git")

so := &client.SolveOpt{
FrontendAttrs: map[string]string{},
}
require.NoError(t, setLocalMount("context", ".", so))
require.NoError(t, setLocalMount("dockerfile", "app", so))

addGitAttrs, err := getGitAttributes(context.Background(), ".", "app/Dockerfile")
require.NoError(t, err)

addGitAttrs(so)

require.Contains(t, so.FrontendAttrs, "vcs:localdir:context")
assert.Equal(t, ".", so.FrontendAttrs["vcs:localdir:context"])

require.Contains(t, so.FrontendAttrs, "vcs:localdir:dockerfile")
assert.Equal(t, "app", so.FrontendAttrs["vcs:localdir:dockerfile"])
assert.Contains(t, gitattrs, "vcs:source")
assert.Equal(t, "git@github.com:docker/buildx.git", gitattrs["vcs:source"])
assert.Contains(t, gitattrs, "vcs:revision")
assert.True(t, strings.HasSuffix(gitattrs["vcs:revision"], "-dirty"))
}

138 build/invoke.go
@@ -1,138 +0,0 @@
package build

import (
"context"
_ "crypto/sha256" // ensure digests can be computed
"io"
"sync"
"sync/atomic"
"syscall"

controllerapi "github.com/docker/buildx/controller/pb"
gateway "github.com/moby/buildkit/frontend/gateway/client"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

type Container struct {
cancelOnce sync.Once
containerCancel func(error)
isUnavailable atomic.Bool
initStarted atomic.Bool
container gateway.Container
releaseCh chan struct{}
resultCtx *ResultHandle
}

func NewContainer(ctx context.Context, resultCtx *ResultHandle, cfg *controllerapi.InvokeConfig) (*Container, error) {
mainCtx := ctx

ctrCh := make(chan *Container)
errCh := make(chan error)
go func() {
err := resultCtx.build(func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
ctx, cancel := context.WithCancelCause(ctx)
go func() {
<-mainCtx.Done()
cancel(errors.WithStack(context.Canceled))
}()

containerCfg, err := resultCtx.getContainerConfig(cfg)
if err != nil {
return nil, err
}
containerCtx, containerCancel := context.WithCancelCause(ctx)
defer containerCancel(errors.WithStack(context.Canceled))
bkContainer, err := c.NewContainer(containerCtx, containerCfg)
if err != nil {
return nil, err
}
releaseCh := make(chan struct{})
container := &Container{
containerCancel: containerCancel,
container: bkContainer,
releaseCh: releaseCh,
resultCtx: resultCtx,
}
doneCh := make(chan struct{})
defer close(doneCh)
resultCtx.registerCleanup(func() {
container.Cancel()
<-doneCh
})
ctrCh <- container
<-container.releaseCh

return nil, bkContainer.Release(ctx)
})
if err != nil {
errCh <- err
}
}()
select {
case ctr := <-ctrCh:
return ctr, nil
case err := <-errCh:
return nil, err
case <-mainCtx.Done():
return nil, mainCtx.Err()
}
}

func (c *Container) Cancel() {
c.markUnavailable()
c.cancelOnce.Do(func() {
if c.containerCancel != nil {
c.containerCancel(errors.WithStack(context.Canceled))
}
close(c.releaseCh)
})
}

func (c *Container) IsUnavailable() bool {
return c.isUnavailable.Load()
}

func (c *Container) markUnavailable() {
c.isUnavailable.Store(true)
}

func (c *Container) Exec(ctx context.Context, cfg *controllerapi.InvokeConfig, stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) error {
if isInit := c.initStarted.CompareAndSwap(false, true); isInit {
defer func() {
// container can't be used after init exits
c.markUnavailable()
}()
}
err := exec(ctx, c.resultCtx, cfg, c.container, stdin, stdout, stderr)
if err != nil {
// Container becomes unavailable if one of the processes fails in it.
c.markUnavailable()
}
return err
}

func exec(ctx context.Context, resultCtx *ResultHandle, cfg *controllerapi.InvokeConfig, ctr gateway.Container, stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) error {
processCfg, err := resultCtx.getProcessConfig(cfg, stdin, stdout, stderr)
if err != nil {
return err
}
proc, err := ctr.Start(ctx, processCfg)
if err != nil {
return errors.Errorf("failed to start container: %v", err)
}

doneCh := make(chan struct{})
defer close(doneCh)
go func() {
select {
case <-ctx.Done():
if err := proc.Signal(ctx, syscall.SIGKILL); err != nil {
logrus.Warnf("failed to kill process: %v", err)
}
case <-doneCh:
}
}()

return proc.Wait()
}
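`Container.Exec` above uses an atomic compare-and-swap so that only the first invocation claims the init-process slot, and the container is retired once that process exits or any process fails. A small sketch of that once-only gate:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type session struct {
	initStarted   atomic.Bool
	isUnavailable atomic.Bool
}

// exec mirrors Container.Exec's gating: the first call wins the init slot
// and retires the session when it returns.
func (s *session) exec(name string) {
	if isInit := s.initStarted.CompareAndSwap(false, true); isInit {
		defer s.isUnavailable.Store(true) // init exit retires the session
	}
	fmt.Println("ran", name, "unavailable:", s.isUnavailable.Load())
}

func main() {
	var s session
	s.exec("init")  // first call takes the init slot
	s.exec("shell") // later calls see the session already retired
}
```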
@@ -1,44 +0,0 @@
package build

import (
"path/filepath"

"github.com/docker/buildx/builder"
"github.com/docker/buildx/localstate"
"github.com/docker/buildx/util/confutil"
"github.com/moby/buildkit/client"
)

func saveLocalState(so *client.SolveOpt, target string, opts Options, node builder.Node, cfg *confutil.Config) error {
var err error
if so.Ref == "" || opts.CallFunc != nil {
return nil
}
lp := opts.Inputs.ContextPath
dp := opts.Inputs.DockerfilePath
if dp != "" && !IsRemoteURL(lp) && lp != "-" && dp != "-" {
dp, err = filepath.Abs(dp)
if err != nil {
return err
}
}
if lp != "" && !IsRemoteURL(lp) && lp != "-" {
lp, err = filepath.Abs(lp)
if err != nil {
return err
}
}
if lp == "" && dp == "" {
return nil
}
l, err := localstate.New(cfg)
if err != nil {
return err
}
return l.SaveRef(node.Builder, node.Name, so.Ref, localstate.State{
Target: target,
LocalPath: lp,
DockerfilePath: dp,
GroupRef: opts.GroupRef,
})
}
657 build/opt.go
@@ -1,657 +0,0 @@
package build

import (
"bytes"
"context"
"io"
"os"
"path/filepath"
"slices"
"strconv"
"strings"
"syscall"

"github.com/containerd/containerd/v2/core/content"
"github.com/containerd/containerd/v2/plugins/content/local"
"github.com/containerd/platforms"
"github.com/distribution/reference"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/driver"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/dockerutil"
"github.com/docker/buildx/util/osutil"
"github.com/docker/buildx/util/progress"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/client/ociindex"
gateway "github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session/upload/uploadprovider"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/apicaps"
"github.com/moby/buildkit/util/entitlements"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/tonistiigi/fsutil"
)

func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt *Options, bopts gateway.BuildOpts, cfg *confutil.Config, pw progress.Writer, docker *dockerutil.Client) (_ *client.SolveOpt, release func(), err error) {
nodeDriver := node.Driver
defers := make([]func(), 0, 2)
releaseF := func() {
for _, f := range defers {
f()
}
}

defer func() {
if err != nil {
releaseF()
}
}()

// inline cache from build arg
if v, ok := opt.BuildArgs["BUILDKIT_INLINE_CACHE"]; ok {
if v, _ := strconv.ParseBool(v); v {
opt.CacheTo = append(opt.CacheTo, client.CacheOptionsEntry{
Type: "inline",
Attrs: map[string]string{},
})
}
}

for _, e := range opt.CacheTo {
if e.Type != "inline" && !nodeDriver.Features(ctx)[driver.CacheExport] {
return nil, nil, notSupported(driver.CacheExport, nodeDriver, "https://docs.docker.com/go/build-cache-backends/")
}
}

cacheTo := make([]client.CacheOptionsEntry, 0, len(opt.CacheTo))
for _, e := range opt.CacheTo {
if e.Type == "gha" {
if !bopts.LLBCaps.Contains(apicaps.CapID("cache.gha")) {
continue
}
} else if e.Type == "s3" {
if !bopts.LLBCaps.Contains(apicaps.CapID("cache.s3")) {
continue
}
}
cacheTo = append(cacheTo, e)
}

cacheFrom := make([]client.CacheOptionsEntry, 0, len(opt.CacheFrom))
for _, e := range opt.CacheFrom {
if e.Type == "gha" {
if !bopts.LLBCaps.Contains(apicaps.CapID("cache.gha")) {
continue
}
} else if e.Type == "s3" {
if !bopts.LLBCaps.Contains(apicaps.CapID("cache.s3")) {
continue
}
}
cacheFrom = append(cacheFrom, e)
}

so := client.SolveOpt{
Ref: opt.Ref,
Frontend: "dockerfile.v0",
FrontendAttrs: map[string]string{},
LocalMounts: map[string]fsutil.FS{},
CacheExports: cacheTo,
CacheImports: cacheFrom,
AllowedEntitlements: opt.Allow,
SourcePolicy: opt.SourcePolicy,
}

if opt.CgroupParent != "" {
so.FrontendAttrs["cgroup-parent"] = opt.CgroupParent
}

if v, ok := opt.BuildArgs["BUILDKIT_MULTI_PLATFORM"]; ok {
if v, _ := strconv.ParseBool(v); v {
so.FrontendAttrs["multi-platform"] = "true"
}
}

if multiDriver {
// force creation of manifest list
so.FrontendAttrs["multi-platform"] = "true"
}

attests := make(map[string]string)
for k, v := range opt.Attests {
if v != nil {
attests[k] = *v
}
}

supportAttestations := bopts.LLBCaps.Contains(apicaps.CapID("exporter.image.attestations")) && nodeDriver.Features(ctx)[driver.MultiPlatform]
if len(attests) > 0 {
if !supportAttestations {
if !nodeDriver.Features(ctx)[driver.MultiPlatform] {
return nil, nil, notSupported("Attestation", nodeDriver, "https://docs.docker.com/go/attestations/")
}
return nil, nil, errors.Errorf("Attestations are not supported by the current BuildKit daemon")
}
for k, v := range attests {
so.FrontendAttrs["attest:"+k] = v
}
}

if _, ok := opt.Attests["provenance"]; !ok && supportAttestations {
const noAttestEnv = "BUILDX_NO_DEFAULT_ATTESTATIONS"
var noProv bool
if v, ok := os.LookupEnv(noAttestEnv); ok {
noProv, err = strconv.ParseBool(v)
if err != nil {
return nil, nil, errors.Wrap(err, "invalid "+noAttestEnv)
}
}
if !noProv {
so.FrontendAttrs["attest:provenance"] = "mode=min,inline-only=true"
}
}

switch len(opt.Exports) {
case 1:
// valid
case 0:
if !noDefaultLoad() && opt.CallFunc == nil {
if nodeDriver.IsMobyDriver() {
// backwards compat for docker driver only:
// this ensures the build results in a docker image.
opt.Exports = []client.ExportEntry{{Type: "image", Attrs: map[string]string{}}}
} else if nodeDriver.Features(ctx)[driver.DefaultLoad] {
opt.Exports = []client.ExportEntry{{Type: "docker", Attrs: map[string]string{}}}
}
}
default:
if err := bopts.LLBCaps.Supports(pb.CapMultipleExporters); err != nil {
return nil, nil, errors.Errorf("multiple outputs currently unsupported by the current BuildKit daemon, please upgrade to version v0.13+ or use a single output")
}
}

// fill in image exporter names from tags
if len(opt.Tags) > 0 {
tags := make([]string, len(opt.Tags))
for i, tag := range opt.Tags {
ref, err := reference.Parse(tag)
if err != nil {
return nil, nil, errors.Wrapf(err, "invalid tag %q", tag)
}
tags[i] = ref.String()
}
for i, e := range opt.Exports {
switch e.Type {
case "image", "oci", "docker":
opt.Exports[i].Attrs["name"] = strings.Join(tags, ",")
}
}
} else {
for _, e := range opt.Exports {
if e.Type == "image" && e.Attrs["name"] == "" && e.Attrs["push"] != "" {
if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok {
return nil, nil, errors.Errorf("tag is needed when pushing to registry")
}
}
}
}

// cacheonly is a fake exporter to opt out of default behaviors
exports := make([]client.ExportEntry, 0, len(opt.Exports))
for _, e := range opt.Exports {
if e.Type != "cacheonly" {
exports = append(exports, e)
}
}
opt.Exports = exports

// set up exporters
for i, e := range opt.Exports {
if e.Type == "oci" && !nodeDriver.Features(ctx)[driver.OCIExporter] {
return nil, nil, notSupported(driver.OCIExporter, nodeDriver, "https://docs.docker.com/go/build-exporters/")
}
if e.Type == "docker" {
features := docker.Features(ctx, e.Attrs["context"])
if features[dockerutil.OCIImporter] && e.Output == nil {
// rely on oci importer if available (which supports
// multi-platform images), otherwise fall back to docker
opt.Exports[i].Type = "oci"
} else if len(opt.Platforms) > 1 || len(attests) > 0 {
if e.Output != nil {
return nil, nil, errors.Errorf("docker exporter does not support exporting manifest lists, use the oci exporter instead")
}
return nil, nil, errors.Errorf("docker exporter does not currently support exporting manifest lists")
}
if e.Output == nil {
if nodeDriver.IsMobyDriver() {
e.Type = "image"
} else {
w, cancel, err := docker.LoadImage(ctx, e.Attrs["context"], pw)
if err != nil {
return nil, nil, err
}
defers = append(defers, cancel)
opt.Exports[i].Output = func(_ map[string]string) (io.WriteCloser, error) {
return w, nil
}
}
} else if !nodeDriver.Features(ctx)[driver.DockerExporter] {
return nil, nil, notSupported(driver.DockerExporter, nodeDriver, "https://docs.docker.com/go/build-exporters/")
}
}
if e.Type == "image" && nodeDriver.IsMobyDriver() {
opt.Exports[i].Type = "moby"
if e.Attrs["push"] != "" {
if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok {
if ok, _ := strconv.ParseBool(e.Attrs["push-by-digest"]); ok {
return nil, nil, errors.Errorf("push-by-digest is currently not implemented for docker driver, please create a new builder instance")
}
}
}
}
if e.Type == "docker" || e.Type == "image" || e.Type == "oci" {
// inline buildinfo attrs from build arg
if v, ok := opt.BuildArgs["BUILDKIT_INLINE_BUILDINFO_ATTRS"]; ok {
opt.Exports[i].Attrs["buildinfo-attrs"] = v
}
}
}

so.Exports = opt.Exports
so.Session = slices.Clone(opt.Session)

releaseLoad, err := loadInputs(ctx, nodeDriver, &opt.Inputs, pw, &so)
if err != nil {
return nil, nil, err
}
defers = append(defers, releaseLoad)

// add node identifier to shared key if one was specified
if so.SharedKey != "" {
so.SharedKey += ":" + cfg.TryNodeIdentifier()
}

if opt.Pull {
so.FrontendAttrs["image-resolve-mode"] = pb.AttrImageResolveModeForcePull
} else if nodeDriver.IsMobyDriver() {
// moby driver always resolves local images by default
so.FrontendAttrs["image-resolve-mode"] = pb.AttrImageResolveModePreferLocal
}
if opt.Target != "" {
so.FrontendAttrs["target"] = opt.Target
}
if len(opt.NoCacheFilter) > 0 {
so.FrontendAttrs["no-cache"] = strings.Join(opt.NoCacheFilter, ",")
}
if opt.NoCache {
so.FrontendAttrs["no-cache"] = ""
}
for k, v := range opt.BuildArgs {
so.FrontendAttrs["build-arg:"+k] = v
}
for k, v := range opt.Labels {
so.FrontendAttrs["label:"+k] = v
}

for k, v := range node.ProxyConfig {
if _, ok := opt.BuildArgs[k]; !ok {
so.FrontendAttrs["build-arg:"+k] = v
}
}

// set platforms
if len(opt.Platforms) != 0 {
pp := make([]string, len(opt.Platforms))
for i, p := range opt.Platforms {
pp[i] = platforms.Format(p)
}
if len(pp) > 1 && !nodeDriver.Features(ctx)[driver.MultiPlatform] {
return nil, nil, notSupported(driver.MultiPlatform, nodeDriver, "https://docs.docker.com/go/build-multi-platform/")
}
so.FrontendAttrs["platform"] = strings.Join(pp, ",")
}

// setup networkmode
switch opt.NetworkMode {
case "host":
so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
so.AllowedEntitlements = append(so.AllowedEntitlements, entitlements.EntitlementNetworkHost)
case "none":
so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
case "", "default":
default:
return nil, nil, errors.Errorf("network mode %q not supported by buildkit - you can define a custom network for your builder using the network driver-opt in buildx create", opt.NetworkMode)
}

// setup extrahosts
extraHosts, err := toBuildkitExtraHosts(ctx, opt.ExtraHosts, nodeDriver)
if err != nil {
return nil, nil, err
}
if len(extraHosts) > 0 {
so.FrontendAttrs["add-hosts"] = extraHosts
}

// setup shm size
if opt.ShmSize.Value() > 0 {
so.FrontendAttrs["shm-size"] = strconv.FormatInt(opt.ShmSize.Value(), 10)
}

// setup ulimits
ulimits, err := toBuildkitUlimits(opt.Ulimits)
if err != nil {
return nil, nil, err
} else if len(ulimits) > 0 {
so.FrontendAttrs["ulimit"] = ulimits
}

// mark call request as internal
if opt.CallFunc != nil {
so.Internal = true
}

return &so, releaseF, nil
}
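`toSolveOpt` above silently drops `gha` and `s3` cache entries when the daemon does not advertise the matching LLB capability, rather than failing the build. A standalone sketch of that filter, with a plain map as a hypothetical stand-in for `bopts.LLBCaps.Contains`:

```go
package main

import "fmt"

type cacheEntry struct {
	Type string
}

// filterByCaps keeps only cache entries the daemon can serve, the same
// shape as the cacheTo/cacheFrom loops in toSolveOpt above. The caps set
// is a hypothetical stand-in for bopts.LLBCaps.Contains.
func filterByCaps(entries []cacheEntry, caps map[string]bool) []cacheEntry {
	out := make([]cacheEntry, 0, len(entries))
	for _, e := range entries {
		switch e.Type {
		case "gha":
			if !caps["cache.gha"] {
				continue
			}
		case "s3":
			if !caps["cache.s3"] {
				continue
			}
		}
		out = append(out, e)
	}
	return out
}

func main() {
	entries := []cacheEntry{{"inline"}, {"gha"}, {"s3"}}
	caps := map[string]bool{"cache.gha": true} // daemon lacks cache.s3
	fmt.Println(filterByCaps(entries, caps))   // [{inline} {gha}]
}
```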

func loadInputs(ctx context.Context, d *driver.DriverHandle, inp *Inputs, pw progress.Writer, target *client.SolveOpt) (func(), error) {
if inp.ContextPath == "" {
return nil, errors.New("please specify build context (e.g. \".\" for the current directory)")
}

// TODO: handle stdin, symlinks, remote contexts, check files exist

var (
err error
dockerfileReader io.ReadCloser
dockerfileDir string
dockerfileName = inp.DockerfilePath
dockerfileSrcName = inp.DockerfilePath
toRemove []string
)

switch {
case inp.ContextState != nil:
if target.FrontendInputs == nil {
target.FrontendInputs = make(map[string]llb.State)
}
target.FrontendInputs["context"] = *inp.ContextState
target.FrontendInputs["dockerfile"] = *inp.ContextState
case inp.ContextPath == "-":
if inp.DockerfilePath == "-" {
return nil, errors.Errorf("invalid argument: can't use stdin for both build context and dockerfile")
}

rc := inp.InStream.NewReadCloser()
magic, err := inp.InStream.Peek(archiveHeaderSize * 2)
if err != nil && err != io.EOF {
return nil, errors.Wrap(err, "failed to peek context header from STDIN")
}
if !(err == io.EOF && len(magic) == 0) {
if isArchive(magic) {
// stdin is context
up := uploadprovider.New()
target.FrontendAttrs["context"] = up.Add(rc)
target.Session = append(target.Session, up)
} else {
if inp.DockerfilePath != "" {
return nil, errors.Errorf("ambiguous Dockerfile source: both stdin and flag correspond to Dockerfiles")
}
// stdin is dockerfile
dockerfileReader = rc
inp.ContextPath, _ = os.MkdirTemp("", "empty-dir")
toRemove = append(toRemove, inp.ContextPath)
if err := setLocalMount("context", inp.ContextPath, target); err != nil {
return nil, err
}
}
}
case osutil.IsLocalDir(inp.ContextPath):
if err := setLocalMount("context", inp.ContextPath, target); err != nil {
return nil, err
}
sharedKey := inp.ContextPath
if p, err := filepath.Abs(sharedKey); err == nil {
sharedKey = filepath.Base(p)
}
target.SharedKey = sharedKey
switch inp.DockerfilePath {
case "-":
dockerfileReader = inp.InStream.NewReadCloser()
case "":
dockerfileDir = inp.ContextPath
default:
dockerfileDir = filepath.Dir(inp.DockerfilePath)
dockerfileName = filepath.Base(inp.DockerfilePath)
}
case IsRemoteURL(inp.ContextPath):
if inp.DockerfilePath == "-" {
dockerfileReader = inp.InStream.NewReadCloser()
} else if filepath.IsAbs(inp.DockerfilePath) {
dockerfileDir = filepath.Dir(inp.DockerfilePath)
dockerfileName = filepath.Base(inp.DockerfilePath)
target.FrontendAttrs["dockerfilekey"] = "dockerfile"
}
target.FrontendAttrs["context"] = inp.ContextPath
default:
return nil, errors.Errorf("unable to prepare context: path %q not found", inp.ContextPath)
}

if inp.DockerfileInline != "" {
dockerfileReader = io.NopCloser(strings.NewReader(inp.DockerfileInline))
dockerfileSrcName = "inline"
} else if inp.DockerfilePath == "-" {
dockerfileSrcName = "stdin"
} else if inp.DockerfilePath == "" {
dockerfileSrcName = filepath.Join(inp.ContextPath, "Dockerfile")
}

if dockerfileReader != nil {
dockerfileDir, err = createTempDockerfile(dockerfileReader, inp.InStream)
if err != nil {
return nil, err
}
toRemove = append(toRemove, dockerfileDir)
dockerfileName = "Dockerfile"
target.FrontendAttrs["dockerfilekey"] = "dockerfile"
}
if isHTTPURL(inp.DockerfilePath) {
dockerfileDir, err = createTempDockerfileFromURL(ctx, d, inp.DockerfilePath, pw)
if err != nil {
return nil, err
}
toRemove = append(toRemove, dockerfileDir)
dockerfileName = "Dockerfile"
target.FrontendAttrs["dockerfilekey"] = "dockerfile"
delete(target.FrontendInputs, "dockerfile")
}

if dockerfileName == "" {
dockerfileName = "Dockerfile"
}

if dockerfileDir != "" {
if err := setLocalMount("dockerfile", dockerfileDir, target); err != nil {
return nil, err
}
dockerfileName = handleLowercaseDockerfile(dockerfileDir, dockerfileName)
}

target.FrontendAttrs["filename"] = dockerfileName

for k, v := range inp.NamedContexts {
target.FrontendAttrs["frontend.caps"] = "moby.buildkit.frontend.contexts+forward"
if v.State != nil {
|
||||
target.FrontendAttrs["context:"+k] = "input:" + k
|
||||
if target.FrontendInputs == nil {
|
||||
target.FrontendInputs = make(map[string]llb.State)
|
||||
}
|
||||
target.FrontendInputs[k] = *v.State
|
||||
continue
|
||||
}
|
||||
|
||||
if IsRemoteURL(v.Path) || strings.HasPrefix(v.Path, "docker-image://") || strings.HasPrefix(v.Path, "target:") {
|
||||
target.FrontendAttrs["context:"+k] = v.Path
|
||||
continue
|
||||
}
|
||||
|
||||
// handle OCI layout
|
||||
if strings.HasPrefix(v.Path, "oci-layout://") {
|
||||
localPath := strings.TrimPrefix(v.Path, "oci-layout://")
|
||||
localPath, dig, hasDigest := strings.Cut(localPath, "@")
|
||||
localPath, tag, hasTag := strings.Cut(localPath, ":")
|
||||
if !hasTag {
|
||||
tag = "latest"
|
||||
}
|
||||
if !hasDigest {
|
||||
dig, err = resolveDigest(localPath, tag)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "oci-layout reference %q could not be resolved", v.Path)
|
||||
}
|
||||
}
|
||||
store, err := local.NewStore(localPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "invalid store at %s", localPath)
|
||||
}
|
||||
storeName := identity.NewID()
|
||||
if target.OCIStores == nil {
|
||||
target.OCIStores = map[string]content.Store{}
|
||||
}
|
||||
target.OCIStores[storeName] = store
|
||||
|
||||
target.FrontendAttrs["context:"+k] = "oci-layout://" + storeName + ":" + tag + "@" + dig
|
||||
continue
|
||||
}
|
||||
st, err := os.Stat(v.Path)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get build context %v", k)
|
||||
}
|
||||
if !st.IsDir() {
|
||||
return nil, errors.Wrapf(syscall.ENOTDIR, "failed to get build context path %v", v)
|
||||
}
|
||||
localName := k
|
||||
if k == "context" || k == "dockerfile" {
|
||||
localName = "_" + k // underscore to avoid collisions
|
||||
}
|
||||
if err := setLocalMount(localName, v.Path, target); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
target.FrontendAttrs["context:"+k] = "local:" + localName
|
||||
}
|
||||
|
||||
release := func() {
|
||||
for _, dir := range toRemove {
|
||||
_ = os.RemoveAll(dir)
|
||||
}
|
||||
}
|
||||
|
||||
inp.DockerfileMappingSrc = dockerfileSrcName
|
||||
inp.DockerfileMappingDst = dockerfileName
|
||||
return release, nil
|
||||
}
|
||||
|
||||
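// The stdin rules enforced above, as illustrative CLI forms (added for
// clarity, not from the original source):
//
//	docker buildx build -        # stdin auto-detected: tar archive => context, otherwise Dockerfile
//	docker buildx build -f- .    # stdin is the Dockerfile, "." is the context
//	docker buildx build -f- -    # rejected: stdin cannot back both inputs
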
func resolveDigest(localPath, tag string) (dig string, _ error) {
	idx := ociindex.NewStoreIndex(localPath)

	// lookup by name
	desc, err := idx.Get(tag)
	if err != nil {
		return "", err
	}
	if desc == nil {
		// lookup single
		desc, err = idx.GetSingle()
		if err != nil {
			return "", err
		}
	}
	if desc == nil {
		return "", errors.New("failed to resolve digest")
	}

	dig = string(desc.Digest)
	_, err = digest.Parse(dig)
	if err != nil {
		return "", errors.Wrapf(err, "invalid digest %s", dig)
	}

	return dig, nil
}

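// Illustrative (hypothetical layout path): a named context such as
//
//	--build-context app=oci-layout://./out:v1
//
// carries no digest, so the caller above invokes resolveDigest("./out", "v1"),
// which looks "v1" up in the layout's index and returns that descriptor's
// digest (e.g. a "sha256:..." string).
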
func setLocalMount(name, dir string, so *client.SolveOpt) error {
	lm, err := fsutil.NewFS(dir)
	if err != nil {
		return err
	}
	if so.LocalMounts == nil {
		so.LocalMounts = map[string]fsutil.FS{}
	}
	so.LocalMounts[name] = &fs{FS: lm, dir: dir}
	return nil
}

func createTempDockerfile(r io.Reader, multiReader *SyncMultiReader) (string, error) {
	dir, err := os.MkdirTemp("", "dockerfile")
	if err != nil {
		return "", err
	}
	f, err := os.Create(filepath.Join(dir, "Dockerfile"))
	if err != nil {
		return "", err
	}
	defer f.Close()

	if multiReader != nil {
		dt, err := io.ReadAll(r)
		if err != nil {
			return "", err
		}
		multiReader.Reset(dt)
		r = bytes.NewReader(dt)
	}

	if _, err := io.Copy(f, r); err != nil {
		return "", err
	}
	return dir, err
}

// handle https://github.com/moby/moby/pull/10858
func handleLowercaseDockerfile(dir, p string) string {
	if filepath.Base(p) != "Dockerfile" {
		return p
	}

	f, err := os.Open(filepath.Dir(filepath.Join(dir, p)))
	if err != nil {
		return p
	}

	names, err := f.Readdirnames(-1)
	if err != nil {
		return p
	}

	foundLowerCase := false
	for _, n := range names {
		if n == "Dockerfile" {
			return p
		}
		if n == "dockerfile" {
			foundLowerCase = true
		}
	}
	if foundLowerCase {
		return filepath.Join(filepath.Dir(p), "dockerfile")
	}
	return p
}

type fs struct {
	fsutil.FS
	dir string
}

var _ fsutil.FS = &fs{}

@@ -1,157 +0,0 @@
package build

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"io"
	"strings"
	"sync"

	"github.com/containerd/containerd/v2/core/content"
	"github.com/containerd/containerd/v2/core/content/proxy"
	"github.com/docker/buildx/util/confutil"
	"github.com/docker/buildx/util/progress"
	controlapi "github.com/moby/buildkit/api/services/control"
	"github.com/moby/buildkit/client"
	provenancetypes "github.com/moby/buildkit/solver/llbsolver/provenance/types"
	digest "github.com/opencontainers/go-digest"
	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"golang.org/x/sync/errgroup"
)

type provenancePredicate struct {
	Builder *provenanceBuilder `json:"builder,omitempty"`
	provenancetypes.ProvenancePredicate
}

type provenanceBuilder struct {
	ID string `json:"id,omitempty"`
}

func setRecordProvenance(ctx context.Context, c *client.Client, sr *client.SolveResponse, ref string, mode confutil.MetadataProvenanceMode, pw progress.Writer) error {
	if mode == confutil.MetadataProvenanceModeDisabled {
		return nil
	}
	pw = progress.ResetTime(pw)
	return progress.Wrap("resolving provenance for metadata file", pw.Write, func(l progress.SubLogger) error {
		res, err := fetchProvenance(ctx, c, ref, mode)
		if err != nil {
			return err
		}
		for k, v := range res {
			sr.ExporterResponse[k] = v
		}
		return nil
	})
}

func fetchProvenance(ctx context.Context, c *client.Client, ref string, mode confutil.MetadataProvenanceMode) (out map[string]string, err error) {
	cl, err := c.ControlClient().ListenBuildHistory(ctx, &controlapi.BuildHistoryRequest{
		Ref:       ref,
		EarlyExit: true,
	})
	if err != nil {
		return nil, err
	}

	var mu sync.Mutex
	eg, ctx := errgroup.WithContext(ctx)
	store := proxy.NewContentStore(c.ContentClient())
	for {
		ev, err := cl.Recv()
		if errors.Is(err, io.EOF) {
			break
		} else if err != nil {
			return nil, err
		}
		if ev.Record == nil {
			continue
		}
		if ev.Record.Result != nil {
			desc := lookupProvenance(ev.Record.Result)
			if desc == nil {
				continue
			}
			eg.Go(func() error {
				dt, err := content.ReadBlob(ctx, store, *desc)
				if err != nil {
					return errors.Wrapf(err, "failed to load provenance blob from build record")
				}
				prv, err := encodeProvenance(dt, mode)
				if err != nil {
					return err
				}
				mu.Lock()
				if out == nil {
					out = make(map[string]string)
				}
				out["buildx.build.provenance"] = prv
				mu.Unlock()
				return nil
			})
		} else if ev.Record.Results != nil {
			for platform, res := range ev.Record.Results {
				platform := platform
				desc := lookupProvenance(res)
				if desc == nil {
					continue
				}
				eg.Go(func() error {
					dt, err := content.ReadBlob(ctx, store, *desc)
					if err != nil {
						return errors.Wrapf(err, "failed to load provenance blob from build record")
					}
					prv, err := encodeProvenance(dt, mode)
					if err != nil {
						return err
					}
					mu.Lock()
					if out == nil {
						out = make(map[string]string)
					}
					out["buildx.build.provenance/"+platform] = prv
					mu.Unlock()
					return nil
				})
			}
		}
	}
	return out, eg.Wait()
}

func lookupProvenance(res *controlapi.BuildResultInfo) *ocispecs.Descriptor {
	for _, a := range res.Attestations {
		if a.MediaType == "application/vnd.in-toto+json" && strings.HasPrefix(a.Annotations["in-toto.io/predicate-type"], "https://slsa.dev/provenance/") {
			return &ocispecs.Descriptor{
				Digest:      digest.Digest(a.Digest),
				Size:        a.Size,
				MediaType:   a.MediaType,
				Annotations: a.Annotations,
			}
		}
	}
	return nil
}

func encodeProvenance(dt []byte, mode confutil.MetadataProvenanceMode) (string, error) {
	var prv provenancePredicate
	if err := json.Unmarshal(dt, &prv); err != nil {
		return "", errors.Wrapf(err, "failed to unmarshal provenance")
	}
	if prv.Builder != nil && prv.Builder.ID == "" {
		// reset builder if id is empty
		prv.Builder = nil
	}
	if mode == confutil.MetadataProvenanceModeMin {
		// reset fields for minimal provenance
		prv.BuildConfig = nil
		prv.Metadata = nil
	}
	dtprv, err := json.Marshal(prv)
	if err != nil {
		return "", errors.Wrapf(err, "failed to marshal provenance")
	}
	return base64.StdEncoding.EncodeToString(dtprv), nil
}

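// For reference (sketch, not from the original source): the exporter-response
// value produced above is base64-encoded JSON, so a consumer of the metadata
// file could recover the predicate with something like:
//
//	raw, _ := base64.StdEncoding.DecodeString(sr.ExporterResponse["buildx.build.provenance"])
//	var pred map[string]any
//	_ = json.Unmarshal(raw, &pred)
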
@@ -1,164 +0,0 @@
package build

import (
	"bufio"
	"bytes"
	"io"
	"sync"
)

type SyncMultiReader struct {
	source  *bufio.Reader
	buffer  []byte
	static  []byte
	mu      sync.Mutex
	cond    *sync.Cond
	readers []*syncReader
	err     error
	offset  int
}

type syncReader struct {
	mr     *SyncMultiReader
	offset int
	closed bool
}

func NewSyncMultiReader(source io.Reader) *SyncMultiReader {
	mr := &SyncMultiReader{
		source: bufio.NewReader(source),
		buffer: make([]byte, 0, 32*1024),
	}
	mr.cond = sync.NewCond(&mr.mu)
	return mr
}

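// A minimal usage sketch (hypothetical, not from the original source): fan one
// stdin stream out to two consumers that must each observe the complete byte
// sequence.
//
//	mr := NewSyncMultiReader(os.Stdin)
//	r1 := mr.NewReadCloser()
//	r2 := mr.NewReadCloser()
//	// read from r1 and r2 concurrently; each sees every byte of os.Stdin
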
func (mr *SyncMultiReader) Peek(n int) ([]byte, error) {
	mr.mu.Lock()
	defer mr.mu.Unlock()

	if mr.static != nil {
		return mr.static[:min(n, len(mr.static))], nil
	}

	return mr.source.Peek(n)
}

func (mr *SyncMultiReader) Reset(dt []byte) {
	mr.mu.Lock()
	defer mr.mu.Unlock()

	mr.static = dt
}

func (mr *SyncMultiReader) NewReadCloser() io.ReadCloser {
	mr.mu.Lock()
	defer mr.mu.Unlock()

	if mr.static != nil {
		return io.NopCloser(bytes.NewReader(mr.static))
	}

	reader := &syncReader{
		mr: mr,
	}
	mr.readers = append(mr.readers, reader)
	return reader
}

func (sr *syncReader) Read(p []byte) (int, error) {
	sr.mr.mu.Lock()
	defer sr.mr.mu.Unlock()

	return sr.read(p)
}

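// Descriptive note (added for clarity): all readers share the window
// [mr.offset, mr.offset+len(mr.buffer)) of the source stream. A reader with
// unread bytes in the window copies them out and broadcasts; a reader that has
// drained the window first waits for every other open reader to drain it too,
// then advances mr.offset, refills the buffer from the source, and broadcasts
// so the waiters can catch up.
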
func (sr *syncReader) read(p []byte) (int, error) {
	end := sr.mr.offset + len(sr.mr.buffer)

loop0:
	for {
		if sr.closed {
			return 0, io.EOF
		}

		end := sr.mr.offset + len(sr.mr.buffer)

		if sr.mr.err != nil && sr.offset == end {
			return 0, sr.mr.err
		}

		start := sr.offset - sr.mr.offset

		dt := sr.mr.buffer[start:]

		if len(dt) > 0 {
			n := copy(p, dt)
			sr.offset += n
			sr.mr.cond.Broadcast()
			return n, nil
		}

		// check for readers that have not caught up
		hasOpen := false
		for _, r := range sr.mr.readers {
			if !r.closed {
				hasOpen = true
			} else {
				continue
			}
			if r.offset < end {
				sr.mr.cond.Wait()
				continue loop0
			}
		}

		if !hasOpen {
			return 0, io.EOF
		}
		break
	}

	last := sr.mr.offset + len(sr.mr.buffer)
	// another reader has already updated the buffer
	if last > end || sr.mr.err != nil {
		return sr.read(p)
	}

	sr.mr.offset += len(sr.mr.buffer)

	sr.mr.buffer = sr.mr.buffer[:cap(sr.mr.buffer)]
	n, err := sr.mr.source.Read(sr.mr.buffer)
	if n >= 0 {
		sr.mr.buffer = sr.mr.buffer[:n]
	} else {
		sr.mr.buffer = sr.mr.buffer[:0]
	}

	sr.mr.cond.Broadcast()

	if err != nil {
		sr.mr.err = err
		return 0, err
	}

	nn := copy(p, sr.mr.buffer)
	sr.offset += nn

	return nn, nil
}

func (sr *syncReader) Close() error {
	sr.mr.mu.Lock()
	defer sr.mr.mu.Unlock()

	if sr.closed {
		return nil
	}

	sr.closed = true

	sr.mr.cond.Broadcast()

	return nil
}

@@ -1,76 +0,0 @@
package build

import (
	"bytes"
	"crypto/rand"
	"io"
	mathrand "math/rand"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func generateRandomData(size int) []byte {
	data := make([]byte, size)
	rand.Read(data)
	return data
}

func TestSyncMultiReaderParallel(t *testing.T) {
	data := generateRandomData(1024 * 1024)
	source := bytes.NewReader(data)
	mr := NewSyncMultiReader(source)

	var wg sync.WaitGroup
	numReaders := 10
	bufferSize := 4096 * 4

	readers := make([]io.ReadCloser, numReaders)

	for i := 0; i < numReaders; i++ {
		readers[i] = mr.NewReadCloser()
	}

	for i := 0; i < numReaders; i++ {
		wg.Add(1)
		go func(readerId int) {
			defer wg.Done()
			reader := readers[readerId]
			defer reader.Close()

			totalRead := 0
			buf := make([]byte, bufferSize)
			for totalRead < len(data) {
				// Simulate random read sizes
				readSize := mathrand.Intn(bufferSize) //nolint:gosec
				n, err := reader.Read(buf[:readSize])

				if n > 0 {
					assert.Equal(t, data[totalRead:totalRead+n], buf[:n], "Reader %d mismatch", readerId)
					totalRead += n
				}

				if err == io.EOF {
					assert.Equal(t, len(data), totalRead, "Reader %d EOF mismatch", readerId)
					return
				}

				assert.NoError(t, err, "Reader %d error", readerId)

				if mathrand.Intn(1000) == 0 { //nolint:gosec
					t.Logf("Reader %d closing", readerId)
					// Simulate random close
					return
				}

				// Simulate random timing between reads
				time.Sleep(time.Millisecond * time.Duration(mathrand.Intn(5))) //nolint:gosec
			}

			assert.Equal(t, len(data), totalRead, "Reader %d total read mismatch", readerId)
		}(i)
	}

	wg.Wait()
}

495 build/result.go
@@ -1,495 +0,0 @@
package build

import (
	"context"
	_ "crypto/sha256" // ensure digests can be computed
	"encoding/json"
	"io"
	"sync"

	controllerapi "github.com/docker/buildx/controller/pb"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/exporter/containerimage/exptypes"
	gateway "github.com/moby/buildkit/frontend/gateway/client"
	"github.com/moby/buildkit/solver/errdefs"
	"github.com/moby/buildkit/solver/pb"
	"github.com/moby/buildkit/solver/result"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"golang.org/x/sync/errgroup"
)

// NewResultHandle makes a call to client.Build, additionally returning an
// opaque ResultHandle alongside the standard response and error.
//
// This ResultHandle can be used to execute additional build steps in the same
// context as the build occurred, which can allow easy debugging of build
// failures and successes.
//
// If the returned ResultHandle is not nil, the caller must call Done() on it.
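// A minimal caller sketch (hypothetical buildFunc/statusCh, not from the
// original source):
//
//	rh, resp, err := NewResultHandle(ctx, cc, opt, "buildx", buildFunc, statusCh)
//	if rh != nil {
//		defer rh.Done() // releases the gateway session held open for debugging
//	}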
func NewResultHandle(ctx context.Context, cc *client.Client, opt client.SolveOpt, product string, buildFunc gateway.BuildFunc, ch chan *client.SolveStatus) (*ResultHandle, *client.SolveResponse, error) {
	// Create a new context to wrap the original, and cancel it when the
	// caller-provided context is cancelled.
	//
	// We derive the context from the background context so that we can forbid
	// cancellation of the build request after <-done is closed (which we do
	// before returning the ResultHandle).
	baseCtx := ctx
	ctx, cancel := context.WithCancelCause(context.Background())
	done := make(chan struct{})
	go func() {
		select {
		case <-baseCtx.Done():
			cancel(baseCtx.Err())
		case <-done:
			// Once done is closed, we've recorded a ResultHandle, so we
			// shouldn't allow cancelling the underlying build request anymore.
		}
	}()

	// Create a new channel to forward status messages to the original.
	//
	// We do this so that we can discard status messages after the main portion
	// of the build is complete. This is necessary for the solve error case,
	// where the original gateway is kept open until the ResultHandle is
	// closed - we don't want progress messages from operations in that
	// ResultHandle to display after this function exits.
	//
	// Additionally, callers should wait for the progress channel to be closed.
	// If we keep the session open and never close the progress channel, the
	// caller will likely hang.
	baseCh := ch
	ch = make(chan *client.SolveStatus)
	go func() {
		for {
			s, ok := <-ch
			if !ok {
				return
			}
			select {
			case <-baseCh:
				// base channel is closed, discard status messages
			default:
				baseCh <- s
			}
		}
	}()
	defer close(baseCh)

	var resp *client.SolveResponse
	var respErr error
	var respHandle *ResultHandle

	go func() {
		defer func() { cancel(errors.WithStack(context.Canceled)) }() // ensure no dangling processes

		var res *gateway.Result
		var err error
		resp, err = cc.Build(ctx, opt, product, func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
			var err error
			res, err = buildFunc(ctx, c)

			if res != nil && err == nil {
				// Force evaluation of the build result (otherwise, we likely
				// won't get a solve error)
				def, err2 := getDefinition(ctx, res)
				if err2 != nil {
					return nil, err2
				}
				res, err = evalDefinition(ctx, c, def)
			}

			if err != nil {
				// Scenario 1: we failed to evaluate a node somewhere in the
				// build graph.
				//
				// In this case, we construct a ResultHandle from this
				// original Build session, and return it alongside the original
				// build error. We then need to keep the gateway session open
				// until the caller explicitly closes the ResultHandle.

				var se *errdefs.SolveError
				if errors.As(err, &se) {
					respHandle = &ResultHandle{
						done:     make(chan struct{}),
						solveErr: se,
						gwClient: c,
						gwCtx:    ctx,
					}
					respErr = err // return original error to preserve stacktrace
					close(done)

					// Block until the caller closes the ResultHandle.
					select {
					case <-respHandle.done:
					case <-ctx.Done():
					}
				}
			}
			return res, err
		}, ch)
		if respHandle != nil {
			return
		}
		if err != nil {
			// Something unexpected failed during the build, we didn't succeed,
			// but we also didn't make it far enough to create a ResultHandle.
			respErr = err
			close(done)
			return
		}

		// Scenario 2: we successfully built the image with no errors.
		//
		// In this case, the original gateway session has now been closed
		// since the Build has been completed. So, we need to create a new
		// gateway session to populate the ResultHandle. To do this, we
		// need to re-evaluate the target result, in this new session. This
		// should be instantaneous since the result should be cached.

		def, err := getDefinition(ctx, res)
		if err != nil {
			respErr = err
			close(done)
			return
		}

		// NOTE: ideally this second connection should be lazily opened
		opt := opt
		opt.Ref = ""
		opt.Exports = nil
		opt.CacheExports = nil
		opt.Internal = true
		_, respErr = cc.Build(ctx, opt, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
			res, err := evalDefinition(ctx, c, def)
			if err != nil {
				// This should probably not happen, since we've previously
				// successfully evaluated the same result with no issues.
				return nil, errors.Wrap(err, "inconsistent solve result")
			}
			respHandle = &ResultHandle{
				done:     make(chan struct{}),
				res:      res,
				gwClient: c,
				gwCtx:    ctx,
			}
			close(done)

			// Block until the caller closes the ResultHandle.
			select {
			case <-respHandle.done:
			case <-ctx.Done():
			}
			return nil, context.Cause(ctx)
		}, nil)
		if respHandle != nil {
			return
		}
		close(done)
	}()

	// Block until the other thread signals that it's completed the build.
	select {
	case <-done:
	case <-baseCtx.Done():
		if respErr == nil {
			respErr = baseCtx.Err()
		}
	}
	return respHandle, resp, respErr
}

// getDefinition converts a gateway result into a collection of definitions for
// each ref in the result.
func getDefinition(ctx context.Context, res *gateway.Result) (*result.Result[*pb.Definition], error) {
	return result.ConvertResult(res, func(ref gateway.Reference) (*pb.Definition, error) {
		st, err := ref.ToState()
		if err != nil {
			return nil, err
		}
		def, err := st.Marshal(ctx)
		if err != nil {
			return nil, err
		}
		return def.ToPB(), nil
	})
}

// evalDefinition performs the reverse of getDefinition, converting a
// collection of definitions into a gateway result.
func evalDefinition(ctx context.Context, c gateway.Client, defs *result.Result[*pb.Definition]) (*gateway.Result, error) {
	// force evaluation of all targets in parallel
	results := make(map[*pb.Definition]*gateway.Result)
	resultsMu := sync.Mutex{}
	eg, egCtx := errgroup.WithContext(ctx)
	defs.EachRef(func(def *pb.Definition) error {
		eg.Go(func() error {
			res, err := c.Solve(egCtx, gateway.SolveRequest{
				Evaluate:   true,
				Definition: def,
			})
			if err != nil {
				return err
			}
			resultsMu.Lock()
			results[def] = res
			resultsMu.Unlock()
			return nil
		})
		return nil
	})
	if err := eg.Wait(); err != nil {
		return nil, err
	}
	res, _ := result.ConvertResult(defs, func(def *pb.Definition) (gateway.Reference, error) {
		if res, ok := results[def]; ok {
			return res.Ref, nil
		}
		return nil, nil
	})
	return res, nil
}

// ResultHandle is a build result with the client that built it.
type ResultHandle struct {
	res      *gateway.Result
	solveErr *errdefs.SolveError

	done     chan struct{}
	doneOnce sync.Once

	gwClient gateway.Client
	gwCtx    context.Context

	cleanups   []func()
	cleanupsMu sync.Mutex
}

func (r *ResultHandle) Done() {
	r.doneOnce.Do(func() {
		r.cleanupsMu.Lock()
		cleanups := r.cleanups
		r.cleanups = nil
		r.cleanupsMu.Unlock()
		for _, f := range cleanups {
			f()
		}

		close(r.done)
		<-r.gwCtx.Done()
	})
}

func (r *ResultHandle) registerCleanup(f func()) {
	r.cleanupsMu.Lock()
	r.cleanups = append(r.cleanups, f)
	r.cleanupsMu.Unlock()
}

func (r *ResultHandle) build(buildFunc gateway.BuildFunc) (err error) {
	_, err = buildFunc(r.gwCtx, r.gwClient)
	return err
}

func (r *ResultHandle) getContainerConfig(cfg *controllerapi.InvokeConfig) (containerCfg gateway.NewContainerRequest, _ error) {
	if r.res != nil && r.solveErr == nil {
		logrus.Debugf("creating container from successful build")
		ccfg, err := containerConfigFromResult(r.res, cfg)
		if err != nil {
			return containerCfg, err
		}
		containerCfg = *ccfg
	} else {
		logrus.Debugf("creating container from failed build %+v", cfg)
		ccfg, err := containerConfigFromError(r.solveErr, cfg)
		if err != nil {
			return containerCfg, errors.Wrapf(err, "no result nor error is available")
		}
		containerCfg = *ccfg
	}
	return containerCfg, nil
}

func (r *ResultHandle) getProcessConfig(cfg *controllerapi.InvokeConfig, stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) (_ gateway.StartRequest, err error) {
	processCfg := newStartRequest(stdin, stdout, stderr)
	if r.res != nil && r.solveErr == nil {
		logrus.Debugf("creating container from successful build")
		if err := populateProcessConfigFromResult(&processCfg, r.res, cfg); err != nil {
			return processCfg, err
		}
	} else {
		logrus.Debugf("creating container from failed build %+v", cfg)
		if err := populateProcessConfigFromError(&processCfg, r.solveErr, cfg); err != nil {
			return processCfg, err
		}
	}
	return processCfg, nil
}

func containerConfigFromResult(res *gateway.Result, cfg *controllerapi.InvokeConfig) (*gateway.NewContainerRequest, error) {
	if cfg.Initial {
		return nil, errors.Errorf("starting from the container from the initial state of the step is supported only on the failed steps")
	}

	ps, err := exptypes.ParsePlatforms(res.Metadata)
	if err != nil {
		return nil, err
	}
	ref, ok := res.FindRef(ps.Platforms[0].ID)
	if !ok {
		return nil, errors.Errorf("no reference found")
	}

	return &gateway.NewContainerRequest{
		Mounts: []gateway.Mount{
			{
				Dest:      "/",
				MountType: pb.MountType_BIND,
				Ref:       ref,
			},
		},
	}, nil
}

func populateProcessConfigFromResult(req *gateway.StartRequest, res *gateway.Result, cfg *controllerapi.InvokeConfig) error {
	imgData := res.Metadata[exptypes.ExporterImageConfigKey]
	var img *specs.Image
	if len(imgData) > 0 {
		img = &specs.Image{}
		if err := json.Unmarshal(imgData, img); err != nil {
			return err
		}
	}

	user := ""
	if !cfg.NoUser {
		user = cfg.User
	} else if img != nil {
		user = img.Config.User
	}

	cwd := ""
	if !cfg.NoCwd {
		cwd = cfg.Cwd
	} else if img != nil {
		cwd = img.Config.WorkingDir
	}

	env := []string{}
	if img != nil {
		env = append(env, img.Config.Env...)
	}
	env = append(env, cfg.Env...)

	args := []string{}
	if cfg.Entrypoint != nil {
		args = append(args, cfg.Entrypoint...)
	} else if img != nil {
		args = append(args, img.Config.Entrypoint...)
	}
	if !cfg.NoCmd {
		args = append(args, cfg.Cmd...)
	} else if img != nil {
		args = append(args, img.Config.Cmd...)
	}

	req.Args = args
	req.Env = env
	req.User = user
	req.Cwd = cwd
	req.Tty = cfg.Tty

	return nil
}

func containerConfigFromError(solveErr *errdefs.SolveError, cfg *controllerapi.InvokeConfig) (*gateway.NewContainerRequest, error) {
	exec, err := execOpFromError(solveErr)
	if err != nil {
		return nil, err
	}
	var mounts []gateway.Mount
	for i, mnt := range exec.Mounts {
		rid := solveErr.Solve.MountIDs[i]
		if cfg.Initial {
			rid = solveErr.Solve.InputIDs[i]
		}
		mounts = append(mounts, gateway.Mount{
			Selector:  mnt.Selector,
			Dest:      mnt.Dest,
			ResultID:  rid,
			Readonly:  mnt.Readonly,
			MountType: mnt.MountType,
			CacheOpt:  mnt.CacheOpt,
			SecretOpt: mnt.SecretOpt,
			SSHOpt:    mnt.SSHOpt,
		})
	}
	return &gateway.NewContainerRequest{
		Mounts:  mounts,
		NetMode: exec.Network,
	}, nil
}

func populateProcessConfigFromError(req *gateway.StartRequest, solveErr *errdefs.SolveError, cfg *controllerapi.InvokeConfig) error {
	exec, err := execOpFromError(solveErr)
	if err != nil {
		return err
	}
	meta := exec.Meta
	user := ""
	if !cfg.NoUser {
		user = cfg.User
	} else {
		user = meta.User
	}

	cwd := ""
	if !cfg.NoCwd {
		cwd = cfg.Cwd
	} else {
		cwd = meta.Cwd
	}

	env := append(meta.Env, cfg.Env...)

	args := []string{}
	if cfg.Entrypoint != nil {
		args = append(args, cfg.Entrypoint...)
	}
	if cfg.Cmd != nil {
		args = append(args, cfg.Cmd...)
	}
	if len(args) == 0 {
		args = meta.Args
	}

	req.Args = args
	req.Env = env
	req.User = user
	req.Cwd = cwd
	req.Tty = cfg.Tty

	return nil
}

func execOpFromError(solveErr *errdefs.SolveError) (*pb.ExecOp, error) {
	if solveErr == nil {
		return nil, errors.Errorf("no error is available")
	}
	switch op := solveErr.Solve.Op.GetOp().(type) {
	case *pb.Op_Exec:
		return op.Exec, nil
	default:
		return nil, errors.Errorf("invoke: unsupported error type")
	}
	// TODO: support other ops
}

func newStartRequest(stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) gateway.StartRequest {
	return gateway.StartRequest{
		Stdin:  stdin,
		Stdout: stdout,
		Stderr: stderr,
	}
}

12 build/url.go

@@ -7,16 +7,13 @@ import (

	"github.com/docker/buildx/driver"
	"github.com/docker/buildx/util/progress"
	"github.com/docker/go-units"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/client/llb"
	gwclient "github.com/moby/buildkit/frontend/gateway/client"
	"github.com/pkg/errors"
)

const maxDockerfileSize = 2 * 1024 * 1024 // 2 MB

func createTempDockerfileFromURL(ctx context.Context, d *driver.DriverHandle, url string, pw progress.Writer) (string, error) {
func createTempDockerfileFromURL(ctx context.Context, d driver.Driver, url string, pw progress.Writer) (string, error) {
	c, err := driver.Boot(ctx, ctx, d, pw)
	if err != nil {
		return "", err
@@ -24,7 +21,7 @@ func createTempDockerfileFromURL(ctx context.Context, d *driver.DriverHandle, ur
	var out string
	ch, done := progress.NewChannel(pw)
	defer func() { <-done }()
	_, err = c.Build(ctx, client.SolveOpt{Internal: true}, "buildx", func(ctx context.Context, c gwclient.Client) (*gwclient.Result, error) {
	_, err = c.Build(ctx, client.SolveOpt{}, "buildx", func(ctx context.Context, c gwclient.Client) (*gwclient.Result, error) {
		def, err := llb.HTTP(url, llb.Filename("Dockerfile"), llb.WithCustomNamef("[internal] load %s", url)).Marshal(ctx)
		if err != nil {
			return nil, err
@@ -46,8 +43,8 @@ func createTempDockerfileFromURL(ctx context.Context, d *driver.DriverHandle, ur
		if err != nil {
			return nil, err
		}
		if stat.Size > maxDockerfileSize {
			return nil, errors.Errorf("Dockerfile %s bigger than allowed max size (%s)", url, units.HumanSize(maxDockerfileSize))
		if stat.Size() > 512*1024 {
			return nil, errors.Errorf("Dockerfile %s bigger than allowed max size", url)
		}

		dt, err := ref.ReadFile(ctx, gwclient.ReadRequest{
@@ -66,6 +63,7 @@ func createTempDockerfileFromURL(ctx context.Context, d *driver.DriverHandle, ur
		out = dir
		return nil, nil
	}, ch)

	if err != nil {
		return "", err
	}

@@ -3,17 +3,12 @@ package build
import (
	"archive/tar"
	"bytes"
	"context"
	"net"
	"os"
	"strconv"
	"strings"

	"github.com/docker/buildx/driver"
	"github.com/docker/cli/opts"
	"github.com/moby/buildkit/util/gitutil"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

const (
@@ -25,21 +20,9 @@ const (
	mobyHostGatewayName = "host-gateway"
)

// isHTTPURL returns true if the provided str is an HTTP(S) URL by checking if it
// has a http:// or https:// scheme. No validation is performed to verify if the
// URL is well-formed.
func isHTTPURL(str string) bool {
	return strings.HasPrefix(str, "https://") || strings.HasPrefix(str, "http://")
}

func IsRemoteURL(c string) bool {
	if isHTTPURL(c) {
		return true
	}
	if _, err := gitutil.ParseGitRef(c); err == nil {
		return true
	}
	return false
func isLocalDir(c string) bool {
	st, err := os.Stat(c)
	return err == nil && st.IsDir()
}

func isArchive(header []byte) bool {
@@ -62,34 +45,18 @@ func isArchive(header []byte) bool {
}

// toBuildkitExtraHosts converts hosts from docker key:value format to buildkit's csv format
func toBuildkitExtraHosts(ctx context.Context, inp []string, nodeDriver *driver.DriverHandle) (string, error) {
func toBuildkitExtraHosts(inp []string, mobyDriver bool) (string, error) {
	if len(inp) == 0 {
		return "", nil
	}
	hosts := make([]string, 0, len(inp))
	for _, h := range inp {
		host, ip, ok := strings.Cut(h, "=")
		if !ok {
			host, ip, ok = strings.Cut(h, ":")
		}
		host, ip, ok := strings.Cut(h, ":")
		if !ok || host == "" || ip == "" {
			return "", errors.Errorf("invalid host %s", h)
		}
		// If the IP Address is a "host-gateway", replace this value with the
		// IP address provided by the worker's label.
		if ip == mobyHostGatewayName {
			hgip, err := nodeDriver.HostGatewayIP(ctx)
			if err != nil {
				return "", errors.Wrap(err, "unable to derive the IP value for host-gateway")
			}
			ip = hgip.String()
		} else {
			// If the address is enclosed in square brackets, extract it (for IPv6, but
			// permit it for IPv4 as well; we don't know the address family here, but it's
			// unambiguous).
			if len(ip) > 2 && ip[0] == '[' && ip[len(ip)-1] == ']' {
				ip = ip[1 : len(ip)-1]
			}
		// Skip IP address validation for "host-gateway" string with moby driver
		if !mobyDriver || ip != mobyHostGatewayName {
			if net.ParseIP(ip) == nil {
				return "", errors.Errorf("invalid host %s", h)
			}
@@ -110,21 +77,3 @@ func toBuildkitUlimits(inp *opts.UlimitOpt) (string, error) {
	}
	return strings.Join(ulimits, ","), nil
}

func notSupported(f driver.Feature, d *driver.DriverHandle, docs string) error {
	return errors.Errorf(`%s is not supported for the %s driver.
Switch to a different driver, or turn on the containerd image store, and try again.
Learn more at %s`, f, d.Factory().Name(), docs)
}

func noDefaultLoad() bool {
	v, ok := os.LookupEnv("BUILDX_NO_DEFAULT_LOAD")
	if !ok {
		return false
	}
	b, err := strconv.ParseBool(v)
	if err != nil {
		logrus.Warnf("invalid non-bool value for BUILDX_NO_DEFAULT_LOAD: %s", v)
	}
	return b
}

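// Illustrative (env var consumed by noDefaultLoad above; behavior hedged from
// the function name, not spelled out in this source):
//
//	BUILDX_NO_DEFAULT_LOAD=1 docker buildx build .   # opt out of the default load of the result
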
@@ -1,148 +0,0 @@
|
||||
package build
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestToBuildkitExtraHosts(t *testing.T) {
|
||||
tests := []struct {
|
||||
doc string
|
||||
input []string
|
||||
expectedOut string // Expect output==input if not set.
|
||||
expectedErr string // Expect success if not set.
|
||||
}{
|
||||
{
|
||||
doc: "IPv4, colon sep",
|
||||
input: []string{`myhost:192.168.0.1`},
|
||||
expectedOut: `myhost=192.168.0.1`,
|
||||
},
|
||||
{
|
||||
doc: "IPv4, eq sep",
|
||||
input: []string{`myhost=192.168.0.1`},
|
||||
},
|
||||
{
|
||||
doc: "Weird but permitted, IPv4 with brackets",
|
||||
input: []string{`myhost=[192.168.0.1]`},
|
||||
expectedOut: `myhost=192.168.0.1`,
|
||||
},
|
||||
{
|
||||
doc: "Host and domain",
|
||||
input: []string{`host.and.domain.invalid:10.0.2.1`},
|
||||
expectedOut: `host.and.domain.invalid=10.0.2.1`,
|
||||
},
|
||||
{
|
||||
doc: "IPv6, colon sep",
|
||||
input: []string{`anipv6host:2003:ab34:e::1`},
|
||||
expectedOut: `anipv6host=2003:ab34:e::1`,
|
||||
},
|
||||
{
|
||||
doc: "IPv6, colon sep, brackets",
|
||||
input: []string{`anipv6host:[2003:ab34:e::1]`},
|
||||
expectedOut: `anipv6host=2003:ab34:e::1`,
|
||||
},
|
||||
{
|
||||
doc: "IPv6, eq sep, brackets",
|
||||
input: []string{`anipv6host=[2003:ab34:e::1]`},
|
||||
expectedOut: `anipv6host=2003:ab34:e::1`,
|
||||
},
|
||||
{
|
||||
doc: "IPv6 localhost, colon sep",
|
||||
input: []string{`ipv6local:::1`},
|
||||
expectedOut: `ipv6local=::1`,
|
||||
},
|
||||
{
|
||||
doc: "IPv6 localhost, eq sep",
|
||||
input: []string{`ipv6local=::1`},
|
||||
},
|
||||
{
|
||||
doc: "IPv6 localhost, eq sep, brackets",
|
||||
input: []string{`ipv6local=[::1]`},
|
||||
expectedOut: `ipv6local=::1`,
|
||||
},
|
||||
{
|
||||
doc: "IPv6 localhost, non-canonical, colon sep",
|
||||
input: []string{`ipv6local:0:0:0:0:0:0:0:1`},
|
||||
expectedOut: `ipv6local=0:0:0:0:0:0:0:1`,
|
||||
},
|
||||
{
|
||||
doc: "IPv6 localhost, non-canonical, eq sep",
|
||||
input: []string{`ipv6local=0:0:0:0:0:0:0:1`},
|
||||
},
|
||||
{
|
||||
doc: "IPv6 localhost, non-canonical, eq sep, brackets",
|
||||
input: []string{`ipv6local=[0:0:0:0:0:0:0:1]`},
|
||||
expectedOut: `ipv6local=0:0:0:0:0:0:0:1`,
|
||||
},
|
||||
{
|
||||
doc: "Bad address, colon sep",
|
||||
input: []string{`myhost:192.notanipaddress.1`},
|
||||
expectedErr: `invalid IP address in add-host: "192.notanipaddress.1"`,
|
||||
},
|
||||
{
|
||||
doc: "Bad address, eq sep",
|
||||
input: []string{`myhost=192.notanipaddress.1`},
|
||||
expectedErr: `invalid IP address in add-host: "192.notanipaddress.1"`,
|
||||
},
|
||||
{
|
||||
doc: "No sep",
|
||||
input: []string{`thathost-nosemicolon10.0.0.1`},
|
||||
expectedErr: `bad format for add-host: "thathost-nosemicolon10.0.0.1"`,
|
||||
},
|
||||
{
|
||||
doc: "Bad IPv6",
|
||||
input: []string{`anipv6host:::::1`},
|
||||
expectedErr: `invalid IP address in add-host: "::::1"`,
|
||||
},
|
||||
{
|
||||
doc: "Bad IPv6, trailing colons",
|
||||
input: []string{`ipv6local:::0::`},
|
||||
expectedErr: `invalid IP address in add-host: "::0::"`,
|
||||
},
|
||||
{
|
||||
doc: "Bad IPv6, missing close bracket",
|
||||
input: []string{`ipv6addr=[::1`},
|
||||
expectedErr: `invalid IP address in add-host: "[::1"`,
|
||||
},
|
||||
{
|
||||
doc: "Bad IPv6, missing open bracket",
|
||||
input: []string{`ipv6addr=::1]`},
|
||||
expectedErr: `invalid IP address in add-host: "::1]"`,
|
||||
},
|
||||
{
|
||||
doc: "Missing address, colon sep",
|
||||
input: []string{`myhost.invalid:`},
|
||||
expectedErr: `invalid IP address in add-host: ""`,
|
||||
},
|
||||
{
|
||||
doc: "Missing address, eq sep",
|
||||
input: []string{`myhost.invalid=`},
|
||||
expectedErr: `invalid IP address in add-host: ""`,
|
||||
},
|
||||
{
|
||||
doc: "No input",
|
||||
input: []string{``},
|
||||
expectedErr: `bad format for add-host: ""`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
tc := tc
|
||||
if tc.expectedOut == "" {
|
||||
tc.expectedOut = strings.Join(tc.input, ",")
|
||||
}
|
||||
t.Run(tc.doc, func(t *testing.T) {
|
||||
actualOut, actualErr := toBuildkitExtraHosts(context.TODO(), tc.input, nil)
|
||||
if tc.expectedErr == "" {
|
||||
require.Equal(t, tc.expectedOut, actualOut)
|
||||
require.NoError(t, actualErr)
|
||||
} else {
|
||||
require.Zero(t, actualOut)
|
||||
require.Error(t, actualErr, tc.expectedErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -2,31 +2,18 @@ package builder
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/docker/buildx/driver"
|
||||
k8sutil "github.com/docker/buildx/driver/kubernetes/util"
|
||||
remoteutil "github.com/docker/buildx/driver/remote/util"
|
||||
"github.com/docker/buildx/localstate"
|
||||
"github.com/docker/buildx/store"
|
||||
"github.com/docker/buildx/store/storeutil"
|
||||
"github.com/docker/buildx/util/confutil"
|
||||
"github.com/docker/buildx/util/dockerutil"
|
||||
"github.com/docker/buildx/util/imagetools"
|
||||
"github.com/docker/buildx/util/progress"
|
||||
"github.com/docker/cli/cli/command"
|
||||
dopts "github.com/docker/cli/opts"
|
||||
"github.com/google/shlex"
|
||||
"github.com/moby/buildkit/util/progress/progressui"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/tonistiigi/go-csvvalue"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
@@ -121,7 +108,7 @@ func New(dockerCli command.Cli, opts ...Option) (_ *Builder, err error) {
|
||||
|
||||
// Validate validates builder context
|
||||
func (b *Builder) Validate() error {
|
||||
if b.NodeGroup != nil && b.NodeGroup.DockerContext {
|
||||
if b.NodeGroup.DockerContext {
|
||||
list, err := b.opts.dockerCli.ContextStore().List()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -170,14 +157,13 @@ func (b *Builder) Boot(ctx context.Context) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
printer, err := progress.NewPrinter(context.TODO(), os.Stderr, progressui.AutoMode)
|
||||
printer, err := progress.NewPrinter(context.TODO(), os.Stderr, os.Stderr, progress.PrinterModeAuto)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
baseCtx := ctx
|
||||
eg, _ := errgroup.WithContext(ctx)
|
||||
errCh := make(chan error, len(toBoot))
|
||||
for _, idx := range toBoot {
|
||||
func(idx int) {
|
||||
eg.Go(func() error {
|
||||
@@ -185,7 +171,6 @@ func (b *Builder) Boot(ctx context.Context) (bool, error) {
|
||||
_, err := driver.Boot(ctx, baseCtx, b.nodes[idx].Driver, pw)
|
||||
if err != nil {
|
||||
b.nodes[idx].Err = err
|
||||
errCh <- err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
@@ -193,15 +178,11 @@ func (b *Builder) Boot(ctx context.Context) (bool, error) {
|
||||
}
|
||||
|
||||
err = eg.Wait()
|
||||
close(errCh)
|
||||
err1 := printer.Wait()
|
||||
if err == nil {
|
||||
err = err1
|
||||
}
|
||||
|
||||
if err == nil && len(errCh) == len(toBoot) {
|
||||
return false, <-errCh
|
||||
}
|
||||
return true, err
|
||||
}
|
||||
|
||||
@@ -226,7 +207,7 @@ type driverFactory struct {
|
||||
}
|
||||
|
||||
// Factory returns the driver factory.
|
||||
func (b *Builder) Factory(ctx context.Context, dialMeta map[string][]string) (_ driver.Factory, err error) {
|
||||
func (b *Builder) Factory(ctx context.Context) (_ driver.Factory, err error) {
|
||||
b.driverFactory.once.Do(func() {
|
||||
if b.Driver != "" {
|
||||
b.driverFactory.Factory, err = driver.GetFactory(b.Driver, true)
|
||||
@@ -249,7 +230,7 @@ func (b *Builder) Factory(ctx context.Context, dialMeta map[string][]string) (_
|
||||
if _, err = dockerapi.Ping(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
b.driverFactory.Factory, err = driver.GetDefaultFactory(ctx, ep, dockerapi, false, dialMeta)
|
||||
b.driverFactory.Factory, err = driver.GetDefaultFactory(ctx, ep, dockerapi, false)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -259,28 +240,6 @@ func (b *Builder) Factory(ctx context.Context, dialMeta map[string][]string) (_
|
||||
return b.driverFactory.Factory, err
|
||||
}
|
||||
|
||||
func (b *Builder) MarshalJSON() ([]byte, error) {
|
||||
var berr string
|
||||
if b.err != nil {
|
||||
berr = strings.TrimSpace(b.err.Error())
|
||||
}
|
||||
return json.Marshal(struct {
|
||||
Name string
|
||||
Driver string
|
||||
LastActivity time.Time `json:",omitempty"`
|
||||
Dynamic bool
|
||||
Nodes []Node
|
||||
Err string `json:",omitempty"`
|
||||
}{
|
||||
Name: b.Name,
|
||||
Driver: b.Driver,
|
||||
LastActivity: b.LastActivity,
|
||||
Dynamic: b.Dynamic,
|
||||
Nodes: b.nodes,
|
||||
Err: berr,
|
||||
})
|
||||
}
|
||||
|
||||
// GetBuilders returns all builders
|
||||
func GetBuilders(dockerCli command.Cli, txn *store.Txn) ([]*Builder, error) {
|
||||
storeng, err := txn.List()
|
||||
@@ -288,15 +247,7 @@ func GetBuilders(dockerCli command.Cli, txn *store.Txn) ([]*Builder, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
contexts, err := dockerCli.ContextStore().List()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sort.Slice(contexts, func(i, j int) bool {
|
||||
return contexts[i].Name < contexts[j].Name
|
||||
})
|
||||
|
||||
builders := make([]*Builder, len(storeng), len(storeng)+len(contexts))
|
||||
builders := make([]*Builder, len(storeng))
|
||||
seen := make(map[string]struct{})
|
||||
for i, ng := range storeng {
|
||||
b, err := New(dockerCli,
|
||||
@@ -311,6 +262,14 @@ func GetBuilders(dockerCli command.Cli, txn *store.Txn) ([]*Builder, error) {
|
||||
seen[b.NodeGroup.Name] = struct{}{}
|
||||
}
|
||||
|
||||
contexts, err := dockerCli.ContextStore().List()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sort.Slice(contexts, func(i, j int) bool {
|
||||
return contexts[i].Name < contexts[j].Name
|
||||
})
|
||||
|
||||
for _, c := range contexts {
|
||||
// if a context has the same name as an instance from the store, do not
|
||||
// add it to the builders list. An instance from the store takes
|
||||
@@ -331,364 +290,3 @@ func GetBuilders(dockerCli command.Cli, txn *store.Txn) ([]*Builder, error) {
|
||||
|
||||
return builders, nil
|
||||
}
|
||||
|
||||
type CreateOpts struct {
|
||||
Name string
|
||||
Driver string
|
||||
NodeName string
|
||||
Platforms []string
|
||||
BuildkitdFlags string
|
||||
BuildkitdConfigFile string
|
||||
DriverOpts []string
|
||||
Use bool
|
||||
Endpoint string
|
||||
Append bool
|
||||
}
|
||||
|
||||
func Create(ctx context.Context, txn *store.Txn, dockerCli command.Cli, opts CreateOpts) (*Builder, error) {
|
||||
var err error
|
||||
|
||||
if opts.Name == "default" {
|
||||
return nil, errors.Errorf("default is a reserved name and cannot be used to identify builder instance")
|
||||
} else if opts.Append && opts.Name == "" {
|
||||
return nil, errors.Errorf("append requires a builder name")
|
||||
}
|
||||
|
||||
name := opts.Name
|
||||
if name == "" {
|
||||
name, err = store.GenerateName(txn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if !opts.Append {
|
||||
contexts, err := dockerCli.ContextStore().List()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, c := range contexts {
|
||||
if c.Name == name {
|
||||
return nil, errors.Errorf("instance name %q already exists as context builder", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ng, err := txn.NodeGroupByName(name)
|
||||
if err != nil {
|
||||
if os.IsNotExist(errors.Cause(err)) {
|
||||
if opts.Append && opts.Name != "" {
|
||||
return nil, errors.Errorf("failed to find instance %q for append", opts.Name)
|
||||
}
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
buildkitHost := os.Getenv("BUILDKIT_HOST")
|
||||
|
||||
driverName := opts.Driver
|
||||
if driverName == "" {
|
||||
if ng != nil {
|
||||
driverName = ng.Driver
|
||||
} else if opts.Endpoint == "" && buildkitHost != "" {
|
||||
driverName = "remote"
|
||||
} else {
|
||||
f, err := driver.GetDefaultFactory(ctx, opts.Endpoint, dockerCli.Client(), true, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if f == nil {
|
||||
return nil, errors.Errorf("no valid drivers found")
|
||||
}
|
||||
driverName = f.Name()
|
||||
}
|
||||
}
|
||||
|
||||
if ng != nil {
|
||||
if opts.NodeName == "" && !opts.Append {
|
||||
return nil, errors.Errorf("existing instance for %q but no append mode, specify the node name to make changes for existing instances", name)
|
||||
}
|
||||
if driverName != ng.Driver {
|
||||
return nil, errors.Errorf("existing instance for %q but has mismatched driver %q", name, ng.Driver)
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := driver.GetFactory(driverName, true); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ngOriginal := ng
|
||||
if ngOriginal != nil {
|
||||
ngOriginal = ngOriginal.Copy()
|
||||
}
|
||||
|
||||
if ng == nil {
|
||||
ng = &store.NodeGroup{
|
||||
Name: name,
|
||||
Driver: driverName,
|
||||
}
|
||||
}
|
||||
|
||||
driverOpts, err := csvToMap(opts.DriverOpts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
buildkitdConfigFile := opts.BuildkitdConfigFile
|
||||
if buildkitdConfigFile == "" {
|
||||
// if buildkit daemon config is not provided, check if the default one
|
||||
// is available and use it
|
||||
if f, ok := confutil.NewConfig(dockerCli).BuildKitConfigFile(); ok {
|
||||
buildkitdConfigFile = f
|
||||
}
|
||||
}
|
||||
|
||||
buildkitdFlags, err := parseBuildkitdFlags(opts.BuildkitdFlags, driverName, driverOpts, buildkitdConfigFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var ep string
|
||||
var setEp bool
|
||||
switch {
|
||||
case driverName == "kubernetes":
|
||||
if opts.Endpoint != "" {
|
||||
return nil, errors.Errorf("kubernetes driver does not support endpoint args %q", opts.Endpoint)
|
||||
}
|
||||
// generate node name if not provided to avoid duplicated endpoint
|
||||
// error: https://github.com/docker/setup-buildx-action/issues/215
|
||||
nodeName := opts.NodeName
|
||||
if nodeName == "" {
|
||||
nodeName, err = k8sutil.GenerateNodeName(name, txn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// naming endpoint to make append works
        ep = (&url.URL{
            Scheme: driverName,
            Path:   "/" + name,
            RawQuery: (&url.Values{
                "deployment": {nodeName},
                "kubeconfig": {os.Getenv("KUBECONFIG")},
            }).Encode(),
        }).String()
        setEp = false
    case driverName == "remote":
        if opts.Endpoint != "" {
            ep = opts.Endpoint
        } else if buildkitHost != "" {
            ep = buildkitHost
        } else {
            return nil, errors.Errorf("no remote endpoint provided")
        }
        ep, err = validateBuildkitEndpoint(ep)
        if err != nil {
            return nil, err
        }
        setEp = true
    case opts.Endpoint != "":
        ep, err = validateEndpoint(dockerCli, opts.Endpoint)
        if err != nil {
            return nil, err
        }
        setEp = true
    default:
        if dockerCli.CurrentContext() == "default" && dockerCli.DockerEndpoint().TLSData != nil {
            return nil, errors.Errorf("could not create a builder instance with TLS data loaded from environment. Please use `docker context create <context-name>` to create a context for current environment and then create a builder instance with context set to <context-name>")
        }
        ep, err = dockerutil.GetCurrentEndpoint(dockerCli)
        if err != nil {
            return nil, err
        }
        setEp = false
    }

    if err := ng.Update(opts.NodeName, ep, opts.Platforms, setEp, opts.Append, buildkitdFlags, buildkitdConfigFile, driverOpts); err != nil {
        return nil, err
    }

    if err := txn.Save(ng); err != nil {
        return nil, err
    }

    b, err := New(dockerCli,
        WithName(ng.Name),
        WithStore(txn),
        WithSkippedValidation(),
    )
    if err != nil {
        return nil, err
    }

    cancelCtx, cancel := context.WithCancelCause(ctx)
    timeoutCtx, _ := context.WithTimeoutCause(cancelCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
    defer func() { cancel(errors.WithStack(context.Canceled)) }()

    nodes, err := b.LoadNodes(timeoutCtx, WithData())
    if err != nil {
        return nil, err
    }

    for _, node := range nodes {
        if err := node.Err; err != nil {
            err := errors.Errorf("failed to initialize builder %s (%s): %s", ng.Name, node.Name, err)
            var err2 error
            if ngOriginal == nil {
                err2 = txn.Remove(ng.Name)
            } else {
                err2 = txn.Save(ngOriginal)
            }
            if err2 != nil {
                return nil, errors.Errorf("could not rollback to previous state: %s", err2)
            }
            return nil, err
        }
    }

    if opts.Use && ep != "" {
        current, err := dockerutil.GetCurrentEndpoint(dockerCli)
        if err != nil {
            return nil, err
        }
        if err := txn.SetCurrent(current, ng.Name, false, false); err != nil {
            return nil, err
        }
    }

    return b, nil
}

type LeaveOpts struct {
    Name     string
    NodeName string
}

func Leave(ctx context.Context, txn *store.Txn, dockerCli command.Cli, opts LeaveOpts) error {
    if opts.Name == "" {
        return errors.Errorf("leave requires instance name")
    }
    if opts.NodeName == "" {
        return errors.Errorf("leave requires node name")
    }

    ng, err := txn.NodeGroupByName(opts.Name)
    if err != nil {
        if os.IsNotExist(errors.Cause(err)) {
            return errors.Errorf("failed to find instance %q for leave", opts.Name)
        }
        return err
    }

    if err := ng.Leave(opts.NodeName); err != nil {
        return err
    }

    ls, err := localstate.New(confutil.NewConfig(dockerCli))
    if err != nil {
        return err
    }
    if err := ls.RemoveBuilderNode(ng.Name, opts.NodeName); err != nil {
        return err
    }

    return txn.Save(ng)
}
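// Illustrative sketch (not from the compared commits): detaching a node from a
// named builder with Leave. The builder and node names below are hypothetical.
//
//     err := Leave(ctx, txn, dockerCli, LeaveOpts{
//         Name:     "mybuilder",
//         NodeName: "mybuilder0",
//     })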

func csvToMap(in []string) (map[string]string, error) {
    if len(in) == 0 {
        return nil, nil
    }
    m := make(map[string]string, len(in))
    for _, s := range in {
        fields, err := csvvalue.Fields(s, nil)
        if err != nil {
            return nil, err
        }
        for _, v := range fields {
            p := strings.SplitN(v, "=", 2)
            if len(p) != 2 {
                return nil, errors.Errorf("invalid value %q, expecting k=v", v)
            }
            m[p[0]] = p[1]
        }
    }
    return m, nil
}
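// Illustrative sketch (not from the compared commits): csvToMap turns
// --driver-opt style CSV values into a key/value map, e.g.:
//
//     m, _ := csvToMap([]string{"network=host,image=moby/buildkit:master"})
//     // m == map[string]string{"network": "host", "image": "moby/buildkit:master"}
//
// A field without "=" fails with an "expecting k=v" error.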

// validateEndpoint validates that endpoint is either a context or a docker host
func validateEndpoint(dockerCli command.Cli, ep string) (string, error) {
    dem, err := dockerutil.GetDockerEndpoint(dockerCli, ep)
    if err == nil && dem != nil {
        if ep == "default" {
            return dem.Host, nil
        }
        return ep, nil
    }
    h, err := dopts.ParseHost(true, ep)
    if err != nil {
        return "", errors.Wrapf(err, "failed to parse endpoint %s", ep)
    }
    return h, nil
}

// validateBuildkitEndpoint validates that endpoint is a valid buildkit host
func validateBuildkitEndpoint(ep string) (string, error) {
    if err := remoteutil.IsValidEndpoint(ep); err != nil {
        return "", err
    }
    return ep, nil
}

// parseBuildkitdFlags parses buildkit flags
func parseBuildkitdFlags(inp string, driver string, driverOpts map[string]string, buildkitdConfigFile string) (res []string, err error) {
    if inp != "" {
        res, err = shlex.Split(inp)
        if err != nil {
            return nil, errors.Wrap(err, "failed to parse buildkit flags")
        }
    }

    var allowInsecureEntitlements []string
    flags := pflag.NewFlagSet("buildkitd", pflag.ContinueOnError)
    flags.Usage = func() {}
    flags.StringArrayVar(&allowInsecureEntitlements, "allow-insecure-entitlement", nil, "")
    _ = flags.Parse(res)

    var hasNetworkHostEntitlement bool
    for _, e := range allowInsecureEntitlements {
        if e == "network.host" {
            hasNetworkHostEntitlement = true
            break
        }
    }

    var hasNetworkHostEntitlementInConf bool
    if buildkitdConfigFile != "" {
        btoml, err := confutil.LoadConfigTree(buildkitdConfigFile)
        if err != nil {
            return nil, err
        } else if btoml != nil {
            if ies := btoml.GetArray("insecure-entitlements"); ies != nil {
                for _, e := range ies.([]string) {
                    if e == "network.host" {
                        hasNetworkHostEntitlementInConf = true
                        break
                    }
                }
            }
        }
    }

    if v, ok := driverOpts["network"]; ok && v == "host" && !hasNetworkHostEntitlement && driver == "docker-container" {
        // always set network.host entitlement if user has set network=host
        res = append(res, "--allow-insecure-entitlement=network.host")
    } else if len(allowInsecureEntitlements) == 0 && !hasNetworkHostEntitlementInConf && (driver == "kubernetes" || driver == "docker-container") {
        // set network.host entitlement if user does not provide any as
        // network is isolated for container drivers.
        res = append(res, "--allow-insecure-entitlement=network.host")
    }

    return res, nil
}
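// Illustrative sketch (not from the compared commits), mirroring the test cases
// below: container-based drivers receive the network.host entitlement by default
// when the user supplies neither flags nor a config entry:
//
//     flags, _ := parseBuildkitdFlags("", "docker-container", nil, "")
//     // flags == []string{"--allow-insecure-entitlement=network.host"}
//
//     flags, _ = parseBuildkitdFlags("", "remote", nil, "")
//     // flags == nil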
@@ -1,204 +0,0 @@
package builder

import (
    "os"
    "path"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestCsvToMap(t *testing.T) {
    d := []string{
        "\"tolerations=key=foo,value=bar;key=foo2,value=bar2\",replicas=1",
        "namespace=default",
    }
    r, err := csvToMap(d)

    require.NoError(t, err)

    require.Contains(t, r, "tolerations")
    require.Equal(t, "key=foo,value=bar;key=foo2,value=bar2", r["tolerations"])

    require.Contains(t, r, "replicas")
    require.Equal(t, "1", r["replicas"])

    require.Contains(t, r, "namespace")
    require.Equal(t, "default", r["namespace"])
}

func TestParseBuildkitdFlags(t *testing.T) {
    dirConf := t.TempDir()

    buildkitdConfPath := path.Join(dirConf, "buildkitd-conf.toml")
    require.NoError(t, os.WriteFile(buildkitdConfPath, []byte(`
# debug enables additional debug logging
debug = true
# insecure-entitlements allows insecure entitlements, disabled by default.
insecure-entitlements = [ "network.host", "security.insecure" ]
[log]
# log formatter: json or text
format = "text"
`), 0644))

    buildkitdConfBrokenPath := path.Join(dirConf, "buildkitd-conf-broken.toml")
    require.NoError(t, os.WriteFile(buildkitdConfBrokenPath, []byte(`
[worker.oci]
gc = "maybe"
`), 0644))

    buildkitdConfUnknownFieldPath := path.Join(dirConf, "buildkitd-unknown-field.toml")
    require.NoError(t, os.WriteFile(buildkitdConfUnknownFieldPath, []byte(`
foo = "bar"
`), 0644))

    testCases := []struct {
        name                string
        flags               string
        driver              string
        driverOpts          map[string]string
        buildkitdConfigFile string
        expected            []string
        wantErr             bool
    }{
        {
            "docker-container no flags",
            "",
            "docker-container",
            nil,
            "",
            []string{
                "--allow-insecure-entitlement=network.host",
            },
            false,
        },
        {
            "kubernetes no flags",
            "",
            "kubernetes",
            nil,
            "",
            []string{
                "--allow-insecure-entitlement=network.host",
            },
            false,
        },
        {
            "remote no flags",
            "",
            "remote",
            nil,
            "",
            nil,
            false,
        },
        {
            "docker-container with insecure flag",
            "--allow-insecure-entitlement=security.insecure",
            "docker-container",
            nil,
            "",
            []string{
                "--allow-insecure-entitlement=security.insecure",
            },
            false,
        },
        {
            "docker-container with insecure and host flag",
            "--allow-insecure-entitlement=network.host --allow-insecure-entitlement=security.insecure",
            "docker-container",
            nil,
            "",
            []string{
                "--allow-insecure-entitlement=network.host",
                "--allow-insecure-entitlement=security.insecure",
            },
            false,
        },
        {
            "docker-container with network host opt",
            "",
            "docker-container",
            map[string]string{"network": "host"},
            "",
            []string{
                "--allow-insecure-entitlement=network.host",
            },
            false,
        },
        {
            "docker-container with host flag and network host opt",
            "--allow-insecure-entitlement=network.host",
            "docker-container",
            map[string]string{"network": "host"},
            "",
            []string{
                "--allow-insecure-entitlement=network.host",
            },
            false,
        },
        {
            "docker-container with insecure, host flag and network host opt",
            "--allow-insecure-entitlement=network.host --allow-insecure-entitlement=security.insecure",
            "docker-container",
            map[string]string{"network": "host"},
            "",
            []string{
                "--allow-insecure-entitlement=network.host",
                "--allow-insecure-entitlement=security.insecure",
            },
            false,
        },
        {
            "docker-container with buildkitd conf setting network.host entitlement",
            "",
            "docker-container",
            nil,
            buildkitdConfPath,
            nil,
            false,
        },
        {
            "error parsing flags",
            "foo'",
            "docker-container",
            nil,
            "",
            nil,
            true,
        },
        {
            "error parsing buildkit config",
            "",
            "docker-container",
            nil,
            buildkitdConfBrokenPath,
            nil,
            true,
        },
        {
            "unknown field in buildkit config",
            "",
            "docker-container",
            nil,
            buildkitdConfUnknownFieldPath,
            []string{
                "--allow-insecure-entitlement=network.host",
            },
            false,
        },
    }
    for _, tt := range testCases {
        tt := tt
        t.Run(tt.name, func(t *testing.T) {
            flags, err := parseBuildkitdFlags(tt.flags, tt.driver, tt.driverOpts, tt.buildkitdConfigFile)
            if tt.wantErr {
                require.Error(t, err)
                return
            }
            require.NoError(t, err)
            assert.Equal(t, tt.expected, flags)
        })
    }
}
164  builder/node.go
@@ -2,40 +2,31 @@ package builder

import (
    "context"
    "encoding/json"
    "sort"
    "strings"

    "github.com/containerd/platforms"
    "github.com/docker/buildx/driver"
    ctxkube "github.com/docker/buildx/driver/kubernetes/context"
    "github.com/docker/buildx/store"
    "github.com/docker/buildx/store/storeutil"
    "github.com/docker/buildx/util/dockerutil"
    "github.com/docker/buildx/util/imagetools"
    "github.com/docker/buildx/util/platformutil"
    "github.com/moby/buildkit/client"
    "github.com/moby/buildkit/util/grpcerrors"
    ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "golang.org/x/sync/errgroup"
    "google.golang.org/grpc/codes"
)

type Node struct {
    store.Node
    Builder     string
    Driver      *driver.DriverHandle
    Driver      driver.Driver
    DriverInfo  *driver.Info
    Platforms   []ocispecs.Platform
    ImageOpt    imagetools.Opt
    ProxyConfig map[string]string
    Version     string
    Err         error

    // worker settings
    IDs       []string
    Platforms []ocispecs.Platform
    GCPolicy  []client.PruneInfo
    Labels    map[string]string
}

// Nodes returns nodes for this builder.
@@ -43,42 +34,9 @@ func (b *Builder) Nodes() []Node {
    return b.nodes
}

type LoadNodesOption func(*loadNodesOptions)

type loadNodesOptions struct {
    data      bool
    dialMeta  map[string][]string
    clientOpt []client.ClientOpt
}

func WithData() LoadNodesOption {
    return func(o *loadNodesOptions) {
        o.data = true
    }
}

func WithDialMeta(dialMeta map[string][]string) LoadNodesOption {
    return func(o *loadNodesOptions) {
        o.dialMeta = dialMeta
    }
}

func WithClientOpt(clientOpt ...client.ClientOpt) LoadNodesOption {
    return func(o *loadNodesOptions) {
        o.clientOpt = clientOpt
    }
}

// LoadNodes loads and returns nodes for this builder.
// TODO: this should be a method on a Node object and lazy load data for each driver.
func (b *Builder) LoadNodes(ctx context.Context, opts ...LoadNodesOption) (_ []Node, err error) {
    lno := loadNodesOptions{
        data: false,
    }
    for _, opt := range opts {
        opt(&lno)
    }

func (b *Builder) LoadNodes(ctx context.Context, withData bool) (_ []Node, err error) {
    eg, _ := errgroup.WithContext(ctx)
    b.nodes = make([]Node, len(b.NodeGroup.Nodes))

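// Illustrative sketch (not from the compared commits): the two LoadNodes call
// styles that appear in this compare. The functional-options form:
//
//     nodes, err := b.LoadNodes(ctx, WithData())
//
// and the boolean form on the other side of the diff:
//
//     nodes, err := b.LoadNodes(ctx, false)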
@@ -88,7 +46,7 @@ func (b *Builder) LoadNodes(ctx context.Context, opts ...LoadNodesOption) (_ []N
        }
    }()

    factory, err := b.Factory(ctx, lno.dialMeta)
    factory, err := b.Factory(ctx)
    if err != nil {
        return nil, err
    }
@@ -105,7 +63,6 @@ func (b *Builder) LoadNodes(ctx context.Context, opts ...LoadNodesOption) (_ []N
            Node:        n,
            ProxyConfig: storeutil.GetProxyConfig(b.opts.dockerCli),
            Platforms:   n.Platforms,
            Builder:     b.Name,
        }
        defer func() {
            b.nodes[i] = node
@@ -117,19 +74,37 @@ func (b *Builder) LoadNodes(ctx context.Context, opts ...LoadNodesOption) (_ []N
            return nil
        }

        d, err := driver.GetDriver(ctx, factory, driver.InitConfig{
            Name:            driver.BuilderName(n.Name),
            EndpointAddr:    n.Endpoint,
            DockerAPI:       dockerapi,
            ContextStore:    b.opts.dockerCli.ContextStore(),
            BuildkitdFlags:  n.BuildkitdFlags,
            Files:           n.Files,
            DriverOpts:      n.DriverOpts,
            Auth:            imageopt.Auth,
            Platforms:       n.Platforms,
            ContextPathHash: b.opts.contextPathHash,
            DialMeta:        lno.dialMeta,
        })
        contextStore := b.opts.dockerCli.ContextStore()

        var kcc driver.KubeClientConfig
        kcc, err = ctxkube.ConfigFromContext(n.Endpoint, contextStore)
        if err != nil {
            // err is returned if n.Endpoint is a non-context name like "unix:///var/run/docker.sock".
            // try again with name="default".
            // FIXME(@AkihiroSuda): n should retain real context name.
            kcc, err = ctxkube.ConfigFromContext("default", contextStore)
            if err != nil {
                logrus.Error(err)
            }
        }

        tryToUseKubeConfigInCluster := false
        if kcc == nil {
            tryToUseKubeConfigInCluster = true
        } else {
            if _, err := kcc.ClientConfig(); err != nil {
                tryToUseKubeConfigInCluster = true
            }
        }
        if tryToUseKubeConfigInCluster {
            kccInCluster := driver.KubeClientConfigInCluster{}
            if _, err := kccInCluster.ClientConfig(); err == nil {
                logrus.Debug("using kube config in cluster")
                kcc = kccInCluster
            }
        }

        d, err := driver.GetDriver(ctx, "buildx_buildkit_"+n.Name, factory, n.Endpoint, dockerapi, imageopt.Auth, kcc, n.Flags, n.Files, n.DriverOpts, n.Platforms, b.opts.contextPathHash)
        if err != nil {
            node.Err = err
            return nil
@@ -137,8 +112,8 @@ func (b *Builder) LoadNodes(ctx context.Context, opts ...LoadNodesOption) (_ []N
        node.Driver = d
        node.ImageOpt = imageopt

        if lno.data {
            if err := node.loadData(ctx, lno.clientOpt...); err != nil {
        if withData {
            if err := node.loadData(ctx); err != nil {
                node.Err = err
            }
        }
@@ -152,7 +127,7 @@ func (b *Builder) LoadNodes(ctx context.Context, opts ...LoadNodesOption) (_ []N
    }

    // TODO: This should be done in the routine loading driver data
    if lno.data {
    if withData {
        kubernetesDriverCount := 0
        for _, d := range b.nodes {
            if d.DriverInfo != nil && len(d.DriverInfo.DynamicNodes) > 0 {
@@ -173,7 +148,7 @@ func (b *Builder) LoadNodes(ctx context.Context, opts ...LoadNodesOption) (_ []N
                if pl := di.DriverInfo.DynamicNodes[i].Platforms; len(pl) > 0 {
                    diClone.Platforms = pl
                }
                nodes = append(nodes, diClone)
                nodes = append(nodes, di)
            }
            dynamicNodes = append(dynamicNodes, di.DriverInfo.DynamicNodes...)
        }
@@ -189,52 +164,7 @@ func (b *Builder) LoadNodes(ctx context.Context, opts ...LoadNodesOption) (_ []N
    return b.nodes, nil
}

func (n *Node) MarshalJSON() ([]byte, error) {
    var status string
    if n.DriverInfo != nil {
        status = n.DriverInfo.Status.String()
    }
    var nerr string
    if n.Err != nil {
        status = "error"
        nerr = strings.TrimSpace(n.Err.Error())
    }
    var pp []string
    for _, p := range n.Platforms {
        pp = append(pp, platforms.Format(p))
    }
    return json.Marshal(struct {
        Name           string
        Endpoint       string
        BuildkitdFlags []string           `json:"Flags,omitempty"`
        DriverOpts     map[string]string  `json:",omitempty"`
        Files          map[string][]byte  `json:",omitempty"`
        Status         string             `json:",omitempty"`
        ProxyConfig    map[string]string  `json:",omitempty"`
        Version        string             `json:",omitempty"`
        Err            string             `json:",omitempty"`
        IDs            []string           `json:",omitempty"`
        Platforms      []string           `json:",omitempty"`
        GCPolicy       []client.PruneInfo `json:",omitempty"`
        Labels         map[string]string  `json:",omitempty"`
    }{
        Name:           n.Name,
        Endpoint:       n.Endpoint,
        BuildkitdFlags: n.BuildkitdFlags,
        DriverOpts:     n.DriverOpts,
        Files:          n.Files,
        Status:         status,
        ProxyConfig:    n.ProxyConfig,
        Version:        n.Version,
        Err:            nerr,
        IDs:            n.IDs,
        Platforms:      pp,
        GCPolicy:       n.GCPolicy,
        Labels:         n.Labels,
    })
}

func (n *Node) loadData(ctx context.Context, clientOpt ...client.ClientOpt) error {
func (n *Node) loadData(ctx context.Context) error {
    if n.Driver == nil {
        return nil
    }
@@ -244,7 +174,7 @@ func (n *Node) loadData(ctx context.Context, clientOpt ...client.ClientOpt) erro
    }
    n.DriverInfo = info
    if n.DriverInfo.Status == driver.Running {
        driverClient, err := n.Driver.Client(ctx, clientOpt...)
        driverClient, err := n.Driver.Client(ctx)
        if err != nil {
            return err
        }
@@ -252,15 +182,9 @@ func (n *Node) loadData(ctx context.Context, clientOpt ...client.ClientOpt) erro
        if err != nil {
            return errors.Wrap(err, "listing workers")
        }
        for idx, w := range workers {
            n.IDs = append(n.IDs, w.ID)
        for _, w := range workers {
            n.Platforms = append(n.Platforms, w.Platforms...)
            if idx == 0 {
                n.GCPolicy = w.GCPolicy
                n.Labels = w.Labels
            }
        }
        sort.Strings(n.IDs)
        n.Platforms = platformutil.Dedupe(n.Platforms)
        inf, err := driverClient.Info(ctx)
        if err != nil {

@@ -1,75 +0,0 @@
package main

import (
    "context"
    "os"
    "runtime"
    "runtime/pprof"

    "github.com/moby/buildkit/util/bklog"
    "github.com/sirupsen/logrus"
)

func setupDebugProfiles(ctx context.Context) (stop func()) {
    var stopFuncs []func()
    if fn := setupCPUProfile(ctx); fn != nil {
        stopFuncs = append(stopFuncs, fn)
    }
    if fn := setupHeapProfile(ctx); fn != nil {
        stopFuncs = append(stopFuncs, fn)
    }
    return func() {
        for _, fn := range stopFuncs {
            fn()
        }
    }
}

func setupCPUProfile(ctx context.Context) (stop func()) {
    if cpuProfile := os.Getenv("BUILDX_CPU_PROFILE"); cpuProfile != "" {
        f, err := os.Create(cpuProfile)
        if err != nil {
            bklog.G(ctx).Warn("could not create cpu profile", logrus.WithError(err))
            return nil
        }

        if err := pprof.StartCPUProfile(f); err != nil {
            bklog.G(ctx).Warn("could not start cpu profile", logrus.WithError(err))
            _ = f.Close()
            return nil
        }

        return func() {
            pprof.StopCPUProfile()
            if err := f.Close(); err != nil {
                bklog.G(ctx).Warn("could not close file for cpu profile", logrus.WithError(err))
            }
        }
    }
    return nil
}

func setupHeapProfile(ctx context.Context) (stop func()) {
    if heapProfile := os.Getenv("BUILDX_MEM_PROFILE"); heapProfile != "" {
        // Memory profile is only created on stop.
        return func() {
            f, err := os.Create(heapProfile)
            if err != nil {
                bklog.G(ctx).Warn("could not create memory profile", logrus.WithError(err))
                return
            }

            // get up-to-date statistics
            runtime.GC()

            if err := pprof.WriteHeapProfile(f); err != nil {
                bklog.G(ctx).Warn("could not write memory profile", logrus.WithError(err))
            }

            if err := f.Close(); err != nil {
                bklog.G(ctx).Warn("could not close file for memory profile", logrus.WithError(err))
            }
        }
    }
    return nil
}
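// Illustrative sketch (not from the compared commits): the deleted profiling
// helpers above are driven by environment variables and a deferred stop
// function, as the run function further below shows:
//
//     stop := setupDebugProfiles(context.TODO())
//     defer stop()
//
// with BUILDX_CPU_PROFILE and BUILDX_MEM_PROFILE naming the output files.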
@@ -1,13 +1,11 @@
package main

import (
    "context"
    "fmt"
    "os"

    "github.com/containerd/containerd/pkg/seed"
    "github.com/docker/buildx/commands"
    controllererrors "github.com/docker/buildx/controller/errdefs"
    "github.com/docker/buildx/util/desktop"
    "github.com/docker/buildx/version"
    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli-plugins/manager"
@@ -17,21 +15,20 @@ import (
    cliflags "github.com/docker/cli/cli/flags"
    "github.com/moby/buildkit/solver/errdefs"
    "github.com/moby/buildkit/util/stack"
    "github.com/pkg/errors"
    "go.opentelemetry.io/otel"

    _ "k8s.io/client-go/plugin/pkg/client/auth/azure"
    _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
    _ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
    _ "k8s.io/client-go/plugin/pkg/client/auth/openstack"

    _ "github.com/docker/buildx/driver/docker"
    _ "github.com/docker/buildx/driver/docker-container"
    _ "github.com/docker/buildx/driver/kubernetes"
    _ "github.com/docker/buildx/driver/remote"

    // Use custom grpc codec to utilize vtprotobuf
    _ "github.com/moby/buildkit/util/grpcutil/encoding/proto"
)

func init() {
    seed.WithTimeAndRand()
    stack.SetVersionInfo(version.Version, version.Revision)
}

@@ -39,27 +36,10 @@ func runStandalone(cmd *command.DockerCli) error {
    if err := cmd.Initialize(cliflags.NewClientOptions()); err != nil {
        return err
    }
    defer flushMetrics(cmd)

    rootCmd := commands.NewRootCmd(os.Args[0], false, cmd)
    return rootCmd.Execute()
}

// flushMetrics will manually flush metrics from the configured
// meter provider. This is needed when running in standalone mode
// because the meter provider is initialized by the cli library,
// but the mechanism for forcing it to report is not presently
// exposed and not invoked when run in standalone mode.
// There are plans to fix that in the next release, but this is
// needed temporarily until the API for this is more thorough.
func flushMetrics(cmd *command.DockerCli) {
    if mp, ok := cmd.MeterProvider().(command.MeterProvider); ok {
        if err := mp.ForceFlush(context.Background()); err != nil {
            otel.Handle(err)
        }
    }
}

func runPlugin(cmd *command.DockerCli) error {
    rootCmd := commands.NewRootCmd("buildx", true, cmd)
    return plugin.RunPlugin(cmd, rootCmd, manager.Metadata{
@@ -69,16 +49,6 @@ func runPlugin(cmd *command.DockerCli) error {
    })
}

func run(cmd *command.DockerCli) error {
    stopProfiles := setupDebugProfiles(context.TODO())
    defer stopProfiles()

    if plugin.RunningStandalone() {
        return runStandalone(cmd)
    }
    return runPlugin(cmd)
}

func main() {
    cmd, err := command.NewDockerCli()
    if err != nil {
@@ -86,11 +56,15 @@ func main() {
        os.Exit(1)
    }

    if err = run(cmd); err == nil {
    if plugin.RunningStandalone() {
        err = runStandalone(cmd)
    } else {
        err = runPlugin(cmd)
    }
    if err == nil {
        return
    }

    // Check the error from the run function above.
    if sterr, ok := err.(cli.StatusError); ok {
        if sterr.Status != "" {
            fmt.Fprintln(cmd.Err(), sterr.Status)
@@ -112,15 +86,5 @@ func main() {
        fmt.Fprintf(cmd.Err(), "ERROR: %v\n", err)
    }

    var ebr *desktop.ErrorWithBuildRef
    if errors.As(err, &ebr) {
        ebr.Print(cmd.Err())
    } else {
        var be *controllererrors.BuildError
        if errors.As(err, &be) {
            be.PrintBuildDetails(cmd.Err())
        }
    }

    os.Exit(1)
}

@@ -4,6 +4,7 @@ import (
    "github.com/moby/buildkit/util/tracing/detect"
    "go.opentelemetry.io/otel"

    _ "github.com/moby/buildkit/util/tracing/detect/delegated"
    _ "github.com/moby/buildkit/util/tracing/env"
)

@@ -1,4 +1 @@
comment: false

ignore:
  - "**/*.pb.go"

746  commands/bake.go
@@ -1,70 +1,35 @@
package commands

import (
    "bytes"
    "cmp"
    "context"
    "crypto/sha256"
    "encoding/hex"
    "encoding/json"
    "fmt"
    "io"
    "os"
    "slices"
    "sort"
    "strings"
    "sync"
    "text/tabwriter"

    "github.com/containerd/console"
    "github.com/containerd/platforms"
    "github.com/containerd/containerd/platforms"
    "github.com/docker/buildx/bake"
    "github.com/docker/buildx/bake/hclparser"
    "github.com/docker/buildx/build"
    "github.com/docker/buildx/builder"
    "github.com/docker/buildx/controller/pb"
    "github.com/docker/buildx/localstate"
    "github.com/docker/buildx/util/buildflags"
    "github.com/docker/buildx/util/cobrautil/completion"
    "github.com/docker/buildx/util/confutil"
    "github.com/docker/buildx/util/desktop"
    "github.com/docker/buildx/util/dockerutil"
    "github.com/docker/buildx/util/osutil"
    "github.com/docker/buildx/util/progress"
    "github.com/docker/buildx/util/tracing"
    "github.com/docker/cli/cli/command"
    "github.com/moby/buildkit/identity"
    "github.com/moby/buildkit/util/progress/progressui"
    "github.com/moby/buildkit/util/appcontext"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
    "github.com/tonistiigi/go-csvvalue"
    "go.opentelemetry.io/otel/attribute"
)

type bakeOptions struct {
    files     []string
    overrides []string

    sbom       string
    provenance string
    allow      []string

    builder      string
    metadataFile string
    exportPush   bool
    exportLoad   bool
    callFunc     string

    print bool
    list  string

    // TODO: remove deprecated flags
    listTargets bool
    listVars    bool
    printOnly   bool
    commonOptions
}

func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in bakeOptions, cFlags commonFlags) (err error) {
    mp := dockerCli.MeterProvider()
func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error) {
    ctx := appcontext.Context()

    ctx, end, err := tracing.TraceCurrentCommand(ctx, "bake")
    if err != nil {
@@ -74,31 +39,40 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
        end(err)
    }()

    url, cmdContext, targets := bakeArgs(targets)
    var url string
    cmdContext := "cwd://"

    if len(targets) > 0 {
        if bake.IsRemoteURL(targets[0]) {
            url = targets[0]
            targets = targets[1:]
            if len(targets) > 0 {
                if bake.IsRemoteURL(targets[0]) {
                    cmdContext = targets[0]
                    targets = targets[1:]
                }
            }
        }
    }

    if len(targets) == 0 {
        targets = []string{"default"}
    }

    callFunc, err := buildflags.ParseCallFunc(in.callFunc)
    if err != nil {
        return err
    }

    overrides := in.overrides
    if in.exportPush {
        if in.exportLoad {
            return errors.Errorf("push and load may not be set together at the moment")
        }
        overrides = append(overrides, "*.push=true")
    } else if in.exportLoad {
        overrides = append(overrides, "*.output=type=docker")
    }
    if in.exportLoad {
        overrides = append(overrides, "*.load=true")
    if in.noCache != nil {
        overrides = append(overrides, fmt.Sprintf("*.no-cache=%t", *in.noCache))
    }
    if callFunc != nil {
        overrides = append(overrides, fmt.Sprintf("*.call=%s", callFunc.Name))
    }
    if cFlags.noCache != nil {
        overrides = append(overrides, fmt.Sprintf("*.no-cache=%t", *cFlags.noCache))
    }
    if cFlags.pull != nil {
        overrides = append(overrides, fmt.Sprintf("*.pull=%t", *cFlags.pull))
    if in.pull != nil {
        overrides = append(overrides, fmt.Sprintf("*.pull=%t", *in.pull))
    }
    if in.sbom != "" {
        overrides = append(overrides, fmt.Sprintf("*.attest=%s", buildflags.CanonicalizeAttest("sbom", in.sbom)))
@@ -108,31 +82,28 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
    }
    contextPathHash, _ := os.Getwd()

    ent, err := bake.ParseEntitlements(in.allow)
    ctx2, cancel := context.WithCancel(context.TODO())
    defer cancel()
    printer, err := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, in.progress)
    if err != nil {
        return err
    }
    wd, err := os.Getwd()
    if err != nil {
        return errors.Wrapf(err, "failed to get current working directory")
    }
    // filesystem access under the current working directory is allowed by default
    ent.FSRead = append(ent.FSRead, wd)
    ent.FSWrite = append(ent.FSWrite, wd)

    ctx2, cancel := context.WithCancelCause(context.TODO())
    defer cancel(errors.WithStack(context.Canceled))
    defer func() {
        if printer != nil {
            err1 := printer.Wait()
            if err == nil {
                err = err1
            }
        }
    }()

    var nodes []builder.Node
    var progressConsoleDesc, progressTextDesc string

    if in.print && in.list != "" {
        return errors.New("--print and --list are mutually exclusive")
    }
    var files []bake.File
    var inp *bake.Input

    // instance only needed for reading remote bake files or building
    var driverType string
    if url != "" || !(in.print || in.list != "") {
    if url != "" || !in.printOnly {
        b, err := builder.New(dockerCli,
            builder.WithName(in.builder),
            builder.WithContextPathHash(contextPathHash),
@@ -143,291 +114,77 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
        if err = updateLastActivity(dockerCli, b.NodeGroup); err != nil {
            return errors.Wrapf(err, "failed to update builder last activity time")
        }
        nodes, err = b.LoadNodes(ctx)
        nodes, err = b.LoadNodes(ctx, false)
        if err != nil {
            return err
        }
        progressConsoleDesc = fmt.Sprintf("%s:%s", b.Driver, b.Name)
        progressTextDesc = fmt.Sprintf("building with %q instance using %s driver", b.Name, b.Driver)
        driverType = b.Driver
    }

    var term bool
    if _, err := console.ConsoleFromFile(os.Stderr); err == nil {
        term = true
    if url != "" {
        files, inp, err = bake.ReadRemoteFiles(ctx, nodes, url, in.files, printer)
    } else {
        files, err = bake.ReadLocalFiles(in.files)
    }
    attributes := bakeMetricAttributes(dockerCli, driverType, url, cmdContext, targets, &in)

    progressMode := progressui.DisplayMode(cFlags.progress)
    var printer *progress.Printer

    makePrinter := func() error {
        var err error
        printer, err = progress.NewPrinter(ctx2, os.Stderr, progressMode,
            progress.WithDesc(progressTextDesc, progressConsoleDesc),
            progress.WithMetrics(mp, attributes),
            progress.WithOnClose(func() {
                printWarnings(os.Stderr, printer.Warnings(), progressMode)
            }),
        )
        return err
    }

    if err := makePrinter(); err != nil {
        return err
    }

    files, inp, err := readBakeFiles(ctx, nodes, url, in.files, dockerCli.In(), printer)
    if err != nil {
        return err
    }

    if len(files) == 0 {
        return errors.New("couldn't find a bake definition")
    }

    defaults := map[string]string{
    tgts, grps, err := bake.ReadTargets(ctx, files, targets, overrides, map[string]string{
        // don't forget to update documentation if you add a new
        // built-in variable: docs/bake-reference.md#built-in-variables
        // built-in variable: docs/manuals/bake/file-definition.md#built-in-variables
        "BAKE_CMD_CONTEXT":    cmdContext,
        "BAKE_LOCAL_PLATFORM": platforms.Format(platforms.DefaultSpec()),
    }

    if in.list != "" {
        cfg, pm, err := bake.ParseFiles(files, defaults)
        if err != nil {
            return err
        }
        if err = printer.Wait(); err != nil {
            return err
        }
        list, err := parseList(in.list)
        if err != nil {
            return err
        }
        switch list.Type {
        case "targets":
            return printTargetList(dockerCli.Out(), list.Format, cfg)
        case "variables":
            return printVars(dockerCli.Out(), list.Format, pm.AllVariables)
        }
    }

    tgts, grps, err := bake.ReadTargets(ctx, files, targets, overrides, defaults, &ent)
        "BAKE_LOCAL_PLATFORM": platforms.DefaultString(),
    })
    if err != nil {
        return err
    }

    if v := os.Getenv("SOURCE_DATE_EPOCH"); v != "" {
        // TODO: extract env var parsing to a method easily usable by library consumers
        for _, t := range tgts {
            if _, ok := t.Args["SOURCE_DATE_EPOCH"]; ok {
                continue
            }
            if t.Args == nil {
                t.Args = map[string]*string{}
            }
            t.Args["SOURCE_DATE_EPOCH"] = &v
        }
    }

    // this function can update the target context string from the input, so call it before the printOnly check
    bo, err := bake.TargetsToBuildOpt(tgts, inp)
    if err != nil {
        return err
    }

    def := struct {
        Group  map[string]*bake.Group  `json:"group,omitempty"`
        Target map[string]*bake.Target `json:"target"`
    }{
        Group:  grps,
        Target: tgts,
    }

    if in.print {
        if err = printer.Wait(); err != nil {
            return err
        }
        dtdef, err := json.MarshalIndent(def, "", " ")
    if in.printOnly {
        dt, err := json.MarshalIndent(struct {
            Group  map[string]*bake.Group  `json:"group,omitempty"`
            Target map[string]*bake.Target `json:"target"`
        }{
            grps,
            tgts,
        }, "", " ")
        if err != nil {
            return err
        }
        _, err = fmt.Fprintln(dockerCli.Out(), string(dtdef))
        return err
    }

    for _, opt := range bo {
        if opt.CallFunc != nil {
            cf, err := buildflags.ParseCallFunc(opt.CallFunc.Name)
            if err != nil {
                return err
            }
            opt.CallFunc.Name = cf.Name
        }
    }

    exp, err := ent.Validate(bo)
    if err != nil {
        return err
    }
    if err := exp.Prompt(ctx, url != "", &syncWriter{w: dockerCli.Err(), wait: printer.Wait}); err != nil {
        return err
    }
    if printer.IsDone() {
        // init new printer as old one was stopped to show the prompt
        if err := makePrinter(); err != nil {
        err = printer.Wait()
        printer = nil
        if err != nil {
            return err
        }
        fmt.Fprintln(dockerCli.Out(), string(dt))
        return nil
    }

    if err := saveLocalStateGroup(dockerCli, in, targets, bo, overrides, def); err != nil {
        return err
    }

    done := timeBuildCommand(mp, attributes)
    resp, retErr := build.Build(ctx, nodes, bo, dockerutil.NewClient(dockerCli), confutil.NewConfig(dockerCli), printer)
    if err := printer.Wait(); retErr == nil {
        retErr = err
    }
    if retErr != nil {
        err = wrapBuildError(retErr, true)
    }
    done(err)

    resp, err := build.Build(ctx, nodes, bo, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), printer)
    if err != nil {
        return err
        return wrapBuildError(err, true)
    }

    if progressMode != progressui.QuietMode && progressMode != progressui.RawJSONMode {
        desktop.PrintBuildDetails(os.Stderr, printer.BuildRefs(), term)
    }
    if len(in.metadataFile) > 0 {
        dt := make(map[string]interface{})
        for t, r := range resp {
            dt[t] = decodeExporterResponse(r.ExporterResponse)
        }
        if callFunc == nil {
            if warnings := printer.Warnings(); len(warnings) > 0 && confutil.MetadataWarningsEnabled() {
                dt["buildx.build.warnings"] = warnings
            }
        }
        if err := writeMetadataFile(in.metadataFile, dt); err != nil {
            return err
        }
    }

    var callFormatJSON bool
    jsonResults := map[string]map[string]any{}
    if callFunc != nil {
        callFormatJSON = callFunc.Format == "json"
    }
    var sep bool
    var exitCode int

    names := make([]string, 0, len(bo))
    for name := range bo {
        names = append(names, name)
    }
    slices.Sort(names)

    for _, name := range names {
        req := bo[name]
        if req.CallFunc == nil {
            continue
        }

        pf := &pb.CallFunc{
            Name:         req.CallFunc.Name,
            Format:       req.CallFunc.Format,
            IgnoreStatus: req.CallFunc.IgnoreStatus,
        }

        if callFunc != nil {
            pf.Format = callFunc.Format
            pf.IgnoreStatus = callFunc.IgnoreStatus
        }

        var res map[string]string
        if sp, ok := resp[name]; ok {
            res = sp.ExporterResponse
        }

        if callFormatJSON {
            jsonResults[name] = map[string]any{}
            buf := &bytes.Buffer{}
            if code, err := printResult(buf, pf, res, name, &req.Inputs); err != nil {
                jsonResults[name]["error"] = err.Error()
                exitCode = 1
            } else if code != 0 && exitCode == 0 {
                exitCode = code
            }
            m := map[string]*json.RawMessage{}
            if err := json.Unmarshal(buf.Bytes(), &m); err == nil {
                for k, v := range m {
                    jsonResults[name][k] = v
                }
            } else {
                jsonResults[name][pf.Name] = json.RawMessage(buf.Bytes())
            }
        } else {
            if sep {
                fmt.Fprintln(dockerCli.Out())
            } else {
                sep = true
            }
            fmt.Fprintf(dockerCli.Out(), "%s\n", name)
            if descr := tgts[name].Description; descr != "" {
                fmt.Fprintf(dockerCli.Out(), "%s\n", descr)
            }

            fmt.Fprintln(dockerCli.Out())
            if code, err := printResult(dockerCli.Out(), pf, res, name, &req.Inputs); err != nil {
                fmt.Fprintf(dockerCli.Out(), "error: %v\n", err)
                exitCode = 1
            } else if code != 0 && exitCode == 0 {
                exitCode = code
            }
        }
    }
    if callFormatJSON {
        out := struct {
            Group  map[string]*bake.Group    `json:"group,omitempty"`
            Target map[string]map[string]any `json:"target"`
        }{
            Group:  grps,
            Target: map[string]map[string]any{},
        }

        for name, def := range tgts {
            out.Target[name] = map[string]any{
                "build": def,
            }
            if res, ok := jsonResults[name]; ok {
                printName := bo[name].CallFunc.Name
                if printName == "lint" {
                    printName = "check"
                }
                out.Target[name][printName] = res
            }
        }
        dt, err := json.MarshalIndent(out, "", " ")
        if err != nil {
            return err
        }
        fmt.Fprintln(dockerCli.Out(), string(dt))
    }

    if exitCode != 0 {
        os.Exit(exitCode)
    }

    return nil
    return err
}

func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
    var options bakeOptions
    var cFlags commonFlags

    cmd := &cobra.Command{
        Use: "bake [OPTIONS] [TARGET...]",
@@ -436,378 +193,27 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
        RunE: func(cmd *cobra.Command, args []string) error {
            // reset to nil to avoid overriding when the flag is unset
            if !cmd.Flags().Lookup("no-cache").Changed {
                cFlags.noCache = nil
                options.noCache = nil
            }
            if !cmd.Flags().Lookup("pull").Changed {
                cFlags.pull = nil
                options.pull = nil
            }
            if options.list == "" {
                if options.listTargets {
                    options.list = "targets"
                } else if options.listVars {
                    options.list = "variables"
                }
            }
            options.builder = rootOpts.builder
            options.metadataFile = cFlags.metadataFile
            // Other common flags (noCache, pull and progress) are processed in runBake function.
            return runBake(cmd.Context(), dockerCli, args, options, cFlags)
            options.commonOptions.builder = rootOpts.builder
            return runBake(dockerCli, args, options)
        },
        ValidArgsFunction: completion.BakeTargets(options.files),
    }

    flags := cmd.Flags()

    flags.StringArrayVarP(&options.files, "file", "f", []string{}, "Build definition file")
    flags.BoolVar(&options.exportLoad, "load", false, `Shorthand for "--set=*.output=type=docker"`)
    flags.BoolVar(&options.printOnly, "print", false, "Print the options without building")
    flags.BoolVar(&options.exportPush, "push", false, `Shorthand for "--set=*.output=type=registry"`)
    flags.StringVar(&options.sbom, "sbom", "", `Shorthand for "--set=*.attest=type=sbom"`)
    flags.StringVar(&options.provenance, "provenance", "", `Shorthand for "--set=*.attest=type=provenance"`)
    flags.StringArrayVar(&options.overrides, "set", nil, `Override target value (e.g., "targetpattern.key=value")`)
    flags.StringVar(&options.callFunc, "call", "build", `Set method for evaluating build ("check", "outline", "targets")`)
    flags.StringArrayVar(&options.allow, "allow", nil, "Allow build to access specified resources")

    flags.VarPF(callAlias(&options.callFunc, "check"), "check", "", `Shorthand for "--call=check"`)
    flags.Lookup("check").NoOptDefVal = "true"

    flags.BoolVar(&options.print, "print", false, "Print the options without building")
    flags.StringVar(&options.list, "list", "", "List targets or variables")

    // TODO: remove deprecated flags
    flags.BoolVar(&options.listTargets, "list-targets", false, "List available targets")
    flags.MarkHidden("list-targets")
    flags.MarkDeprecated("list-targets", "list-targets is deprecated, use list=targets instead")
    flags.BoolVar(&options.listVars, "list-variables", false, "List defined variables")
    flags.MarkHidden("list-variables")
    flags.MarkDeprecated("list-variables", "list-variables is deprecated, use list=variables instead")

    commonBuildFlags(&cFlags, flags)
    commonBuildFlags(&options.commonOptions, flags)

    return cmd
}

func saveLocalStateGroup(dockerCli command.Cli, in bakeOptions, targets []string, bo map[string]build.Options, overrides []string, def any) error {
    prm := confutil.MetadataProvenance()
    if len(in.metadataFile) == 0 {
        prm = confutil.MetadataProvenanceModeDisabled
    }
    groupRef := identity.NewID()
    refs := make([]string, 0, len(bo))
    for k, b := range bo {
        if b.CallFunc != nil {
            continue
        }
        b.Ref = identity.NewID()
        b.GroupRef = groupRef
        b.ProvenanceResponseMode = prm
        refs = append(refs, b.Ref)
        bo[k] = b
    }
    if len(refs) == 0 {
        return nil
    }
    l, err := localstate.New(confutil.NewConfig(dockerCli))
    if err != nil {
        return err
    }
    dtdef, err := json.MarshalIndent(def, "", " ")
    if err != nil {
        return err
    }
    return l.SaveGroup(groupRef, localstate.StateGroup{
        Definition: dtdef,
        Targets:    targets,
        Inputs:     overrides,
        Refs:       refs,
    })
}

// bakeArgs will retrieve the remote url, command context, and targets
// from the command line arguments.
func bakeArgs(args []string) (url, cmdContext string, targets []string) {
    cmdContext, targets = "cwd://", args
    if len(targets) == 0 || !build.IsRemoteURL(targets[0]) {
        return url, cmdContext, targets
    }
    url, targets = targets[0], targets[1:]
    if len(targets) == 0 || !build.IsRemoteURL(targets[0]) {
        return url, cmdContext, targets
    }
    cmdContext, targets = targets[0], targets[1:]
    return url, cmdContext, targets
}
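// Illustrative sketch (not from the compared commits): bakeArgs peels off up to
// two leading remote URLs. With a hypothetical repository URL:
//
//     url, cmdContext, targets := bakeArgs([]string{
//         "https://github.com/example/repo.git", "my-target",
//     })
//     // url == "https://github.com/example/repo.git"
//     // cmdContext == "cwd://", targets == []string{"my-target"}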

func readBakeFiles(ctx context.Context, nodes []builder.Node, url string, names []string, stdin io.Reader, pw progress.Writer) (files []bake.File, inp *bake.Input, err error) {
    var lnames []string // local
    var rnames []string // remote
    var anames []string // both
    for _, v := range names {
        if strings.HasPrefix(v, "cwd://") {
            tname := strings.TrimPrefix(v, "cwd://")
            lnames = append(lnames, tname)
            anames = append(anames, tname)
        } else {
            rnames = append(rnames, v)
            anames = append(anames, v)
        }
    }

    if url != "" {
        var rfiles []bake.File
        rfiles, inp, err = bake.ReadRemoteFiles(ctx, nodes, url, rnames, pw)
        if err != nil {
            return nil, nil, err
        }
        files = append(files, rfiles...)
    }

    if len(lnames) > 0 || url == "" {
        var lfiles []bake.File
        progress.Wrap("[internal] load local bake definitions", pw.Write, func(sub progress.SubLogger) error {
            if url != "" {
                lfiles, err = bake.ReadLocalFiles(lnames, stdin, sub)
            } else {
                lfiles, err = bake.ReadLocalFiles(anames, stdin, sub)
            }
            return nil
        })
        if err != nil {
            return nil, nil, err
        }
        files = append(files, lfiles...)
    }

    return
}

type listEntry struct {
    Type   string
    Format string
}

func parseList(input string) (listEntry, error) {
    res := listEntry{}

    fields, err := csvvalue.Fields(input, nil)
    if err != nil {
        return res, err
    }

    if len(fields) == 1 && fields[0] == input && !strings.HasPrefix(input, "type=") {
        res.Type = input
    }

    if res.Type == "" {
        for _, field := range fields {
            key, value, ok := strings.Cut(field, "=")
            if !ok {
                return res, errors.Errorf("invalid value %s", field)
            }
            key = strings.TrimSpace(strings.ToLower(key))
            switch key {
            case "type":
                res.Type = value
            case "format":
                res.Format = value
            default:
                return res, errors.Errorf("unexpected key '%s' in '%s'", key, field)
            }
        }
    }
    if res.Format == "" {
        res.Format = "table"
    }

    switch res.Type {
    case "targets", "variables":
    default:
        return res, errors.Errorf("invalid list type %q", res.Type)
    }

    switch res.Format {
    case "table", "json":
    default:
        return res, errors.Errorf("invalid list format %q", res.Format)
    }

    return res, nil
}
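// Illustrative sketch (not from the compared commits): the bare and key=value
// forms accepted by parseList:
//
//     e, _ := parseList("targets")                  // {Type: "targets", Format: "table"}
//     e, _ = parseList("type=targets,format=json")  // {Type: "targets", Format: "json"}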

func printVars(w io.Writer, format string, vars []*hclparser.Variable) error {
    slices.SortFunc(vars, func(a, b *hclparser.Variable) int {
        return cmp.Compare(a.Name, b.Name)
    })

    if format == "json" {
        enc := json.NewEncoder(w)
        enc.SetIndent("", " ")
        return enc.Encode(vars)
    }

    tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
    defer tw.Flush()

    tw.Write([]byte("VARIABLE\tVALUE\tDESCRIPTION\n"))

    for _, v := range vars {
        var value string
        if v.Value != nil {
            value = *v.Value
        } else {
            value = "<null>"
        }
        fmt.Fprintf(tw, "%s\t%s\t%s\n", v.Name, value, v.Description)
    }
    return nil
}

func printTargetList(w io.Writer, format string, cfg *bake.Config) error {
    type targetOrGroup struct {
        name   string
        target *bake.Target
        group  *bake.Group
    }

    list := make([]targetOrGroup, 0, len(cfg.Targets)+len(cfg.Groups))
    for _, tgt := range cfg.Targets {
        list = append(list, targetOrGroup{name: tgt.Name, target: tgt})
    }
    for _, grp := range cfg.Groups {
        list = append(list, targetOrGroup{name: grp.Name, group: grp})
    }

    slices.SortFunc(list, func(a, b targetOrGroup) int {
        return cmp.Compare(a.name, b.name)
    })

    var tw *tabwriter.Writer
    if format == "table" {
        tw = tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
        defer tw.Flush()
        tw.Write([]byte("TARGET\tDESCRIPTION\n"))
    }

    type targetList struct {
        Name        string `json:"name"`
        Description string `json:"description,omitempty"`
        Group       bool   `json:"group,omitempty"`
    }
    var targetsList []targetList

    for _, tgt := range list {
        if strings.HasPrefix(tgt.name, "_") {
            // convention for a private target
            continue
        }
        var descr string
        if tgt.target != nil {
            descr = tgt.target.Description
            targetsList = append(targetsList, targetList{Name: tgt.name, Description: descr})
        } else if tgt.group != nil {
            descr = tgt.group.Description
            if len(tgt.group.Targets) > 0 {
                slices.Sort(tgt.group.Targets)
                names := strings.Join(tgt.group.Targets, ", ")
                if descr != "" {
                    descr += " (" + names + ")"
                } else {
                    descr = names
                }
            }
            targetsList = append(targetsList, targetList{Name: tgt.name, Description: descr, Group: true})
        }
        if format == "table" {
            fmt.Fprintf(tw, "%s\t%s\n", tgt.name, descr)
        }
    }

    if format == "json" {
        enc := json.NewEncoder(w)
        enc.SetIndent("", " ")
        return enc.Encode(targetsList)
    }

    return nil
}

func bakeMetricAttributes(dockerCli command.Cli, driverType, url, cmdContext string, targets []string, options *bakeOptions) attribute.Set {
    return attribute.NewSet(
        commandNameAttribute.String("bake"),
        attribute.Stringer(string(commandOptionsHash), &bakeOptionsHash{
            bakeOptions: options,
            cfg:         confutil.NewConfig(dockerCli),
            url:         url,
            cmdContext:  cmdContext,
            targets:     targets,
        }),
        driverNameAttribute.String(options.builder),
        driverTypeAttribute.String(driverType),
    )
}

type bakeOptionsHash struct {
    *bakeOptions
    cfg        *confutil.Config
    url        string
    cmdContext string
    targets    []string
    result     string
    resultOnce sync.Once
}

func (o *bakeOptionsHash) String() string {
    o.resultOnce.Do(func() {
        url := o.url
        cmdContext := o.cmdContext
        if cmdContext == "cwd://" {
            // Resolve the directory if the cmdContext is the current working directory.
            cmdContext = osutil.GetWd()
        }

        // Sort the inputs for files and targets since the ordering
        // doesn't matter, but avoid modifying the original slice.
        files := immutableSort(o.files)
        targets := immutableSort(o.targets)

        joinedFiles := strings.Join(files, ",")
        joinedTargets := strings.Join(targets, ",")
        salt := o.cfg.TryNodeIdentifier()

        h := sha256.New()
        for _, s := range []string{url, cmdContext, joinedFiles, joinedTargets, salt} {
            _, _ = io.WriteString(h, s)
            h.Write([]byte{0})
        }
        o.result = hex.EncodeToString(h.Sum(nil))
    })
    return o.result
}

// immutableSort will sort the entries in s without modifying the original slice.
func immutableSort(s []string) []string {
    if !sort.StringsAreSorted(s) {
        cpy := make([]string, len(s))
        copy(cpy, s)
        sort.Strings(cpy)
        return cpy
    }
    return s
}

type syncWriter struct {
    w    io.Writer
    once sync.Once
    wait func() error
}

func (w *syncWriter) Write(p []byte) (n int, err error) {
    w.once.Do(func() {
        if w.wait != nil {
            err = w.wait()
        }
    })
    if err != nil {
        return 0, err
    }
    return w.w.Write(p)
}
1304  commands/build.go
File diff suppressed because it is too large
@@ -3,72 +3,283 @@ package commands

import (
    "bytes"
    "context"
    "encoding/csv"
    "fmt"
    "net/url"
    "os"
    "strings"
    "time"

    "github.com/docker/buildx/builder"
    "github.com/docker/buildx/driver"
    remoteutil "github.com/docker/buildx/driver/remote/util"
    "github.com/docker/buildx/store"
    "github.com/docker/buildx/store/storeutil"
    "github.com/docker/buildx/util/cobrautil"
    "github.com/docker/buildx/util/cobrautil/completion"
    "github.com/docker/buildx/util/confutil"
    "github.com/docker/buildx/util/dockerutil"
    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/command"
    dopts "github.com/docker/cli/opts"
    "github.com/google/shlex"
    "github.com/moby/buildkit/util/appcontext"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "github.com/spf13/cobra"
)

type createOptions struct {
    name                string
    driver              string
    nodeName            string
    platform            []string
    actionAppend        bool
    actionLeave         bool
    use                 bool
    driverOpts          []string
    buildkitdFlags      string
    buildkitdConfigFile string
    bootstrap           bool
    name         string
    driver       string
    nodeName     string
    platform     []string
    actionAppend bool
    actionLeave  bool
    use          bool
    flags        string
    configFile   string
    driverOpts   []string
    bootstrap    bool
    // upgrade bool // perform upgrade of the driver
}

func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, args []string) error {
func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
    ctx := appcontext.Context()

    if in.name == "default" {
        return errors.Errorf("default is a reserved name and cannot be used to identify builder instance")
    }

    if in.actionLeave {
        if in.name == "" {
            return errors.Errorf("leave requires instance name")
        }
        if in.nodeName == "" {
            return errors.Errorf("leave requires node name but --node not set")
        }
    }

    if in.actionAppend {
        if in.name == "" {
            logrus.Warnf("append used without name, creating a new instance instead")
        }
    }

    txn, release, err := storeutil.GetStore(dockerCli)
    if err != nil {
        return err
    }
    // Ensure the file lock gets released no matter what happens.
    defer release()

    if in.actionLeave {
        return builder.Leave(ctx, txn, dockerCli, builder.LeaveOpts{
            Name:     in.name,
            NodeName: in.nodeName,
        })
    name := in.name
    if name == "" {
        name, err = store.GenerateName(txn)
        if err != nil {
            return err
        }
    }

    if !in.actionLeave && !in.actionAppend {
        contexts, err := dockerCli.ContextStore().List()
        if err != nil {
            return err
        }
        for _, c := range contexts {
            if c.Name == name {
                logrus.Warnf("instance name %q already exists as context builder", name)
                break
            }
        }
    }

    ng, err := txn.NodeGroupByName(name)
    if err != nil {
        if os.IsNotExist(errors.Cause(err)) {
            if in.actionAppend && in.name != "" {
                logrus.Warnf("failed to find %q for append, creating a new instance instead", in.name)
            }
            if in.actionLeave {
                return errors.Errorf("failed to find instance %q for leave", in.name)
            }
        } else {
            return err
        }
    }

    buildkitHost := os.Getenv("BUILDKIT_HOST")

    driverName := in.driver
    if driverName == "" {
        if ng != nil {
            driverName = ng.Driver
        } else if len(args) == 0 && buildkitHost != "" {
            driverName = "remote"
        } else {
            var arg string
            if len(args) > 0 {
                arg = args[0]
            }
            f, err := driver.GetDefaultFactory(ctx, arg, dockerCli.Client(), true)
            if err != nil {
                return err
            }
            if f == nil {
                return errors.Errorf("no valid drivers found")
            }
            driverName = f.Name()
        }
    }

    if ng != nil {
        if in.nodeName == "" && !in.actionAppend {
            return errors.Errorf("existing instance for %q but no append mode, specify --node to make changes for existing instances", name)
        }
        if driverName != ng.Driver {
            return errors.Errorf("existing instance for %q but has mismatched driver %q", name, ng.Driver)
        }
    }

    if _, err := driver.GetFactory(driverName, true); err != nil {
        return err
    }

    ngOriginal := ng
    if ngOriginal != nil {
        ngOriginal = ngOriginal.Copy()
    }

    if ng == nil {
        ng = &store.NodeGroup{
            Name:   name,
            Driver: driverName,
        }
    }

    var flags []string
    if in.flags != "" {
        flags, err = shlex.Split(in.flags)
        if err != nil {
            return errors.Wrap(err, "failed to parse buildkit flags")
        }
    }

    var ep string
    if len(args) > 0 {
        ep = args[0]
    var setEp bool
    if in.actionLeave {
        if err := ng.Leave(in.nodeName); err != nil {
            return err
        }
    } else {
        switch {
        case driverName == "kubernetes":
            if len(args) > 0 {
                logrus.Warnf("kubernetes driver does not support endpoint args %q", args[0])
            }
            // naming the endpoint to make --append work
            ep = (&url.URL{
                Scheme: driverName,
                Path:   "/" + in.name,
                RawQuery: (&url.Values{
                    "deployment": {in.nodeName},
                    "kubeconfig": {os.Getenv("KUBECONFIG")},
                }).Encode(),
            }).String()
            setEp = false
        case driverName == "remote":
            if len(args) > 0 {
                ep = args[0]
            } else if buildkitHost != "" {
                ep = buildkitHost
            } else {
                return errors.Errorf("no remote endpoint provided")
            }
            ep, err = validateBuildkitEndpoint(ep)
            if err != nil {
                return err
            }
            setEp = true
        case len(args) > 0:
            ep, err = validateEndpoint(dockerCli, args[0])
            if err != nil {
                return err
            }
            setEp = true
        default:
            if dockerCli.CurrentContext() == "default" && dockerCli.DockerEndpoint().TLSData != nil {
                return errors.Errorf("could not create a builder instance with TLS data loaded from environment. Please use `docker context create <context-name>` to create a context for current environment and then create a builder instance with `docker buildx create <context-name>`")
            }
            ep, err = dockerutil.GetCurrentEndpoint(dockerCli)
            if err != nil {
                return err
            }
            setEp = false
        }

        m, err := csvToMap(in.driverOpts)
        if err != nil {
            return err
        }

        if in.configFile == "" {
            // if buildkit config is not provided, check if the default one is
            // available and use it
            if f, ok := confutil.DefaultConfigFile(dockerCli); ok {
                logrus.Warnf("Using default BuildKit config in %s", f)
                in.configFile = f
            }
        }

        if err := ng.Update(in.nodeName, ep, in.platform, setEp, in.actionAppend, flags, in.configFile, m); err != nil {
            return err
        }
    }

    b, err := builder.Create(ctx, txn, dockerCli, builder.CreateOpts{
        Name:                in.name,
        Driver:              in.driver,
        NodeName:            in.nodeName,
        Platforms:           in.platform,
        DriverOpts:          in.driverOpts,
        BuildkitdFlags:      in.buildkitdFlags,
        BuildkitdConfigFile: in.buildkitdConfigFile,
        Use:                 in.use,
        Endpoint:            ep,
        Append:              in.actionAppend,
    })
    if err := txn.Save(ng); err != nil {
        return err
    }

    b, err := builder.New(dockerCli,
        builder.WithName(ng.Name),
        builder.WithStore(txn),
        builder.WithSkippedValidation(),
    )
    if err != nil {
        return err
    }

    // The store is no longer used from this point.
    // Release it so we aren't holding the file lock during the boot.
    release()
    timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
    defer cancel()

    nodes, err := b.LoadNodes(timeoutCtx, true)
    if err != nil {
        return err
    }

    for _, node := range nodes {
        if err := node.Err; err != nil {
            err := errors.Errorf("failed to initialize builder %s (%s): %s", ng.Name, node.Name, err)
            var err2 error
            if ngOriginal == nil {
                err2 = txn.Remove(ng.Name)
            } else {
                err2 = txn.Save(ngOriginal)
            }
            if err2 != nil {
                logrus.Warnf("Could not rollback to previous state: %s", err2)
            }
            return err
        }
    }

    if in.use && ep != "" {
        current, err := dockerutil.GetCurrentEndpoint(dockerCli)
        if err != nil {
            return err
        }
        if err := txn.SetCurrent(current, ng.Name, false, false); err != nil {
            return err
        }
    }

    if in.bootstrap {
        if _, err = b.Boot(ctx); err != nil {
@@ -76,7 +287,7 @@ func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, arg
        }
    }

    fmt.Printf("%s\n", b.Name)
    fmt.Printf("%s\n", ng.Name)
    return nil
}

@@ -96,9 +307,8 @@ func createCmd(dockerCli command.Cli) *cobra.Command {
        Short: "Create a new builder instance",
        Args:  cli.RequiresMaxArgs(1),
        RunE: func(cmd *cobra.Command, args []string) error {
            return runCreate(cmd.Context(), dockerCli, options, args)
            return runCreate(dockerCli, options, args)
        },
        ValidArgsFunction: completion.Disable,
    }

    flags := cmd.Flags()
@@ -106,16 +316,12 @@ func createCmd(dockerCli command.Cli) *cobra.Command {
    flags.StringVar(&options.name, "name", "", "Builder instance name")
    flags.StringVar(&options.driver, "driver", "", fmt.Sprintf("Driver to use (available: %s)", drivers.String()))
    flags.StringVar(&options.nodeName, "node", "", "Create/modify node with given name")
    flags.StringVar(&options.flags, "buildkitd-flags", "", "Flags for buildkitd daemon")
    flags.StringVar(&options.configFile, "config", "", "BuildKit config file")
    flags.StringArrayVar(&options.platform, "platform", []string{}, "Fixed platforms for current node")
    flags.StringArrayVar(&options.driverOpts, "driver-opt", []string{}, "Options for the driver")
    flags.StringVar(&options.buildkitdFlags, "buildkitd-flags", "", "BuildKit daemon flags")

    // we allow for both "--config" and "--buildkitd-config", although the latter is the recommended way to avoid ambiguity.
    flags.StringVar(&options.buildkitdConfigFile, "buildkitd-config", "", "BuildKit daemon config file")
    flags.StringVar(&options.buildkitdConfigFile, "config", "", "BuildKit daemon config file")
    flags.MarkHidden("config")

    flags.BoolVar(&options.bootstrap, "bootstrap", false, "Boot builder after creation")

    flags.BoolVar(&options.actionAppend, "append", false, "Append a node to builder instead of changing it")
    flags.BoolVar(&options.actionLeave, "leave", false, "Remove a node from builder instead of changing it")
    flags.BoolVar(&options.use, "use", false, "Set the current builder instance")
@@ -125,3 +331,49 @@ func createCmd(dockerCli command.Cli) *cobra.Command {

    return cmd
}

func csvToMap(in []string) (map[string]string, error) {
    if len(in) == 0 {
        return nil, nil
    }
    m := make(map[string]string, len(in))
    for _, s := range in {
        csvReader := csv.NewReader(strings.NewReader(s))
        fields, err := csvReader.Read()
        if err != nil {
            return nil, err
        }
        for _, v := range fields {
            p := strings.SplitN(v, "=", 2)
            if len(p) != 2 {
                return nil, errors.Errorf("invalid value %q, expecting k=v", v)
            }
            m[p[0]] = p[1]
        }
    }
    return m, nil
}

// validateEndpoint validates that endpoint is either a context or a docker host
func validateEndpoint(dockerCli command.Cli, ep string) (string, error) {
    dem, err := dockerutil.GetDockerEndpoint(dockerCli, ep)
    if err == nil && dem != nil {
        if ep == "default" {
            return dem.Host, nil
        }
        return ep, nil
    }
    h, err := dopts.ParseHost(true, ep)
    if err != nil {
        return "", errors.Wrapf(err, "failed to parse endpoint %s", ep)
    }
    return h, nil
}

// validateBuildkitEndpoint validates that endpoint is a valid buildkit host
func validateBuildkitEndpoint(ep string) (string, error) {
    if err := remoteutil.IsValidEndpoint(ep); err != nil {
        return "", err
    }
    return ep, nil
}
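In the kubernetes branch of the diff above, the endpoint is synthesized with `net/url` rather than taken from the command line, so that a later `--append` can address the same deployment. A standalone sketch of that construction with illustrative values (builder name `mybuilder`, node `node1`):

```go
package main

import (
    "fmt"
    "net/url"
)

func main() {
    ep := (&url.URL{
        Scheme: "kubernetes",
        Path:   "/mybuilder",
        RawQuery: (&url.Values{
            "deployment": {"node1"},
            "kubeconfig": {"/home/user/.kube/config"},
        }).Encode(),
    }).String()
    // Prints something like:
    // kubernetes:///mybuilder?deployment=node1&kubeconfig=%2Fhome%2Fuser%2F.kube%2Fconfig
    fmt.Println(ep)
}
```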
commands/create_test.go (new file, 26 lines)
@@ -0,0 +1,26 @@
package commands

import (
    "testing"

    "github.com/stretchr/testify/require"
)

func TestCsvToMap(t *testing.T) {
    d := []string{
        "\"tolerations=key=foo,value=bar;key=foo2,value=bar2\",replicas=1",
        "namespace=default",
    }
    r, err := csvToMap(d)

    require.NoError(t, err)

    require.Contains(t, r, "tolerations")
    require.Equal(t, r["tolerations"], "key=foo,value=bar;key=foo2,value=bar2")

    require.Contains(t, r, "replicas")
    require.Equal(t, r["replicas"], "1")

    require.Contains(t, r, "namespace")
    require.Equal(t, r["namespace"], "default")
}
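The first test input above works because `encoding/csv` honors double quotes: a quoted field may contain commas, so one driver-opt value can carry an embedded list before `csvToMap` splits each field on `=`. A small sketch of just that quoting behavior:

```go
package main

import (
    "encoding/csv"
    "fmt"
    "strings"
)

func main() {
    // The quoted field survives as a single element despite its commas.
    r := csv.NewReader(strings.NewReader(`"tolerations=key=foo,value=bar",replicas=1`))
    fields, err := r.Read()
    if err != nil {
        panic(err)
    }
    fmt.Printf("%q\n", fields) // ["tolerations=key=foo,value=bar" "replicas=1"]
}
```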
@@ -1,92 +0,0 @@
package debug

import (
    "context"
    "os"
    "runtime"

    "github.com/containerd/console"
    "github.com/docker/buildx/controller"
    "github.com/docker/buildx/controller/control"
    controllerapi "github.com/docker/buildx/controller/pb"
    "github.com/docker/buildx/monitor"
    "github.com/docker/buildx/util/cobrautil"
    "github.com/docker/buildx/util/progress"
    "github.com/docker/cli/cli/command"
    "github.com/moby/buildkit/util/progress/progressui"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "github.com/spf13/cobra"
)

// DebugConfig is a user-specified configuration for the debugger.
type DebugConfig struct {
    // InvokeFlag is a flag to configure the launched debugger and the command executed on the debugger.
    InvokeFlag string

    // OnFlag is a flag to configure the timing of launching the debugger.
    OnFlag string
}

// DebuggableCmd is a command that supports debugger with recognizing the user-specified DebugConfig.
type DebuggableCmd interface {
    // NewDebugger returns the new *cobra.Command with support for the debugger with recognizing DebugConfig.
    NewDebugger(*DebugConfig) *cobra.Command
}

func RootCmd(dockerCli command.Cli, children ...DebuggableCmd) *cobra.Command {
    var controlOptions control.ControlOptions
    var progressMode string
    var options DebugConfig

    cmd := &cobra.Command{
        Use:   "debug",
        Short: "Start debugger",
        Args:  cobra.NoArgs,
        RunE: func(cmd *cobra.Command, args []string) error {
            printer, err := progress.NewPrinter(context.TODO(), os.Stderr, progressui.DisplayMode(progressMode))
            if err != nil {
                return err
            }

            ctx := context.TODO()
            c, err := controller.NewController(ctx, controlOptions, dockerCli, printer)
            if err != nil {
                return err
            }
            defer func() {
                if err := c.Close(); err != nil {
                    logrus.Warnf("failed to close server connection %v", err)
                }
            }()
            con := console.Current()
            if err := con.SetRaw(); err != nil {
                return errors.Errorf("failed to configure terminal: %v", err)
            }

            _, err = monitor.RunMonitor(ctx, "", nil, &controllerapi.InvokeConfig{
                Tty: true,
            }, c, dockerCli.In(), os.Stdout, os.Stderr, printer)
            con.Reset()
            return err
        },
    }
    cobrautil.MarkCommandExperimental(cmd)

    flags := cmd.Flags()
    flags.StringVar(&options.InvokeFlag, "invoke", "", "Launch a monitor with executing specified command")
    flags.StringVar(&options.OnFlag, "on", "error", "When to launch the monitor ([always, error])")

    flags.StringVar(&controlOptions.Root, "root", "", "Specify root directory of server to connect for the monitor")
    flags.BoolVar(&controlOptions.Detach, "detach", runtime.GOOS == "linux", "Detach buildx server for the monitor (supported only on linux)")
    flags.StringVar(&controlOptions.ServerConfig, "server-config", "", "Specify buildx server config file for the monitor (used only when launching new server)")
    flags.StringVar(&progressMode, "progress", "auto", `Set type of progress output ("auto", "plain", "tty", "rawjson") for the monitor. Use plain to show container output`)

    cobrautil.MarkFlagsExperimental(flags, "invoke", "on", "root", "detach", "server-config")

    for _, c := range children {
        cmd.AddCommand(c.NewDebugger(&options))
    }

    return cmd
}
@@ -1,131 +0,0 @@
package commands

import (
    "io"
    "net"
    "os"

    "github.com/containerd/platforms"
    "github.com/docker/buildx/build"
    "github.com/docker/buildx/builder"
    "github.com/docker/buildx/util/progress"
    "github.com/docker/cli/cli/command"
    "github.com/moby/buildkit/util/appcontext"
    "github.com/moby/buildkit/util/progress/progressui"
    v1 "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
    "golang.org/x/sync/errgroup"
)

type stdioOptions struct {
    builder  string
    platform string
    progress string
}

func runDialStdio(dockerCli command.Cli, opts stdioOptions) error {
    ctx := appcontext.Context()

    contextPathHash, _ := os.Getwd()
    b, err := builder.New(dockerCli,
        builder.WithName(opts.builder),
        builder.WithContextPathHash(contextPathHash),
    )
    if err != nil {
        return err
    }

    if err = updateLastActivity(dockerCli, b.NodeGroup); err != nil {
        return errors.Wrapf(err, "failed to update builder last activity time")
    }
    nodes, err := b.LoadNodes(ctx)
    if err != nil {
        return err
    }

    printer, err := progress.NewPrinter(ctx, os.Stderr, progressui.DisplayMode(opts.progress), progress.WithPhase("dial-stdio"), progress.WithDesc("builder: "+b.Name, "builder:"+b.Name))
    if err != nil {
        return err
    }

    var p *v1.Platform
    if opts.platform != "" {
        pp, err := platforms.Parse(opts.platform)
        if err != nil {
            return errors.Wrapf(err, "invalid platform %q", opts.platform)
        }
        p = &pp
    }

    defer printer.Wait()

    return progress.Wrap("Proxying to builder", printer.Write, func(sub progress.SubLogger) error {
        var conn net.Conn

        err := sub.Wrap("Dialing builder", func() error {
            conn, err = build.Dial(ctx, nodes, printer, p)
            if err != nil {
                return err
            }
            return nil
        })
        if err != nil {
            return err
        }

        defer conn.Close()

        go func() {
            <-ctx.Done()
            closeWrite(conn)
        }()

        var eg errgroup.Group

        eg.Go(func() error {
            _, err := io.Copy(conn, os.Stdin)
            closeWrite(conn)
            return err
        })
        eg.Go(func() error {
            _, err := io.Copy(os.Stdout, conn)
            closeRead(conn)
            return err
        })
        return eg.Wait()
    })
}

func closeRead(conn net.Conn) error {
    if c, ok := conn.(interface{ CloseRead() error }); ok {
        return c.CloseRead()
    }
    return conn.Close()
}

func closeWrite(conn net.Conn) error {
    if c, ok := conn.(interface{ CloseWrite() error }); ok {
        return c.CloseWrite()
    }
    return conn.Close()
}

func dialStdioCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
    opts := stdioOptions{}

    cmd := &cobra.Command{
        Use:   "dial-stdio",
        Short: "Proxy current stdio streams to builder instance",
        Args:  cobra.NoArgs,
        RunE: func(cmd *cobra.Command, args []string) error {
            opts.builder = rootOpts.builder
            return runDialStdio(dockerCli, opts)
        },
    }

    flags := cmd.Flags()
    flags.StringVar(&opts.platform, "platform", os.Getenv("DOCKER_DEFAULT_PLATFORM"), "Target platform: this is used for node selection")
    flags.StringVar(&opts.progress, "progress", "quiet", `Set type of progress output ("auto", "plain", "tty", "rawjson"). Use plain to show container output`)
    return cmd
}
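`closeRead` and `closeWrite` above probe for optional `CloseRead`/`CloseWrite` methods with an anonymous-interface type assertion, so the stdio proxy can half-close one direction of a TCP or Unix connection while the other keeps draining. A minimal sketch of the same assertion; `net.Pipe` is used here only because it deterministically lacks `CloseWrite` and so exercises the fallback:

```go
package main

import (
    "fmt"
    "net"
)

// halfCloseWrite signals EOF to the peer while keeping the read side usable,
// falling back to a full Close for connection types without CloseWrite.
func halfCloseWrite(conn net.Conn) error {
    if c, ok := conn.(interface{ CloseWrite() error }); ok {
        return c.CloseWrite()
    }
    return conn.Close()
}

func main() {
    // *net.TCPConn and *net.UnixConn implement CloseWrite; an in-memory
    // net.Pipe connection does not, so it takes the fallback path.
    a, b := net.Pipe()
    defer b.Close()
    _, ok := a.(interface{ CloseWrite() error })
    fmt.Println("pipe has CloseWrite:", ok) // false
    fmt.Println(halfCloseWrite(a))          // <nil> (full Close)
}
```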
@@ -1,7 +1,6 @@
package commands

import (
    "context"
    "fmt"
    "io"
    "os"
@@ -10,12 +9,12 @@ import (
    "time"

    "github.com/docker/buildx/builder"
    "github.com/docker/buildx/util/cobrautil/completion"
    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/opts"
    "github.com/docker/go-units"
    "github.com/moby/buildkit/client"
    "github.com/moby/buildkit/util/appcontext"
    "github.com/spf13/cobra"
    "golang.org/x/sync/errgroup"
)
@@ -26,7 +25,9 @@ type duOptions struct {
    verbose bool
}

func runDiskUsage(ctx context.Context, dockerCli command.Cli, opts duOptions) error {
func runDiskUsage(dockerCli command.Cli, opts duOptions) error {
    ctx := appcontext.Context()

    pi, err := toBuildkitPruneInfo(opts.filter.Value())
    if err != nil {
        return err
@@ -37,7 +38,7 @@ func runDiskUsage(ctx context.Context, dockerCli command.Cli, opts duOptions) er
        return err
    }

    nodes, err := b.LoadNodes(ctx)
    nodes, err := b.LoadNodes(ctx, false)
    if err != nil {
        return err
    }
@@ -112,9 +113,8 @@ func duCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
        Args: cli.NoArgs,
        RunE: func(cmd *cobra.Command, args []string) error {
            options.builder = rootOpts.builder
            return runDiskUsage(cmd.Context(), dockerCli, options)
            return runDiskUsage(dockerCli, options)
        },
        ValidArgsFunction: completion.Disable,
    }

    flags := cmd.Flags()
@@ -1,604 +0,0 @@
package history

import (
    "bytes"
    "cmp"
    "context"
    "encoding/json"
    "fmt"
    "io"
    "os"
    "path/filepath"
    "slices"
    "strconv"
    "strings"
    "text/tabwriter"
    "time"

    "github.com/containerd/containerd/v2/core/content"
    "github.com/containerd/containerd/v2/core/content/proxy"
    "github.com/containerd/containerd/v2/core/images"
    "github.com/containerd/platforms"
    "github.com/docker/buildx/builder"
    "github.com/docker/buildx/localstate"
    "github.com/docker/buildx/util/cobrautil/completion"
    "github.com/docker/buildx/util/confutil"
    "github.com/docker/buildx/util/desktop"
    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/cli/debug"
    slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common"
    slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2"
    controlapi "github.com/moby/buildkit/api/services/control"
    "github.com/moby/buildkit/client"
    "github.com/moby/buildkit/solver/errdefs"
    provenancetypes "github.com/moby/buildkit/solver/llbsolver/provenance/types"
    "github.com/moby/buildkit/util/grpcerrors"
    "github.com/moby/buildkit/util/stack"
    "github.com/opencontainers/go-digest"
    ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
    "github.com/tonistiigi/go-csvvalue"
    spb "google.golang.org/genproto/googleapis/rpc/status"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
    proto "google.golang.org/protobuf/proto"
)

type inspectOptions struct {
    builder string
    ref     string
}

func runInspect(ctx context.Context, dockerCli command.Cli, opts inspectOptions) error {
    b, err := builder.New(dockerCli, builder.WithName(opts.builder))
    if err != nil {
        return err
    }

    nodes, err := b.LoadNodes(ctx)
    if err != nil {
        return err
    }
    for _, node := range nodes {
        if node.Err != nil {
            return node.Err
        }
    }

    recs, err := queryRecords(ctx, opts.ref, nodes)
    if err != nil {
        return err
    }

    if len(recs) == 0 {
        if opts.ref == "" {
            return errors.New("no records found")
        }
        return errors.Errorf("no record found for ref %q", opts.ref)
    }

    if opts.ref == "" {
        slices.SortFunc(recs, func(a, b historyRecord) int {
            return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
        })
    }

    rec := &recs[0]

    ls, err := localstate.New(confutil.NewConfig(dockerCli))
    if err != nil {
        return err
    }
    st, _ := ls.ReadRef(rec.node.Builder, rec.node.Name, rec.Ref)

    tw := tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)

    attrs := rec.FrontendAttrs
    delete(attrs, "frontend.caps")

    writeAttr := func(k, name string, f func(v string) (string, bool)) {
        if v, ok := attrs[k]; ok {
            if f != nil {
                v, ok = f(v)
            }
            if ok {
                fmt.Fprintf(tw, "%s:\t%s\n", name, v)
            }
        }
        delete(attrs, k)
    }

    var context string
    var dockerfile string
    if st != nil {
        context = st.LocalPath
        dockerfile = st.DockerfilePath
        wd, _ := os.Getwd()

        if dockerfile != "" && dockerfile != "-" {
            if rel, err := filepath.Rel(context, dockerfile); err == nil {
                if !strings.HasPrefix(rel, ".."+string(filepath.Separator)) {
                    dockerfile = rel
                }
            }
        }
        if context != "" {
            if rel, err := filepath.Rel(wd, context); err == nil {
                if !strings.HasPrefix(rel, ".."+string(filepath.Separator)) {
                    context = rel
                }
            }
        }
    }

    if v, ok := attrs["context"]; ok && context == "" {
        delete(attrs, "context")
        context = v
    }
    if dockerfile == "" {
        if v, ok := attrs["filename"]; ok {
            dockerfile = v
            if dfdir, ok := attrs["vcs:localdir:dockerfile"]; ok {
                dockerfile = filepath.Join(dfdir, dockerfile)
            }
        }
    }
    delete(attrs, "filename")

    if context != "" {
        fmt.Fprintf(tw, "Context:\t%s\n", context)
    }
    if dockerfile != "" {
        fmt.Fprintf(tw, "Dockerfile:\t%s\n", dockerfile)
    }
    if _, ok := attrs["context"]; !ok {
        if src, ok := attrs["vcs:source"]; ok {
            fmt.Fprintf(tw, "VCS Repository:\t%s\n", src)
        }
        if rev, ok := attrs["vcs:revision"]; ok {
            fmt.Fprintf(tw, "VCS Revision:\t%s\n", rev)
        }
    }

    writeAttr("target", "Target", nil)
    writeAttr("platform", "Platform", func(v string) (string, bool) {
        return tryParseValue(v, func(v string) (string, error) {
            var pp []string
            for _, v := range strings.Split(v, ",") {
                p, err := platforms.Parse(v)
                if err != nil {
                    return "", err
                }
                pp = append(pp, platforms.FormatAll(platforms.Normalize(p)))
            }
            return strings.Join(pp, ", "), nil
        }), true
    })
    writeAttr("build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR", "Keep Git Dir", func(v string) (string, bool) {
        return tryParseValue(v, func(v string) (string, error) {
            b, err := strconv.ParseBool(v)
            if err != nil {
                return "", err
            }
            return strconv.FormatBool(b), nil
        }), true
    })

    tw.Flush()

    fmt.Fprintln(dockerCli.Out())

    printTable(dockerCli.Out(), attrs, "context:", "Named Context")

    tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)

    fmt.Fprintf(tw, "Started:\t%s\n", rec.CreatedAt.AsTime().Local().Format("2006-01-02 15:04:05"))
    var duration time.Duration
    var statusStr string
    if rec.CompletedAt != nil {
        duration = rec.CompletedAt.AsTime().Sub(rec.CreatedAt.AsTime())
    } else {
        duration = rec.currentTimestamp.Sub(rec.CreatedAt.AsTime())
        statusStr = " (running)"
    }
    fmt.Fprintf(tw, "Duration:\t%s%s\n", formatDuration(duration), statusStr)
    if rec.Error != nil {
        if codes.Code(rec.Error.Code) == codes.Canceled {
            fmt.Fprintf(tw, "Status:\tCanceled\n")
        } else {
            fmt.Fprintf(tw, "Error:\t%s %s\n", codes.Code(rec.Error.Code).String(), rec.Error.Message)
        }
    }
    fmt.Fprintf(tw, "Build Steps:\t%d/%d (%.0f%% cached)\n", rec.NumCompletedSteps, rec.NumTotalSteps, float64(rec.NumCachedSteps)/float64(rec.NumTotalSteps)*100)
    tw.Flush()

    fmt.Fprintln(dockerCli.Out())

    tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)

    writeAttr("force-network-mode", "Network", nil)
    writeAttr("hostname", "Hostname", nil)
    writeAttr("add-hosts", "Extra Hosts", func(v string) (string, bool) {
        return tryParseValue(v, func(v string) (string, error) {
            fields, err := csvvalue.Fields(v, nil)
            if err != nil {
                return "", err
            }
            return strings.Join(fields, ", "), nil
        }), true
    })
    writeAttr("cgroup-parent", "Cgroup Parent", nil)
    writeAttr("image-resolve-mode", "Image Resolve Mode", nil)
    writeAttr("multi-platform", "Force Multi-Platform", nil)
    writeAttr("build-arg:BUILDKIT_MULTI_PLATFORM", "Force Multi-Platform", nil)
    writeAttr("no-cache", "Disable Cache", func(v string) (string, bool) {
        if v == "" {
            return "true", true
        }
        return v, true
    })
    writeAttr("shm-size", "Shm Size", nil)
    writeAttr("ulimit", "Resource Limits", nil)
    writeAttr("build-arg:BUILDKIT_CACHE_MOUNT_NS", "Cache Mount Namespace", nil)
    writeAttr("build-arg:BUILDKIT_DOCKERFILE_CHECK", "Dockerfile Check Config", nil)
    writeAttr("build-arg:SOURCE_DATE_EPOCH", "Source Date Epoch", nil)
    writeAttr("build-arg:SANDBOX_HOSTNAME", "Sandbox Hostname", nil)

    var unusedAttrs []string
    for k := range attrs {
        if strings.HasPrefix(k, "vcs:") || strings.HasPrefix(k, "build-arg:") || strings.HasPrefix(k, "label:") || strings.HasPrefix(k, "context:") || strings.HasPrefix(k, "attest:") {
            continue
        }
        unusedAttrs = append(unusedAttrs, k)
    }
    slices.Sort(unusedAttrs)

    for _, k := range unusedAttrs {
        fmt.Fprintf(tw, "%s:\t%s\n", k, attrs[k])
    }

    tw.Flush()

    fmt.Fprintln(dockerCli.Out())

    printTable(dockerCli.Out(), attrs, "build-arg:", "Build Arg")
    printTable(dockerCli.Out(), attrs, "label:", "Label")

    c, err := rec.node.Driver.Client(ctx)
    if err != nil {
        return err
    }

    store := proxy.NewContentStore(c.ContentClient())

    attachments, err := allAttachments(ctx, store, *rec)
    if err != nil {
        return err
    }

    provIndex := slices.IndexFunc(attachments, func(a attachment) bool {
        return descrType(a.descr) == slsa02.PredicateSLSAProvenance
    })
    if provIndex != -1 {
        prov := attachments[provIndex]

        dt, err := content.ReadBlob(ctx, store, prov.descr)
        if err != nil {
            return errors.Errorf("failed to read provenance %s: %v", prov.descr.Digest, err)
        }

        var pred provenancetypes.ProvenancePredicate
        if err := json.Unmarshal(dt, &pred); err != nil {
            return errors.Errorf("failed to unmarshal provenance %s: %v", prov.descr.Digest, err)
        }

        fmt.Fprintln(dockerCli.Out(), "Materials:")
        tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
        fmt.Fprintf(tw, "URI\tDIGEST\n")
        for _, m := range pred.Materials {
            fmt.Fprintf(tw, "%s\t%s\n", m.URI, strings.Join(digestSetToDigests(m.Digest), ", "))
        }
        tw.Flush()
        fmt.Fprintln(dockerCli.Out())
    }

    if len(attachments) > 0 {
        fmt.Fprintf(tw, "Attachments:\n")
        tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
        fmt.Fprintf(tw, "DIGEST\tPLATFORM\tTYPE\n")
        for _, a := range attachments {
            p := ""
            if a.platform != nil {
                p = platforms.FormatAll(*a.platform)
            }
            fmt.Fprintf(tw, "%s\t%s\t%s\n", a.descr.Digest, p, descrType(a.descr))
        }
        tw.Flush()
        fmt.Fprintln(dockerCli.Out())
    }

    if rec.ExternalError != nil {
        dt, err := content.ReadBlob(ctx, store, ociDesc(rec.ExternalError))
        if err != nil {
            return errors.Wrapf(err, "failed to read external error %s", rec.ExternalError.Digest)
        }
        var st spb.Status
        if err := proto.Unmarshal(dt, &st); err != nil {
            return errors.Wrapf(err, "failed to unmarshal external error %s", rec.ExternalError.Digest)
        }
        retErr := grpcerrors.FromGRPC(status.ErrorProto(&st))
        for _, s := range errdefs.Sources(retErr) {
            s.Print(dockerCli.Out())
        }
        fmt.Fprintln(dockerCli.Out())

        var ve *errdefs.VertexError
        if errors.As(retErr, &ve) {
            dgst, err := digest.Parse(ve.Vertex.Digest)
            if err != nil {
                return errors.Wrapf(err, "failed to parse vertex digest %s", ve.Vertex.Digest)
            }
            name, logs, err := loadVertexLogs(ctx, c, rec.Ref, dgst, 16)
            if err != nil {
                return errors.Wrapf(err, "failed to load vertex logs %s", dgst)
            }
            if len(logs) > 0 {
                fmt.Fprintln(dockerCli.Out(), "Logs:")
                fmt.Fprintf(dockerCli.Out(), "> => %s:\n", name)
                for _, l := range logs {
                    fmt.Fprintln(dockerCli.Out(), "> "+l)
                }
                fmt.Fprintln(dockerCli.Out())
            }
        }

        if debug.IsEnabled() {
            fmt.Fprintf(dockerCli.Out(), "\n%+v\n", stack.Formatter(retErr))
        } else if len(stack.Traces(retErr)) > 0 {
            fmt.Fprintf(dockerCli.Out(), "Enable --debug to see stack traces for error\n")
        }
    }

    fmt.Fprintf(dockerCli.Out(), "Print build logs: docker buildx history logs %s\n", rec.Ref)

    fmt.Fprintf(dockerCli.Out(), "View build in Docker Desktop: %s\n", desktop.BuildURL(fmt.Sprintf("%s/%s/%s", rec.node.Builder, rec.node.Name, rec.Ref)))

    return nil
}

func inspectCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
    var options inspectOptions

    cmd := &cobra.Command{
        Use:   "inspect [OPTIONS] [REF]",
        Short: "Inspect a build",
        Args:  cobra.MaximumNArgs(1),
        RunE: func(cmd *cobra.Command, args []string) error {
            if len(args) > 0 {
                options.ref = args[0]
            }
            options.builder = *rootOpts.Builder
            return runInspect(cmd.Context(), dockerCli, options)
        },
        ValidArgsFunction: completion.Disable,
    }

    cmd.AddCommand(
        attachmentCmd(dockerCli, rootOpts),
    )

    // flags := cmd.Flags()

    return cmd
}

func loadVertexLogs(ctx context.Context, c *client.Client, ref string, dgst digest.Digest, limit int) (string, []string, error) {
    st, err := c.ControlClient().Status(ctx, &controlapi.StatusRequest{
        Ref: ref,
    })
    if err != nil {
        return "", nil, err
    }

    var name string
    var logs []string
    lastState := map[int]int{}

loop0:
    for {
        select {
        case <-ctx.Done():
            st.CloseSend()
            return "", nil, context.Cause(ctx)
        default:
            ev, err := st.Recv()
            if err != nil {
                if errors.Is(err, io.EOF) {
                    break loop0
                }
                return "", nil, err
            }
            ss := client.NewSolveStatus(ev)
            for _, v := range ss.Vertexes {
                if v.Digest == dgst {
                    name = v.Name
                    break
                }
            }
            for _, l := range ss.Logs {
                if l.Vertex == dgst {
                    parts := bytes.Split(l.Data, []byte("\n"))
                    for i, p := range parts {
                        var wrote bool
                        if i == 0 {
                            idx, ok := lastState[l.Stream]
                            if ok && idx != -1 {
                                logs[idx] = logs[idx] + string(p)
                                wrote = true
                            }
                        }
                        if !wrote {
                            if len(p) > 0 {
                                logs = append(logs, string(p))
                            }
                            lastState[l.Stream] = len(logs) - 1
                        }
                        if i == len(parts)-1 && len(p) == 0 {
                            lastState[l.Stream] = -1
                        }
                    }
                }
            }
        }
    }

    if limit > 0 && len(logs) > limit {
        logs = logs[len(logs)-limit:]
    }

    return name, logs, nil
}

type attachment struct {
    platform *ocispecs.Platform
    descr    ocispecs.Descriptor
}

func allAttachments(ctx context.Context, store content.Store, rec historyRecord) ([]attachment, error) {
    var attachments []attachment

    if rec.Result != nil {
        for _, a := range rec.Result.Attestations {
            attachments = append(attachments, attachment{
                descr: ociDesc(a),
            })
        }
        for _, r := range rec.Result.Results {
            attachments = append(attachments, walkAttachments(ctx, store, ociDesc(r), nil)...)
        }
    }

    for key, ri := range rec.Results {
        p, err := platforms.Parse(key)
        if err != nil {
            return nil, err
        }
        for _, a := range ri.Attestations {
            attachments = append(attachments, attachment{
                platform: &p,
                descr:    ociDesc(a),
            })
        }
        for _, r := range ri.Results {
            attachments = append(attachments, walkAttachments(ctx, store, ociDesc(r), &p)...)
        }
    }

    slices.SortFunc(attachments, func(a, b attachment) int {
        pCmp := 0
        if a.platform == nil && b.platform != nil {
            return -1
        } else if a.platform != nil && b.platform == nil {
            return 1
        } else if a.platform != nil && b.platform != nil {
            pCmp = cmp.Compare(platforms.FormatAll(*a.platform), platforms.FormatAll(*b.platform))
        }
        return cmp.Or(
            pCmp,
            cmp.Compare(descrType(a.descr), descrType(b.descr)),
        )
    })

    return attachments, nil
}

func walkAttachments(ctx context.Context, store content.Store, desc ocispecs.Descriptor, platform *ocispecs.Platform) []attachment {
    _, err := store.Info(ctx, desc.Digest)
    if err != nil {
        return nil
    }

    var out []attachment

    if desc.Annotations["vnd.docker.reference.type"] != "attestation-manifest" {
        out = append(out, attachment{platform: platform, descr: desc})
    }

    if desc.MediaType != ocispecs.MediaTypeImageIndex && desc.MediaType != images.MediaTypeDockerSchema2ManifestList {
        return out
    }

    dt, err := content.ReadBlob(ctx, store, desc)
    if err != nil {
        return out
    }

    var idx ocispecs.Index
    if err := json.Unmarshal(dt, &idx); err != nil {
        return out
    }

    for _, d := range idx.Manifests {
        p := platform
        if d.Platform != nil {
            p = d.Platform
        }
        out = append(out, walkAttachments(ctx, store, d, p)...)
    }

    return out
}

func ociDesc(in *controlapi.Descriptor) ocispecs.Descriptor {
    return ocispecs.Descriptor{
        MediaType:   in.MediaType,
        Digest:      digest.Digest(in.Digest),
        Size:        in.Size,
        Annotations: in.Annotations,
    }
}

func descrType(desc ocispecs.Descriptor) string {
    if typ, ok := desc.Annotations["in-toto.io/predicate-type"]; ok {
        return typ
    }
    return desc.MediaType
}

func tryParseValue(s string, f func(string) (string, error)) string {
    v, err := f(s)
    if err != nil {
        return fmt.Sprintf("%s (%v)", s, err)
    }
    return v
}

func printTable(w io.Writer, attrs map[string]string, prefix, title string) {
    var keys []string
    for k := range attrs {
        if strings.HasPrefix(k, prefix) {
            keys = append(keys, strings.TrimPrefix(k, prefix))
        }
    }
    slices.Sort(keys)

    if len(keys) == 0 {
        return
    }

    tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
    fmt.Fprintf(tw, "%s\tVALUE\n", strings.ToUpper(title))
    for _, k := range keys {
        fmt.Fprintf(tw, "%s\t%s\n", k, attrs[prefix+k])
    }
    tw.Flush()
    fmt.Fprintln(w)
}

func digestSetToDigests(ds slsa.DigestSet) []string {
    var out []string
    for k, v := range ds {
        out = append(out, fmt.Sprintf("%s:%s", k, v))
    }
    return out
}
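The sort at the end of `allAttachments` above composes two orderings with `cmp.Or`, which returns its first non-zero argument, so the type comparison only breaks ties left by the platform comparison. A standalone sketch of the comparator pattern (the `item` type is hypothetical):

```go
package main

import (
    "cmp"
    "fmt"
    "slices"
)

type item struct{ platform, typ string }

func main() {
    xs := []item{
        {"linux/arm64", "sbom"},
        {"linux/amd64", "sbom"},
        {"linux/amd64", "provenance"},
    }
    // Order by platform first; fall back to type only when platforms tie.
    slices.SortFunc(xs, func(a, b item) int {
        return cmp.Or(
            cmp.Compare(a.platform, b.platform),
            cmp.Compare(a.typ, b.typ),
        )
    })
    fmt.Println(xs) // [{linux/amd64 provenance} {linux/amd64 sbom} {linux/arm64 sbom}]
}
```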
@@ -1,152 +0,0 @@
package history

import (
    "context"
    "io"
    "slices"

    "github.com/containerd/containerd/v2/core/content/proxy"
    "github.com/containerd/platforms"
    "github.com/docker/buildx/builder"
    "github.com/docker/buildx/util/cobrautil/completion"
    "github.com/docker/cli/cli/command"
    intoto "github.com/in-toto/in-toto-golang/in_toto"
    slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2"
    "github.com/opencontainers/go-digest"
    ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
)

type attachmentOptions struct {
    builder  string
    typ      string
    platform string
    ref      string
    digest   digest.Digest
}

func runAttachment(ctx context.Context, dockerCli command.Cli, opts attachmentOptions) error {
    b, err := builder.New(dockerCli, builder.WithName(opts.builder))
    if err != nil {
        return err
    }

    nodes, err := b.LoadNodes(ctx)
    if err != nil {
        return err
    }
    for _, node := range nodes {
        if node.Err != nil {
            return node.Err
        }
    }

    recs, err := queryRecords(ctx, opts.ref, nodes)
    if err != nil {
        return err
    }

    if len(recs) == 0 {
        if opts.ref == "" {
            return errors.New("no records found")
        }
        return errors.Errorf("no record found for ref %q", opts.ref)
    }

    if opts.ref == "" {
        slices.SortFunc(recs, func(a, b historyRecord) int {
            return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
        })
    }

    rec := &recs[0]

    c, err := rec.node.Driver.Client(ctx)
    if err != nil {
        return err
    }

    store := proxy.NewContentStore(c.ContentClient())

    if opts.digest != "" {
        ra, err := store.ReaderAt(ctx, ocispecs.Descriptor{Digest: opts.digest})
        if err != nil {
            return err
        }
        _, err = io.Copy(dockerCli.Out(), io.NewSectionReader(ra, 0, ra.Size()))
        return err
    }

    attachments, err := allAttachments(ctx, store, *rec)
    if err != nil {
        return err
    }

    typ := opts.typ
    switch typ {
    case "index":
        typ = ocispecs.MediaTypeImageIndex
    case "manifest":
        typ = ocispecs.MediaTypeImageManifest
    case "image":
        typ = ocispecs.MediaTypeImageConfig
    case "provenance":
        typ = slsa02.PredicateSLSAProvenance
    case "sbom":
        typ = intoto.PredicateSPDX
    }

    for _, a := range attachments {
        if opts.platform != "" && (a.platform == nil || platforms.FormatAll(*a.platform) != opts.platform) {
            continue
        }
        if typ != "" && descrType(a.descr) != typ {
            continue
        }
        ra, err := store.ReaderAt(ctx, a.descr)
        if err != nil {
            return err
        }
        _, err = io.Copy(dockerCli.Out(), io.NewSectionReader(ra, 0, ra.Size()))
        return err
    }

    return errors.Errorf("no matching attachment found for ref %q", opts.ref)
}

func attachmentCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
    var options attachmentOptions

    cmd := &cobra.Command{
        Use:   "attachment [OPTIONS] REF [DIGEST]",
        Short: "Inspect a build attachment",
        Args:  cobra.RangeArgs(1, 2),
        RunE: func(cmd *cobra.Command, args []string) error {
            if len(args) > 0 {
                options.ref = args[0]
            }
            if len(args) > 1 {
                dgst, err := digest.Parse(args[1])
                if err != nil {
                    return errors.Wrapf(err, "invalid digest %q", args[1])
                }
                options.digest = dgst
            }

            if options.digest == "" && options.platform == "" && options.typ == "" {
                return errors.New("at least one of --type, --platform or DIGEST must be specified")
            }

            options.builder = *rootOpts.Builder
            return runAttachment(cmd.Context(), dockerCli, options)
        },
        ValidArgsFunction: completion.Disable,
    }

    flags := cmd.Flags()
    flags.StringVar(&options.typ, "type", "", "Type of attachment")
    flags.StringVar(&options.platform, "platform", "", "Platform of attachment")

    return cmd
}
@@ -1,124 +0,0 @@
package history

import (
    "context"
    "io"
    "os"
    "slices"

    "github.com/docker/buildx/builder"
    "github.com/docker/buildx/util/cobrautil/completion"
    "github.com/docker/buildx/util/progress"
    "github.com/docker/cli/cli/command"
    controlapi "github.com/moby/buildkit/api/services/control"
    "github.com/moby/buildkit/client"
    "github.com/moby/buildkit/util/progress/progressui"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
)

type logsOptions struct {
    builder  string
    ref      string
    progress string
}

func runLogs(ctx context.Context, dockerCli command.Cli, opts logsOptions) error {
    b, err := builder.New(dockerCli, builder.WithName(opts.builder))
    if err != nil {
        return err
    }

    nodes, err := b.LoadNodes(ctx)
    if err != nil {
        return err
    }
    for _, node := range nodes {
        if node.Err != nil {
            return node.Err
        }
    }

    recs, err := queryRecords(ctx, opts.ref, nodes)
    if err != nil {
        return err
    }

    if len(recs) == 0 {
        if opts.ref == "" {
            return errors.New("no records found")
        }
        return errors.Errorf("no record found for ref %q", opts.ref)
    }

    if opts.ref == "" {
        slices.SortFunc(recs, func(a, b historyRecord) int {
            return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
        })
    }

    rec := &recs[0]
    c, err := rec.node.Driver.Client(ctx)
    if err != nil {
        return err
    }

    cl, err := c.ControlClient().Status(ctx, &controlapi.StatusRequest{
        Ref: rec.Ref,
    })
    if err != nil {
        return err
    }

    var mode progressui.DisplayMode = progressui.DisplayMode(opts.progress)
    if mode == progressui.AutoMode {
        mode = progressui.PlainMode
    }
    printer, err := progress.NewPrinter(context.TODO(), os.Stderr, mode)
    if err != nil {
        return err
    }

loop0:
    for {
        select {
        case <-ctx.Done():
            cl.CloseSend()
            return context.Cause(ctx)
        default:
            ev, err := cl.Recv()
            if err != nil {
                if errors.Is(err, io.EOF) {
                    break loop0
                }
                return err
            }
            printer.Write(client.NewSolveStatus(ev))
        }
    }

    return printer.Wait()
}

func logsCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
    var options logsOptions

    cmd := &cobra.Command{
        Use:   "logs [OPTIONS] [REF]",
        Short: "Print the logs of a build",
        Args:  cobra.MaximumNArgs(1),
        RunE: func(cmd *cobra.Command, args []string) error {
            if len(args) > 0 {
                options.ref = args[0]
            }
            options.builder = *rootOpts.Builder
            return runLogs(cmd.Context(), dockerCli, options)
        },
        ValidArgsFunction: completion.Disable,
    }

    flags := cmd.Flags()
    flags.StringVar(&options.progress, "progress", "plain", "Set type of progress output (plain, rawjson, tty)")

    return cmd
}
@@ -1,234 +0,0 @@
package history

import (
    "context"
    "encoding/json"
    "fmt"
    "os"
    "time"

    "github.com/containerd/console"
    "github.com/docker/buildx/builder"
    "github.com/docker/buildx/localstate"
    "github.com/docker/buildx/util/cobrautil/completion"
    "github.com/docker/buildx/util/confutil"
    "github.com/docker/buildx/util/desktop"
    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/cli/command/formatter"
    "github.com/docker/go-units"
    "github.com/spf13/cobra"
    "golang.org/x/exp/slices"
)

const (
    lsHeaderBuildID  = "BUILD ID"
    lsHeaderName     = "NAME"
    lsHeaderStatus   = "STATUS"
    lsHeaderCreated  = "CREATED AT"
    lsHeaderDuration = "DURATION"
    lsHeaderLink     = ""

    lsDefaultTableFormat = "table {{.Ref}}\t{{.Name}}\t{{.Status}}\t{{.CreatedAt}}\t{{.Duration}}\t{{.Link}}"

    headerKeyTimestamp = "buildkit-current-timestamp"
)

type lsOptions struct {
    builder string
    format  string
    noTrunc bool
}

func runLs(ctx context.Context, dockerCli command.Cli, opts lsOptions) error {
    b, err := builder.New(dockerCli, builder.WithName(opts.builder))
    if err != nil {
        return err
    }

    nodes, err := b.LoadNodes(ctx)
    if err != nil {
        return err
    }
    for _, node := range nodes {
        if node.Err != nil {
            return node.Err
        }
    }

    out, err := queryRecords(ctx, "", nodes)
    if err != nil {
        return err
    }

    ls, err := localstate.New(confutil.NewConfig(dockerCli))
    if err != nil {
        return err
    }

    for i, rec := range out {
        st, _ := ls.ReadRef(rec.node.Builder, rec.node.Name, rec.Ref)
        rec.name = buildName(rec.FrontendAttrs, st)
        out[i] = rec
    }

    return lsPrint(dockerCli, out, opts)
}

func lsCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
    var options lsOptions

    cmd := &cobra.Command{
        Use:   "ls",
        Short: "List build records",
        Args:  cli.NoArgs,
        RunE: func(cmd *cobra.Command, args []string) error {
            options.builder = *rootOpts.Builder
            return runLs(cmd.Context(), dockerCli, options)
        },
        ValidArgsFunction: completion.Disable,
    }

    flags := cmd.Flags()
    flags.StringVar(&options.format, "format", formatter.TableFormatKey, "Format the output")
    flags.BoolVar(&options.noTrunc, "no-trunc", false, "Don't truncate output")

    return cmd
}

func lsPrint(dockerCli command.Cli, records []historyRecord, in lsOptions) error {
    if in.format == formatter.TableFormatKey {
        in.format = lsDefaultTableFormat
    }

    ctx := formatter.Context{
        Output: dockerCli.Out(),
        Format: formatter.Format(in.format),
        Trunc:  !in.noTrunc,
    }

    slices.SortFunc(records, func(a, b historyRecord) int {
        if a.CompletedAt == nil && b.CompletedAt != nil {
            return -1
        }
        if a.CompletedAt != nil && b.CompletedAt == nil {
            return 1
        }
        return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
    })

    var term bool
    if _, err := console.ConsoleFromFile(os.Stdout); err == nil {
        term = true
    }
    render := func(format func(subContext formatter.SubContext) error) error {
        for _, r := range records {
            if err := format(&lsContext{
                format: formatter.Format(in.format),
                isTerm: term,
                trunc:  !in.noTrunc,
                record: &r,
            }); err != nil {
                return err
            }
        }
        return nil
    }

    lsCtx := lsContext{
        isTerm: term,
        trunc:  !in.noTrunc,
    }
    lsCtx.Header = formatter.SubHeaderContext{
        "Ref":       lsHeaderBuildID,
        "Name":      lsHeaderName,
        "Status":    lsHeaderStatus,
        "CreatedAt": lsHeaderCreated,
        "Duration":  lsHeaderDuration,
        "Link":      lsHeaderLink,
    }

    return ctx.Write(&lsCtx, render)
}

type lsContext struct {
    formatter.HeaderContext

    isTerm bool
    trunc  bool
    format formatter.Format
    record *historyRecord
}

func (c *lsContext) MarshalJSON() ([]byte, error) {
    m := map[string]interface{}{
        "ref":             c.FullRef(),
        "name":            c.Name(),
        "status":          c.Status(),
        "created_at":      c.record.CreatedAt.AsTime().Format(time.RFC3339Nano),
        "total_steps":     c.record.NumTotalSteps,
        "completed_steps": c.record.NumCompletedSteps,
        "cached_steps":    c.record.NumCachedSteps,
    }
    if c.record.CompletedAt != nil {
        m["completed_at"] = c.record.CompletedAt.AsTime().Format(time.RFC3339Nano)
    }
    return json.Marshal(m)
}

func (c *lsContext) Ref() string {
    return c.record.Ref
}

func (c *lsContext) FullRef() string {
    return fmt.Sprintf("%s/%s/%s", c.record.node.Builder, c.record.node.Name, c.record.Ref)
}

func (c *lsContext) Name() string {
    name := c.record.name
    if c.trunc && c.format.IsTable() {
        return trimBeginning(name, 36)
    }
    return name
}

func (c *lsContext) Status() string {
    if c.record.CompletedAt != nil {
        if c.record.Error != nil {
            return "Error"
        }
        return "Completed"
    }
    return "Running"
}

func (c *lsContext) CreatedAt() string {
    return units.HumanDuration(time.Since(c.record.CreatedAt.AsTime())) + " ago"
}

func (c *lsContext) Duration() string {
    lastTime := c.record.currentTimestamp
    if c.record.CompletedAt != nil {
        tm := c.record.CompletedAt.AsTime()
        lastTime = &tm
    }
    if lastTime == nil {
        return ""
    }
    v := formatDuration(lastTime.Sub(c.record.CreatedAt.AsTime()))
    if c.record.CompletedAt == nil {
        v += "+"
    }
    return v
}

func (c *lsContext) Link() string {
    url := desktop.BuildURL(c.FullRef())
    if c.format.IsTable() {
        if c.isTerm {
            return desktop.ANSIHyperlink(url, "Open")
        }
        return ""
    }
    return url
}
@@ -1,80 +0,0 @@
package history

import (
	"context"
	"fmt"
	"slices"

	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/util/cobrautil/completion"
	"github.com/docker/buildx/util/desktop"
	"github.com/docker/cli/cli/command"
	"github.com/pkg/browser"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

type openOptions struct {
	builder string
	ref     string
}

func runOpen(ctx context.Context, dockerCli command.Cli, opts openOptions) error {
	b, err := builder.New(dockerCli, builder.WithName(opts.builder))
	if err != nil {
		return err
	}

	nodes, err := b.LoadNodes(ctx)
	if err != nil {
		return err
	}
	for _, node := range nodes {
		if node.Err != nil {
			return node.Err
		}
	}

	recs, err := queryRecords(ctx, opts.ref, nodes)
	if err != nil {
		return err
	}

	if len(recs) == 0 {
		if opts.ref == "" {
			return errors.New("no records found")
		}
		return errors.Errorf("no record found for ref %q", opts.ref)
	}

	if opts.ref == "" {
		slices.SortFunc(recs, func(a, b historyRecord) int {
			return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
		})
	}

	rec := &recs[0]

	url := desktop.BuildURL(fmt.Sprintf("%s/%s/%s", rec.node.Builder, rec.node.Name, rec.Ref))
	return browser.OpenURL(url)
}

func openCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
	var options openOptions

	cmd := &cobra.Command{
		Use:   "open [OPTIONS] [REF]",
		Short: "Open a build in Docker Desktop",
		Args:  cobra.MaximumNArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) > 0 {
				options.ref = args[0]
			}
			options.builder = *rootOpts.Builder
			return runOpen(cmd.Context(), dockerCli, options)
		},
		ValidArgsFunction: completion.Disable,
	}

	return cmd
}
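When no ref is given, `runOpen` picks the most recent record by sorting descending on `CreatedAt`. A minimal standalone sketch of that comparator pattern, assuming Go 1.21+ for `slices.SortFunc` and `time.Time.Compare`:

```go
package main

import (
	"fmt"
	"slices"
	"time"
)

type record struct {
	ref       string
	createdAt time.Time
}

func main() {
	recs := []record{
		{"a", time.Now().Add(-2 * time.Hour)},
		{"b", time.Now()},
	}
	// Comparing b to a (rather than a to b) yields newest-first order.
	slices.SortFunc(recs, func(a, b record) int {
		return b.createdAt.Compare(a.createdAt)
	})
	fmt.Println(recs[0].ref) // "b", the most recent record
}
```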
@@ -1,151 +0,0 @@
package history

import (
	"context"
	"io"

	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/util/cobrautil/completion"
	"github.com/docker/cli/cli/command"
	"github.com/hashicorp/go-multierror"
	controlapi "github.com/moby/buildkit/api/services/control"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"golang.org/x/sync/errgroup"
)

type rmOptions struct {
	builder string
	refs    []string
	all     bool
}

func runRm(ctx context.Context, dockerCli command.Cli, opts rmOptions) error {
	b, err := builder.New(dockerCli, builder.WithName(opts.builder))
	if err != nil {
		return err
	}

	nodes, err := b.LoadNodes(ctx)
	if err != nil {
		return err
	}
	for _, node := range nodes {
		if node.Err != nil {
			return node.Err
		}
	}

	errs := make([][]error, len(opts.refs))
	for i := range errs {
		errs[i] = make([]error, len(nodes))
	}

	eg, ctx := errgroup.WithContext(ctx)
	for i, node := range nodes {
		node := node
		eg.Go(func() error {
			if node.Driver == nil {
				return nil
			}
			c, err := node.Driver.Client(ctx)
			if err != nil {
				return err
			}

			refs := opts.refs

			if opts.all {
				serv, err := c.ControlClient().ListenBuildHistory(ctx, &controlapi.BuildHistoryRequest{
					EarlyExit: true,
				})
				if err != nil {
					return err
				}
				defer serv.CloseSend()

				for {
					resp, err := serv.Recv()
					if err != nil {
						if errors.Is(err, io.EOF) {
							break
						}
						return err
					}
					if resp.Type == controlapi.BuildHistoryEventType_COMPLETE {
						refs = append(refs, resp.Record.Ref)
					}
				}
			}

			for j, ref := range refs {
				_, err = c.ControlClient().UpdateBuildHistory(ctx, &controlapi.UpdateBuildHistoryRequest{
					Ref:    ref,
					Delete: true,
				})
				if opts.all {
					if err != nil {
						return err
					}
				} else {
					errs[j][i] = err
				}
			}
			return nil
		})
	}

	if err := eg.Wait(); err != nil {
		return err
	}

	var out []error
loop0:
	for _, nodeErrs := range errs {
		var nodeErr error
		for _, err1 := range nodeErrs {
			if err1 == nil {
				continue loop0
			}
			if nodeErr == nil {
				nodeErr = err1
			} else {
				nodeErr = multierror.Append(nodeErr, err1)
			}
		}
		out = append(out, nodeErr)
	}
	if len(out) == 0 {
		return nil
	}
	if len(out) == 1 {
		return out[0]
	}
	return multierror.Append(out[0], out[1:]...)
}

func rmCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
	var options rmOptions

	cmd := &cobra.Command{
		Use:   "rm [OPTIONS] [REF...]",
		Short: "Remove build records",
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) == 0 && !options.all {
				return errors.New("rm requires at least one argument")
			}
			if len(args) > 0 && options.all {
				return errors.New("rm requires either --all or at least one argument")
			}
			options.refs = args
			options.builder = *rootOpts.Builder
			return runRm(cmd.Context(), dockerCli, options)
		},
		ValidArgsFunction: completion.Disable,
	}

	flags := cmd.Flags()
	flags.BoolVar(&options.all, "all", false, "Remove all build records")

	return cmd
}
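Note how per-ref errors are only surfaced when the deletion failed on every node: the `loop0` label skips a ref as soon as one node reports success. A small hedged sketch of that aggregation idea using hashicorp's go-multierror:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/go-multierror"
)

// collapse reports an error for a ref only if every node failed,
// mirroring the labeled-continue aggregation above (illustrative sketch).
func collapse(nodeErrs []error) error {
	var combined error
	for _, err := range nodeErrs {
		if err == nil {
			return nil // at least one node succeeded; treat the ref as removed
		}
		combined = multierror.Append(combined, err)
	}
	return combined
}

func main() {
	fmt.Println(collapse([]error{errors.New("node1: gone"), nil}))             // <nil>
	fmt.Println(collapse([]error{errors.New("node1: gone"), errors.New("x")})) // two errors
}
```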
@@ -1,30 +0,0 @@
package history

import (
	"github.com/docker/buildx/util/cobrautil/completion"
	"github.com/docker/cli/cli/command"
	"github.com/spf13/cobra"
)

type RootOptions struct {
	Builder *string
}

func RootCmd(rootcmd *cobra.Command, dockerCli command.Cli, opts RootOptions) *cobra.Command {
	cmd := &cobra.Command{
		Use:               "history",
		Short:             "Commands to work on build records",
		ValidArgsFunction: completion.Disable,
		RunE:              rootcmd.RunE,
	}

	cmd.AddCommand(
		lsCmd(dockerCli, opts),
		rmCmd(dockerCli, opts),
		logsCmd(dockerCli, opts),
		inspectCmd(dockerCli, opts),
		openCmd(dockerCli, opts),
	)

	return cmd
}
@@ -1,180 +0,0 @@
package history

import (
	"context"
	"fmt"
	"io"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/docker/buildx/build"
	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/localstate"
	controlapi "github.com/moby/buildkit/api/services/control"
	"github.com/pkg/errors"
	"golang.org/x/sync/errgroup"
)

func buildName(fattrs map[string]string, ls *localstate.State) string {
	var res string

	var target, contextPath, dockerfilePath, vcsSource string
	if v, ok := fattrs["target"]; ok {
		target = v
	}
	if v, ok := fattrs["context"]; ok {
		contextPath = filepath.ToSlash(v)
	} else if v, ok := fattrs["vcs:localdir:context"]; ok && v != "." {
		contextPath = filepath.ToSlash(v)
	}
	if v, ok := fattrs["vcs:source"]; ok {
		vcsSource = v
	}
	if v, ok := fattrs["filename"]; ok && v != "Dockerfile" {
		dockerfilePath = filepath.ToSlash(v)
	}
	if v, ok := fattrs["vcs:localdir:dockerfile"]; ok && v != "." {
		dockerfilePath = filepath.ToSlash(filepath.Join(v, dockerfilePath))
	}

	var localPath string
	if ls != nil && !build.IsRemoteURL(ls.LocalPath) {
		if ls.LocalPath != "" && ls.LocalPath != "-" {
			localPath = filepath.ToSlash(ls.LocalPath)
		}
		if ls.DockerfilePath != "" && ls.DockerfilePath != "-" && ls.DockerfilePath != "Dockerfile" {
			dockerfilePath = filepath.ToSlash(ls.DockerfilePath)
		}
	}

	// remove default dockerfile name
	const defaultFilename = "/Dockerfile"
	hasDefaultFileName := strings.HasSuffix(dockerfilePath, defaultFilename) || dockerfilePath == ""
	dockerfilePath = strings.TrimSuffix(dockerfilePath, defaultFilename)

	// dockerfile is a subpath of context
	if strings.HasPrefix(dockerfilePath, localPath) && len(dockerfilePath) > len(localPath) {
		res = dockerfilePath[strings.LastIndex(localPath, "/")+1:]
	} else {
		// Otherwise, use basename
		bpath := localPath
		if len(dockerfilePath) > 0 {
			bpath = dockerfilePath
		}
		if len(bpath) > 0 {
			lidx := strings.LastIndex(bpath, "/")
			res = bpath[lidx+1:]
			if !hasDefaultFileName {
				if lidx != -1 {
					res = filepath.ToSlash(filepath.Join(filepath.Base(bpath[:lidx]), res))
				} else {
					res = filepath.ToSlash(filepath.Join(filepath.Base(bpath), res))
				}
			}
		}
	}

	if len(contextPath) > 0 {
		res = contextPath
	}
	if len(target) > 0 {
		if len(res) > 0 {
			res = res + " (" + target + ")"
		} else {
			res = target
		}
	}
	if res == "" && vcsSource != "" {
		return vcsSource
	}
	return res
}

func trimBeginning(s string, n int) string {
	if len(s) <= n {
		return s
	}
	return ".." + s[len(s)-n+2:]
}

type historyRecord struct {
	*controlapi.BuildHistoryRecord
	currentTimestamp *time.Time
	node             *builder.Node
	name             string
}

func queryRecords(ctx context.Context, ref string, nodes []builder.Node) ([]historyRecord, error) {
	var mu sync.Mutex
	var out []historyRecord

	eg, ctx := errgroup.WithContext(ctx)
	for _, node := range nodes {
		node := node
		eg.Go(func() error {
			if node.Driver == nil {
				return nil
			}
			var records []historyRecord
			c, err := node.Driver.Client(ctx)
			if err != nil {
				return err
			}
			serv, err := c.ControlClient().ListenBuildHistory(ctx, &controlapi.BuildHistoryRequest{
				EarlyExit: true,
				Ref:       ref,
			})
			if err != nil {
				return err
			}
			md, err := serv.Header()
			if err != nil {
				return err
			}
			var ts *time.Time
			if v, ok := md[headerKeyTimestamp]; ok {
				t, err := time.Parse(time.RFC3339Nano, v[0])
				if err != nil {
					return err
				}
				ts = &t
			}
			defer serv.CloseSend()
			for {
				he, err := serv.Recv()
				if err != nil {
					if errors.Is(err, io.EOF) {
						break
					}
					return err
				}
				if he.Type == controlapi.BuildHistoryEventType_DELETED || he.Record == nil {
					continue
				}
				records = append(records, historyRecord{
					BuildHistoryRecord: he.Record,
					currentTimestamp:   ts,
					node:               &node,
				})
			}
			mu.Lock()
			out = append(out, records...)
			mu.Unlock()
			return nil
		})
	}

	if err := eg.Wait(); err != nil {
		return nil, err
	}
	return out, nil
}

func formatDuration(d time.Duration) string {
	if d < time.Minute {
		return fmt.Sprintf("%.1fs", d.Seconds())
	}
	return fmt.Sprintf("%dm %2ds", int(d.Minutes()), int(d.Seconds())%60)
}
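`formatDuration` switches from fractional seconds to a minutes-and-seconds layout at the one-minute mark. A quick usage sketch of the same function:

```go
package main

import (
	"fmt"
	"time"
)

func formatDuration(d time.Duration) string {
	if d < time.Minute {
		return fmt.Sprintf("%.1fs", d.Seconds())
	}
	return fmt.Sprintf("%dm %2ds", int(d.Minutes()), int(d.Seconds())%60)
}

func main() {
	fmt.Println(formatDuration(4500 * time.Millisecond)) // "4.5s"
	fmt.Println(formatDuration(95 * time.Second))        // "1m 35s"
}
```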
@@ -7,14 +7,12 @@ import (
	"os"
	"strings"

	"github.com/distribution/reference"
	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/util/buildflags"
	"github.com/docker/buildx/util/cobrautil/completion"
	"github.com/docker/buildx/util/imagetools"
	"github.com/docker/buildx/util/progress"
	"github.com/docker/cli/cli/command"
	"github.com/moby/buildkit/util/progress/progressui"
	"github.com/docker/distribution/reference"
	"github.com/moby/buildkit/util/appcontext"
	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
@@ -26,14 +24,12 @@ type createOptions struct {
	builder      string
	files        []string
	tags         []string
	annotations  []string
	dryrun       bool
	actionAppend bool
	progress     string
	preferIndex  bool
}

func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, args []string) error {
func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
	if len(args) == 0 && len(in.files) == 0 {
		return errors.Errorf("no sources specified")
	}
@@ -42,7 +38,7 @@ func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, arg
		return errors.Errorf("can't push with no tags specified, please set --tag or --dry-run")
	}

	fileArgs := make([]string, len(in.files), len(in.files)+len(args))
	fileArgs := make([]string, len(in.files))
	for i, f := range in.files {
		dt, err := os.ReadFile(f)
		if err != nil {
@@ -114,6 +110,8 @@ func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, arg
		}
	}

	ctx := appcontext.Context()

	b, err := builder.New(dockerCli, builder.WithName(in.builder))
	if err != nil {
		return err
@@ -155,12 +153,7 @@ func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, arg
		}
	}

	annotations, err := buildflags.ParseAnnotations(in.annotations)
	if err != nil {
		return errors.Wrapf(err, "failed to parse annotations")
	}

	dt, desc, err := r.Combine(ctx, srcs, annotations, in.preferIndex)
	dt, desc, err := r.Combine(ctx, srcs)
	if err != nil {
		return err
	}
@@ -173,9 +166,9 @@ func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, arg
	// new resolver cause need new auth
	r = imagetools.New(imageopt)

	ctx2, cancel := context.WithCancelCause(context.TODO())
	defer func() { cancel(errors.WithStack(context.Canceled)) }()
	printer, err := progress.NewPrinter(ctx2, os.Stderr, progressui.DisplayMode(in.progress))
	ctx2, cancel := context.WithCancel(context.TODO())
	defer cancel()
	printer, err := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, in.progress)
	if err != nil {
		return err
	}
@@ -278,9 +271,8 @@ func createCmd(dockerCli command.Cli, opts RootOptions) *cobra.Command {
		Short: "Create a new image based on source images",
		RunE: func(cmd *cobra.Command, args []string) error {
			options.builder = *opts.Builder
			return runCreate(cmd.Context(), dockerCli, options, args)
			return runCreate(dockerCli, options, args)
		},
		ValidArgsFunction: completion.Disable,
	}

	flags := cmd.Flags()
@@ -288,9 +280,7 @@ func createCmd(dockerCli command.Cli, opts RootOptions) *cobra.Command {
	flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, "Set reference for new image")
	flags.BoolVar(&options.dryrun, "dry-run", false, "Show final image instead of pushing")
	flags.BoolVar(&options.actionAppend, "append", false, "Append to existing manifest")
	flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty", "rawjson"). Use plain to show container output`)
	flags.StringArrayVarP(&options.annotations, "annotation", "", []string{}, "Add annotation to the image")
	flags.BoolVar(&options.preferIndex, "prefer-index", true, "When only a single source is specified, prefer outputting an image index or manifest list instead of performing a carbon copy")
	flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty"). Use plain to show container output`)

	return cmd
}
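One recurring change in this diff is swapping `context.WithCancel` for `context.WithCancelCause`, which lets callers record *why* a context was cancelled. A hedged standalone sketch of the pattern (requires Go 1.20+):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancelCause(context.Background())

	// Cancel with an explicit cause instead of a bare cancellation.
	cancel(errors.New("user aborted the push"))

	<-ctx.Done()
	fmt.Println(ctx.Err())          // context.Canceled
	fmt.Println(context.Cause(ctx)) // "user aborted the push"
}
```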
@@ -1,14 +1,12 @@
package commands

import (
	"context"

	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/util/cobrautil/completion"
	"github.com/docker/buildx/util/imagetools"
	"github.com/docker/cli-docs-tool/annotation"
	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/moby/buildkit/util/appcontext"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)
@@ -19,7 +17,9 @@ type inspectOptions struct {
	raw bool
}

func runInspect(ctx context.Context, dockerCli command.Cli, in inspectOptions, name string) error {
func runInspect(dockerCli command.Cli, in inspectOptions, name string) error {
	ctx := appcontext.Context()

	if in.format != "" && in.raw {
		return errors.Errorf("format and raw cannot be used together")
	}
@@ -50,9 +50,8 @@ func inspectCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
		Args: cli.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			options.builder = *rootOpts.Builder
			return runInspect(cmd.Context(), dockerCli, options, args[0])
			return runInspect(dockerCli, options, args[0])
		},
		ValidArgsFunction: completion.Disable,
	}

	flags := cmd.Flags()
@@ -1,7 +1,6 @@
package commands

import (
	"github.com/docker/buildx/util/cobrautil/completion"
	"github.com/docker/cli/cli/command"
	"github.com/spf13/cobra"
)
@@ -10,12 +9,10 @@ type RootOptions struct {
	Builder *string
}

func RootCmd(rootcmd *cobra.Command, dockerCli command.Cli, opts RootOptions) *cobra.Command {
func RootCmd(dockerCli command.Cli, opts RootOptions) *cobra.Command {
	cmd := &cobra.Command{
		Use:               "imagetools",
		Short:             "Commands to work on images in registry",
		ValidArgsFunction: completion.Disable,
		RunE:              rootcmd.RunE,
		Use:   "imagetools",
		Short: "Commands to work on images in registry",
	}

	cmd.AddCommand(
@@ -4,20 +4,15 @@ import (
	"context"
	"fmt"
	"os"
	"sort"
	"strings"
	"text/tabwriter"
	"time"

	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/driver"
	"github.com/docker/buildx/util/cobrautil/completion"
	"github.com/docker/buildx/util/platformutil"
	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/debug"
	"github.com/docker/go-units"
	"github.com/pkg/errors"
	"github.com/moby/buildkit/util/appcontext"
	"github.com/spf13/cobra"
)

@@ -26,7 +21,9 @@ type inspectOptions struct {
	builder string
}

func runInspect(ctx context.Context, dockerCli command.Cli, in inspectOptions) error {
func runInspect(dockerCli command.Cli, in inspectOptions) error {
	ctx := appcontext.Context()

	b, err := builder.New(dockerCli,
		builder.WithName(in.builder),
		builder.WithSkippedValidation(),
@@ -35,11 +32,10 @@ func runInspect(ctx context.Context, dockerCli command.Cli, in inspectOptions) e
		return err
	}

	timeoutCtx, cancel := context.WithCancelCause(ctx)
	timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
	defer func() { cancel(errors.WithStack(context.Canceled)) }()
	timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
	defer cancel()

	nodes, err := b.LoadNodes(timeoutCtx, builder.WithData())
	nodes, err := b.LoadNodes(timeoutCtx, true)
	if in.bootstrap {
		var ok bool
		ok, err = b.Boot(ctx)
@@ -47,7 +43,7 @@ func runInspect(ctx context.Context, dockerCli command.Cli, in inspectOptions) e
			return err
		}
		if ok {
			nodes, err = b.LoadNodes(timeoutCtx, builder.WithData())
			nodes, err = b.LoadNodes(timeoutCtx, true)
		}
	}

@@ -86,60 +82,13 @@ func runInspect(ctx context.Context, dockerCli command.Cli, in inspectOptions) e
			fmt.Fprintf(w, "Error:\t%s\n", err.Error())
		} else {
			fmt.Fprintf(w, "Status:\t%s\n", nodes[i].DriverInfo.Status)
			if len(n.BuildkitdFlags) > 0 {
				fmt.Fprintf(w, "BuildKit daemon flags:\t%s\n", strings.Join(n.BuildkitdFlags, " "))
			if len(n.Flags) > 0 {
				fmt.Fprintf(w, "Flags:\t%s\n", strings.Join(n.Flags, " "))
			}
			if nodes[i].Version != "" {
				fmt.Fprintf(w, "BuildKit version:\t%s\n", nodes[i].Version)
			}
			platforms := platformutil.FormatInGroups(n.Node.Platforms, n.Platforms)
			if len(platforms) > 0 {
				fmt.Fprintf(w, "Platforms:\t%s\n", strings.Join(platforms, ", "))
			}
			if debug.IsEnabled() {
				fmt.Fprintf(w, "Features:\n")
				features := nodes[i].Driver.Features(ctx)
				featKeys := make([]string, 0, len(features))
				for k := range features {
					featKeys = append(featKeys, string(k))
				}
				sort.Strings(featKeys)
				for _, k := range featKeys {
					fmt.Fprintf(w, "\t%s:\t%t\n", k, features[driver.Feature(k)])
				}
			}
			if len(nodes[i].Labels) > 0 {
				fmt.Fprintf(w, "Labels:\n")
				for _, k := range sortedKeys(nodes[i].Labels) {
					v := nodes[i].Labels[k]
					fmt.Fprintf(w, "\t%s:\t%s\n", k, v)
				}
			}
			for ri, rule := range nodes[i].GCPolicy {
				fmt.Fprintf(w, "GC Policy rule#%d:\n", ri)
				fmt.Fprintf(w, "\tAll:\t%v\n", rule.All)
				if len(rule.Filter) > 0 {
					fmt.Fprintf(w, "\tFilters:\t%s\n", strings.Join(rule.Filter, " "))
				}
				if rule.KeepDuration > 0 {
					fmt.Fprintf(w, "\tKeep Duration:\t%v\n", rule.KeepDuration.String())
				}
				if rule.ReservedSpace > 0 {
					fmt.Fprintf(w, "\tReserved Space:\t%s\n", units.BytesSize(float64(rule.ReservedSpace)))
				}
				if rule.MaxUsedSpace > 0 {
					fmt.Fprintf(w, "\tMax Used Space:\t%s\n", units.BytesSize(float64(rule.MaxUsedSpace)))
				}
				if rule.MinFreeSpace > 0 {
					fmt.Fprintf(w, "\tMin Free Space:\t%s\n", units.BytesSize(float64(rule.MinFreeSpace)))
				}
			}
			for f, dt := range nodes[i].Files {
				fmt.Fprintf(w, "File#%s:\n", f)
				for _, line := range strings.Split(string(dt), "\n") {
					fmt.Fprintf(w, "\t> %s\n", line)
				}
			fmt.Fprintf(w, "Buildkit:\t%s\n", nodes[i].Version)
			}
			fmt.Fprintf(w, "Platforms:\t%s\n", strings.Join(platformutil.FormatInGroups(n.Node.Platforms, n.Platforms), ", "))
		}
	}
}
@@ -161,9 +110,8 @@ func inspectCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
			if len(args) > 0 {
				options.builder = args[0]
			}
			return runInspect(cmd.Context(), dockerCli, options)
			return runInspect(dockerCli, options)
		},
		ValidArgsFunction: completion.BuilderNames(dockerCli),
	}

	flags := cmd.Flags()
@@ -171,14 +119,3 @@ func inspectCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {

	return cmd
}

func sortedKeys(m map[string]string) []string {
	s := make([]string, len(m))
	i := 0
	for k := range m {
		s[i] = k
		i++
	}
	sort.Strings(s)
	return s
}
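The inspect output above is assembled with `text/tabwriter`, which aligns columns on `\t` boundaries when flushed. A minimal sketch of the same technique:

```go
package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	// Columns separated by \t are aligned on Flush, the same mechanism
	// the inspect command uses for its "Key:\tValue" output.
	w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
	defer w.Flush()
	fmt.Fprintf(w, "Name:\tmybuilder\n")
	fmt.Fprintf(w, "Status:\trunning\n")
	fmt.Fprintf(w, "BuildKit version:\tv0.12.0\n")
}
```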
@@ -4,7 +4,6 @@ import (
	"os"

	"github.com/docker/buildx/util/cobrautil"
	"github.com/docker/buildx/util/cobrautil/completion"
	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/config"
@@ -15,7 +14,7 @@ import (
type installOptions struct {
}

func runInstall(_ command.Cli, _ installOptions) error {
func runInstall(dockerCli command.Cli, in installOptions) error {
	dir := config.Dir()
	if err := os.MkdirAll(dir, 0755); err != nil {
		return errors.Wrap(err, "could not create docker config")
@@ -47,8 +46,7 @@ func installCmd(dockerCli command.Cli) *cobra.Command {
		RunE: func(cmd *cobra.Command, args []string) error {
			return runInstall(dockerCli, options)
		},
		Hidden:            true,
		ValidArgsFunction: completion.Disable,
		Hidden: true,
	}

	// hide builder persistent flag for this command
commands/ls.go (384 changed lines)
@@ -2,46 +2,29 @@ package commands

import (
	"context"
	"encoding/json"
	"fmt"
	"sort"
	"io"
	"strings"
	"text/tabwriter"
	"time"

	"github.com/containerd/platforms"
	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/store"
	"github.com/docker/buildx/store/storeutil"
	"github.com/docker/buildx/util/cobrautil"
	"github.com/docker/buildx/util/cobrautil/completion"
	"github.com/docker/buildx/util/platformutil"
	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/command/formatter"
	"github.com/pkg/errors"
	"github.com/moby/buildkit/util/appcontext"
	"github.com/spf13/cobra"
	"golang.org/x/sync/errgroup"
)

const (
	lsNameNodeHeader       = "NAME/NODE"
	lsDriverEndpointHeader = "DRIVER/ENDPOINT"
	lsStatusHeader         = "STATUS"
	lsLastActivityHeader   = "LAST ACTIVITY"
	lsBuildkitHeader       = "BUILDKIT"
	lsPlatformsHeader      = "PLATFORMS"

	lsIndent = ` \_ `

	lsDefaultTableFormat = "table {{.Name}}\t{{.DriverEndpoint}}\t{{.Status}}\t{{.Buildkit}}\t{{.Platforms}}"
)

type lsOptions struct {
	format  string
	noTrunc bool
}

func runLs(ctx context.Context, dockerCli command.Cli, in lsOptions) error {
func runLs(dockerCli command.Cli, in lsOptions) error {
	ctx := appcontext.Context()

	txn, release, err := storeutil.GetStore(dockerCli)
	if err != nil {
		return err
@@ -58,15 +41,14 @@ func runLs(ctx context.Context, dockerCli command.Cli, in lsOptions) error {
		return err
	}

	timeoutCtx, cancel := context.WithCancelCause(ctx)
	timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
	defer func() { cancel(errors.WithStack(context.Canceled)) }()
	timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
	defer cancel()

	eg, _ := errgroup.WithContext(timeoutCtx)
	for _, b := range builders {
		func(b *builder.Builder) {
			eg.Go(func() error {
				_, _ = b.LoadNodes(timeoutCtx, builder.WithData())
				_, _ = b.LoadNodes(timeoutCtx, true)
				return nil
			})
		}(b)
@@ -76,9 +58,22 @@ func runLs(ctx context.Context, dockerCli command.Cli, in lsOptions) error {
		return err
	}

	if hasErrors, err := lsPrint(dockerCli, current, builders, in); err != nil {
		return err
	} else if hasErrors {
	w := tabwriter.NewWriter(dockerCli.Out(), 0, 0, 1, ' ', 0)
	fmt.Fprintf(w, "NAME/NODE\tDRIVER/ENDPOINT\tSTATUS\tBUILDKIT\tPLATFORMS\n")

	printErr := false
	for _, b := range builders {
		if current.Name == b.Name {
			b.Name += " *"
		}
		if ok := printBuilder(w, b); !ok {
			printErr = true
		}
	}

	w.Flush()

	if printErr {
		_, _ = fmt.Fprintf(dockerCli.Err(), "\n")
		for _, b := range builders {
			if b.Err() != nil {
@@ -96,6 +91,31 @@ func runLs(ctx context.Context, dockerCli command.Cli, in lsOptions) error {
	return nil
}

func printBuilder(w io.Writer, b *builder.Builder) (ok bool) {
	ok = true
	var err string
	if b.Err() != nil {
		ok = false
		err = "error"
	}
	fmt.Fprintf(w, "%s\t%s\t%s\t\t\n", b.Name, b.Driver, err)
	if b.Err() == nil {
		for _, n := range b.Nodes() {
			var status string
			if n.DriverInfo != nil {
				status = n.DriverInfo.Status.String()
			}
			if n.Err != nil {
				ok = false
				fmt.Fprintf(w, " %s\t%s\t%s\t\t\n", n.Name, n.Endpoint, "error")
			} else {
				fmt.Fprintf(w, " %s\t%s\t%s\t%s\t%s\n", n.Name, n.Endpoint, status, n.Version, strings.Join(platformutil.FormatInGroups(n.Node.Platforms, n.Platforms), ", "))
			}
		}
	}
	return
}

func lsCmd(dockerCli command.Cli) *cobra.Command {
	var options lsOptions

@@ -104,314 +124,12 @@ func lsCmd(dockerCli command.Cli) *cobra.Command {
		Short: "List builder instances",
		Args:  cli.ExactArgs(0),
		RunE: func(cmd *cobra.Command, args []string) error {
			return runLs(cmd.Context(), dockerCli, options)
			return runLs(dockerCli, options)
		},
		ValidArgsFunction: completion.Disable,
	}

	flags := cmd.Flags()
	flags.StringVar(&options.format, "format", formatter.TableFormatKey, "Format the output")
	flags.BoolVar(&options.noTrunc, "no-trunc", false, "Don't truncate output")

	// hide builder persistent flag for this command
	cobrautil.HideInheritedFlags(cmd, "builder")

	return cmd
}

func lsPrint(dockerCli command.Cli, current *store.NodeGroup, builders []*builder.Builder, in lsOptions) (hasErrors bool, _ error) {
	if in.format == formatter.TableFormatKey {
		in.format = lsDefaultTableFormat
	}

	ctx := formatter.Context{
		Output: dockerCli.Out(),
		Format: formatter.Format(in.format),
		Trunc:  !in.noTrunc,
	}

	sort.SliceStable(builders, func(i, j int) bool {
		ierr := builders[i].Err() != nil
		jerr := builders[j].Err() != nil
		if ierr && !jerr {
			return false
		} else if !ierr && jerr {
			return true
		}
		return i < j
	})

	render := func(format func(subContext formatter.SubContext) error) error {
		for _, b := range builders {
			if err := format(&lsContext{
				format: ctx.Format,
				trunc:  ctx.Trunc,
				Builder: &lsBuilder{
					Builder: b,
					Current: b.Name == current.Name,
				},
			}); err != nil {
				return err
			}
			if b.Err() != nil {
				if ctx.Format.IsTable() {
					hasErrors = true
				}
				continue
			}
			for _, n := range b.Nodes() {
				if n.Err != nil {
					if ctx.Format.IsTable() {
						hasErrors = true
					}
				}
				if err := format(&lsContext{
					format: ctx.Format,
					trunc:  ctx.Trunc,
					Builder: &lsBuilder{
						Builder: b,
						Current: b.Name == current.Name,
					},
					node: n,
				}); err != nil {
					return err
				}
			}
		}
		return nil
	}

	lsCtx := lsContext{}
	lsCtx.Header = formatter.SubHeaderContext{
		"Name":           lsNameNodeHeader,
		"DriverEndpoint": lsDriverEndpointHeader,
		"LastActivity":   lsLastActivityHeader,
		"Status":         lsStatusHeader,
		"Buildkit":       lsBuildkitHeader,
		"Platforms":      lsPlatformsHeader,
	}

	return hasErrors, ctx.Write(&lsCtx, render)
}

type lsBuilder struct {
	*builder.Builder
	Current bool
}

type lsContext struct {
	formatter.HeaderContext
	Builder *lsBuilder

	format formatter.Format
	trunc  bool
	node   builder.Node
}

func (c *lsContext) MarshalJSON() ([]byte, error) {
	return json.Marshal(c.Builder)
}

func (c *lsContext) Name() string {
	if c.node.Name == "" {
		name := c.Builder.Name
		if c.Builder.Current && c.format.IsTable() {
			name += "*"
		}
		return name
	}
	if c.format.IsTable() {
		return lsIndent + c.node.Name
	}
	return c.node.Name
}

func (c *lsContext) DriverEndpoint() string {
	if c.node.Name == "" {
		return c.Builder.Driver
	}
	if c.format.IsTable() {
		return lsIndent + c.node.Endpoint
	}
	return c.node.Endpoint
}

func (c *lsContext) LastActivity() string {
	if c.node.Name != "" || c.Builder.LastActivity.IsZero() {
		return ""
	}
	return c.Builder.LastActivity.UTC().Format(time.RFC3339)
}

func (c *lsContext) Status() string {
	if c.node.Name == "" {
		if c.Builder.Err() != nil {
			return "error"
		}
		return ""
	}
	if c.node.Err != nil {
		return "error"
	}
	if c.node.DriverInfo != nil {
		return c.node.DriverInfo.Status.String()
	}
	return ""
}

func (c *lsContext) Buildkit() string {
	if c.node.Name == "" {
		return ""
	}
	return c.node.Version
}

func (c *lsContext) Platforms() string {
	if c.node.Name == "" {
		return ""
	}
	pfs := platformutil.FormatInGroups(c.node.Node.Platforms, c.node.Platforms)
	if c.trunc && c.format.IsTable() {
		return truncPlatforms(pfs, 4).String()
	}
	return strings.Join(pfs, ", ")
}

func (c *lsContext) Error() string {
	if c.node.Name != "" && c.node.Err != nil {
		return c.node.Err.Error()
	} else if err := c.Builder.Err(); err != nil {
		return err.Error()
	}
	return ""
}

var truncMajorPlatforms = []string{
	"linux/amd64",
	"linux/arm64",
	"linux/arm",
	"linux/ppc64le",
	"linux/s390x",
	"linux/riscv64",
	"linux/mips64",
}

type truncatedPlatforms struct {
	res   map[string][]string
	input []string
	max   int
}

func (tp truncatedPlatforms) List() map[string][]string {
	return tp.res
}

func (tp truncatedPlatforms) String() string {
	var out []string
	var count int

	var keys []string
	for k := range tp.res {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	seen := make(map[string]struct{})
	for _, mpf := range truncMajorPlatforms {
		if tpf, ok := tp.res[mpf]; ok {
			seen[mpf] = struct{}{}
			if len(tpf) == 1 {
				out = append(out, tpf[0])
				count++
			} else {
				hasPreferredPlatform := false
				for _, pf := range tpf {
					if strings.HasSuffix(pf, "*") {
						hasPreferredPlatform = true
						break
					}
				}
				mainpf := mpf
				if hasPreferredPlatform {
					mainpf += "*"
				}
				out = append(out, fmt.Sprintf("%s (+%d)", mainpf, len(tpf)))
				count += len(tpf)
			}
		}
	}

	for _, mpf := range keys {
		if len(out) >= tp.max {
			break
		}
		if _, ok := seen[mpf]; ok {
			continue
		}
		if len(tp.res[mpf]) == 1 {
			out = append(out, tp.res[mpf][0])
			count++
		} else {
			hasPreferredPlatform := false
			for _, pf := range tp.res[mpf] {
				if strings.HasSuffix(pf, "*") {
					hasPreferredPlatform = true
					break
				}
			}
			mainpf := mpf
			if hasPreferredPlatform {
				mainpf += "*"
			}
			out = append(out, fmt.Sprintf("%s (+%d)", mainpf, len(tp.res[mpf])))
			count += len(tp.res[mpf])
		}
	}

	left := len(tp.input) - count
	if left > 0 {
		out = append(out, fmt.Sprintf("(%d more)", left))
	}

	return strings.Join(out, ", ")
}

func truncPlatforms(pfs []string, max int) truncatedPlatforms {
	res := make(map[string][]string)
	for _, mpf := range truncMajorPlatforms {
		for _, pf := range pfs {
			if len(res) >= max {
				break
			}
			pp, err := platforms.Parse(strings.TrimSuffix(pf, "*"))
			if err != nil {
				continue
			}
			if pp.OS+"/"+pp.Architecture == mpf {
				res[mpf] = append(res[mpf], pf)
			}
		}
	}
	left := make(map[string][]string)
	for _, pf := range pfs {
		if len(res) >= max {
			break
		}
		pp, err := platforms.Parse(strings.TrimSuffix(pf, "*"))
		if err != nil {
			continue
		}
		ppf := strings.TrimSuffix(pp.OS+"/"+pp.Architecture, "*")
		if _, ok := res[ppf]; !ok {
			left[ppf] = append(left[ppf], pf)
		}
	}
	for k, v := range left {
		res[k] = v
	}
	return truncatedPlatforms{
		res:   res,
		input: pfs,
		max:   max,
	}
}
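The `--format` flag feeds Go template strings such as `table {{.Name}}\t{{.Status}}` into docker/cli's formatter. As an illustrative, hedged approximation of what that machinery does (not the actual docker/cli code), the core idea is a `text/template` executed per row into a tabwriter:

```go
package main

import (
	"os"
	"text/tabwriter"
	"text/template"
)

type row struct{ Name, Status string }

func main() {
	// Hypothetical simplified rendering: a stand-in for
	// "table {{.Name}}\t{{.Status}}" with the "table " prefix stripped.
	tmpl := template.Must(template.New("ls").Parse("{{.Name}}\t{{.Status}}\n"))
	w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
	defer w.Flush()
	w.Write([]byte("NAME/NODE\tSTATUS\n"))
	for _, r := range []row{{"mybuilder", "running"}, {"default", "inactive"}} {
		_ = tmpl.Execute(w, r)
	}
}
```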
@@ -1,174 +0,0 @@
package commands

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestTruncPlatforms(t *testing.T) {
	tests := []struct {
		name         string
		platforms    []string
		max          int
		expectedList map[string][]string
		expectedOut  string
	}{
		{
			name:      "arm64 preferred and emulated",
			platforms: []string{"linux/arm64*", "linux/amd64", "linux/amd64/v2", "linux/riscv64", "linux/ppc64le", "linux/s390x", "linux/386", "linux/mips64le", "linux/mips64", "linux/arm/v7", "linux/arm/v6"},
			max:       4,
			expectedList: map[string][]string{
				"linux/amd64": {
					"linux/amd64",
					"linux/amd64/v2",
				},
				"linux/arm": {
					"linux/arm/v7",
					"linux/arm/v6",
				},
				"linux/arm64": {
					"linux/arm64*",
				},
				"linux/ppc64le": {
					"linux/ppc64le",
				},
			},
			expectedOut: "linux/amd64 (+2), linux/arm64*, linux/arm (+2), linux/ppc64le, (5 more)",
		},
		{
			name:      "riscv64 preferred only",
			platforms: []string{"linux/riscv64*"},
			max:       4,
			expectedList: map[string][]string{
				"linux/riscv64": {
					"linux/riscv64*",
				},
			},
			expectedOut: "linux/riscv64*",
		},
		{
			name:      "amd64 no preferred and emulated",
			platforms: []string{"linux/amd64", "linux/amd64/v2", "linux/amd64/v3", "linux/386", "linux/arm64", "linux/riscv64", "linux/ppc64le", "linux/s390x", "linux/mips64le", "linux/mips64", "linux/arm/v7", "linux/arm/v6"},
			max:       4,
			expectedList: map[string][]string{
				"linux/amd64": {
					"linux/amd64",
					"linux/amd64/v2",
					"linux/amd64/v3",
				},
				"linux/arm": {
					"linux/arm/v7",
					"linux/arm/v6",
				},
				"linux/arm64": {
					"linux/arm64",
				},
				"linux/ppc64le": {
					"linux/ppc64le",
				}},
			expectedOut: "linux/amd64 (+3), linux/arm64, linux/arm (+2), linux/ppc64le, (5 more)",
		},
		{
			name:      "amd64 no preferred",
			platforms: []string{"linux/amd64", "linux/386"},
			max:       4,
			expectedList: map[string][]string{
				"linux/386": {
					"linux/386",
				},
				"linux/amd64": {
					"linux/amd64",
				},
			},
			expectedOut: "linux/amd64, linux/386",
		},
		{
			name:      "arm64 no preferred",
			platforms: []string{"linux/arm64", "linux/arm/v7", "linux/arm/v6"},
			max:       4,
			expectedList: map[string][]string{
				"linux/arm": {
					"linux/arm/v7",
					"linux/arm/v6",
				},
				"linux/arm64": {
					"linux/arm64",
				},
			},
			expectedOut: "linux/arm64, linux/arm (+2)",
		},
		{
			name:      "all preferred",
			platforms: []string{"darwin/arm64*", "linux/arm64*", "linux/arm/v5*", "linux/arm/v6*", "linux/arm/v7*", "windows/arm64*"},
			max:       4,
			expectedList: map[string][]string{
				"darwin/arm64": {
					"darwin/arm64*",
				},
				"linux/arm": {
					"linux/arm/v5*",
					"linux/arm/v6*",
					"linux/arm/v7*",
				},
				"linux/arm64": {
					"linux/arm64*",
				},
				"windows/arm64": {
					"windows/arm64*",
				},
			},
			expectedOut: "linux/arm64*, linux/arm* (+3), darwin/arm64*, windows/arm64*",
		},
		{
			name:      "no major preferred",
			platforms: []string{"linux/amd64/v2*", "linux/arm/v6*", "linux/mips64le*", "linux/amd64", "linux/amd64/v3", "linux/386", "linux/arm64", "linux/riscv64", "linux/ppc64le", "linux/s390x", "linux/mips64", "linux/arm/v7"},
			max:       4,
			expectedList: map[string][]string{
				"linux/amd64": {
					"linux/amd64/v2*",
					"linux/amd64",
					"linux/amd64/v3",
				},
				"linux/arm": {
					"linux/arm/v6*",
					"linux/arm/v7",
				},
				"linux/arm64": {
					"linux/arm64",
				},
				"linux/ppc64le": {
					"linux/ppc64le",
				},
			},
			expectedOut: "linux/amd64* (+3), linux/arm64, linux/arm* (+2), linux/ppc64le, (5 more)",
		},
		{
			name:      "no major with multiple variants",
			platforms: []string{"linux/arm64", "linux/arm/v7", "linux/arm/v6", "linux/mips64le/softfloat", "linux/mips64le/hardfloat"},
			max:       4,
			expectedList: map[string][]string{
				"linux/arm": {
					"linux/arm/v7",
					"linux/arm/v6",
				},
				"linux/arm64": {
					"linux/arm64",
				},
				"linux/mips64le": {
					"linux/mips64le/softfloat",
					"linux/mips64le/hardfloat",
				},
			},
			expectedOut: "linux/arm64, linux/arm (+2), linux/mips64le (+2)",
		},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			tpfs := truncPlatforms(tt.platforms, tt.max)
			assert.Equal(t, tt.expectedList, tpfs.List())
			assert.Equal(t, tt.expectedOut, tpfs.String())
		})
	}
}
commands/print.go (new file, 48 lines)
@@ -0,0 +1,48 @@
package commands

import (
	"fmt"
	"io"
	"log"
	"os"

	"github.com/docker/buildx/build"
	"github.com/docker/docker/api/types/versions"
	"github.com/moby/buildkit/frontend/subrequests"
	"github.com/moby/buildkit/frontend/subrequests/outline"
	"github.com/moby/buildkit/frontend/subrequests/targets"
)

func printResult(f *build.PrintFunc, res map[string]string) error {
	switch f.Name {
	case "outline":
		return printValue(outline.PrintOutline, outline.SubrequestsOutlineDefinition.Version, f.Format, res)
	case "targets":
		return printValue(targets.PrintTargets, targets.SubrequestsTargetsDefinition.Version, f.Format, res)
	case "subrequests.describe":
		return printValue(subrequests.PrintDescribe, subrequests.SubrequestsDescribeDefinition.Version, f.Format, res)
	default:
		if dt, ok := res["result.txt"]; ok {
			fmt.Print(dt)
		} else {
			log.Printf("%s %+v", f, res)
		}
	}
	return nil
}

type printFunc func([]byte, io.Writer) error

func printValue(printer printFunc, version string, format string, res map[string]string) error {
	if format == "json" {
		fmt.Fprintln(os.Stdout, res["result.json"])
		return nil
	}

	if res["version"] != "" && versions.LessThan(version, res["version"]) && res["result.txt"] != "" {
		// structure is too new and we don't know how to print it
		fmt.Fprint(os.Stdout, res["result.txt"])
		return nil
	}
	return printer([]byte(res["result.json"]), os.Stdout)
}
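`printValue` falls back to the pre-rendered text when the subrequest result declares a newer schema version than the client understands. `versions.LessThan` compares dotted version strings numerically, segment by segment; a small hedged usage sketch:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
)

func main() {
	// Numeric comparison per dot-separated segment, not lexicographic:
	fmt.Println(versions.LessThan("1.0.0", "1.1")) // true
	fmt.Println(versions.LessThan("1.10", "1.9"))  // false (10 > 9)
}
```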
@@ -1,7 +1,6 @@
package commands

import (
	"context"
	"fmt"
	"os"
	"strings"
@@ -9,30 +8,25 @@ import (
	"time"

	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/util/cobrautil/completion"
	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/opts"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/go-units"
	"github.com/moby/buildkit/client"
	gateway "github.com/moby/buildkit/frontend/gateway/client"
	pb "github.com/moby/buildkit/solver/pb"
	"github.com/moby/buildkit/util/apicaps"
	"github.com/moby/buildkit/util/appcontext"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"golang.org/x/sync/errgroup"
)

type pruneOptions struct {
	builder       string
	all           bool
	filter        opts.FilterOpt
	reservedSpace opts.MemBytes
	maxUsedSpace  opts.MemBytes
	minFreeSpace  opts.MemBytes
	force         bool
	verbose       bool
	builder     string
	all         bool
	filter      opts.FilterOpt
	keepStorage opts.MemBytes
	force       bool
	verbose     bool
}

const (
@@ -40,7 +34,9 @@ const (
	allCacheWarning = `WARNING! This will remove all build cache. Are you sure you want to continue?`
)

func runPrune(ctx context.Context, dockerCli command.Cli, opts pruneOptions) error {
func runPrune(dockerCli command.Cli, opts pruneOptions) error {
	ctx := appcontext.Context()

	pruneFilters := opts.filter.Value()
	pruneFilters = command.PruneFilters(dockerCli, pruneFilters)

@@ -54,12 +50,8 @@ func runPrune(ctx context.Context, dockerCli command.Cli, opts pruneOptions) err
		warning = allCacheWarning
	}

	if !opts.force {
		if ok, err := prompt(ctx, dockerCli.In(), dockerCli.Out(), warning); err != nil {
			return err
		} else if !ok {
			return nil
		}
	if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) {
		return nil
	}

	b, err := builder.New(dockerCli, builder.WithName(opts.builder))
@@ -67,7 +59,7 @@ func runPrune(ctx context.Context, dockerCli command.Cli, opts pruneOptions) err
		return err
	}

	nodes, err := b.LoadNodes(ctx)
	nodes, err := b.LoadNodes(ctx, false)
	if err != nil {
		return err
	}
@@ -110,19 +102,8 @@ func runPrune(ctx context.Context, dockerCli command.Cli, opts pruneOptions) err
	if err != nil {
		return err
	}
	// check if the client supports newer prune options
	if opts.maxUsedSpace.Value() != 0 || opts.minFreeSpace.Value() != 0 {
		caps, err := loadLLBCaps(ctx, c)
		if err != nil {
			return errors.Wrap(err, "failed to load buildkit capabilities for prune")
		}
		if caps.Supports(pb.CapGCFreeSpaceFilter) != nil {
			return errors.New("buildkit v0.17.0+ is required for max-used-space and min-free-space filters")
		}
	}

	popts := []client.PruneOption{
		client.WithKeepOpt(pi.KeepDuration, opts.reservedSpace.Value(), opts.maxUsedSpace.Value(), opts.minFreeSpace.Value()),
		client.WithKeepOpt(pi.KeepDuration, opts.keepStorage.Value()),
		client.WithFilter(pi.Filter),
	}
	if opts.all {
@@ -147,17 +128,6 @@ func runPrune(ctx context.Context, dockerCli command.Cli, opts pruneOptions) err
	return nil
}

func loadLLBCaps(ctx context.Context, c *client.Client) (apicaps.CapSet, error) {
	var caps apicaps.CapSet
	_, err := c.Build(ctx, client.SolveOpt{
		Internal: true,
	}, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
		caps = c.BuildOpts().LLBCaps
		return nil, nil
	}, nil)
	return caps, err
}

func pruneCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
	options := pruneOptions{filter: opts.NewFilterOpt()}

@@ -167,23 +137,17 @@ func pruneCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
		Args: cli.NoArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			options.builder = rootOpts.builder
			return runPrune(cmd.Context(), dockerCli, options)
			return runPrune(dockerCli, options)
		},
		ValidArgsFunction: completion.Disable,
	}

	flags := cmd.Flags()
	flags.BoolVarP(&options.all, "all", "a", false, "Include internal/frontend images")
	flags.Var(&options.filter, "filter", `Provide filter values (e.g., "until=24h")`)
	flags.Var(&options.reservedSpace, "reserved-space", "Amount of disk space always allowed to keep for cache")
	flags.Var(&options.minFreeSpace, "min-free-space", "Target amount of free disk space after pruning")
	flags.Var(&options.maxUsedSpace, "max-used-space", "Maximum amount of disk space allowed to keep for cache")
	flags.Var(&options.keepStorage, "keep-storage", "Amount of disk space to keep for cache")
	flags.BoolVar(&options.verbose, "verbose", false, "Provide a more verbose output")
	flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation")

	flags.Var(&options.reservedSpace, "keep-storage", "Amount of disk space to keep for cache")
	flags.MarkDeprecated("keep-storage", "keep-storage flag has been changed to max-storage")

	return cmd
}

@@ -227,8 +191,6 @@ func toBuildkitPruneInfo(f filters.Args) (*client.PruneInfo, error) {
	case 1:
		if filterKey == "id" {
			filters = append(filters, filterKey+"~="+values[0])
		} else if strings.HasSuffix(filterKey, "!") || strings.HasSuffix(filterKey, "~") {
			filters = append(filters, filterKey+"="+values[0])
		} else {
			filters = append(filters, filterKey+"=="+values[0])
		}
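`toBuildkitPruneInfo` rewrites Docker-style filters into BuildKit's filter grammar: `id` becomes a regex match (`~=`), keys already carrying a `!` or `~` operator suffix keep a single `=`, and everything else becomes an exact `==` match. A hedged standalone sketch of that rewrite rule:

```go
package main

import (
	"fmt"
	"strings"
)

// translate mirrors the suffix-based rewrite shown above (illustrative only).
func translate(key, value string) string {
	switch {
	case key == "id":
		return key + "~=" + value // regex match on build ids
	case strings.HasSuffix(key, "!") || strings.HasSuffix(key, "~"):
		return key + "=" + value // negation/regex operator already in the key
	default:
		return key + "==" + value // exact match
	}
}

func main() {
	fmt.Println(translate("id", "abc123"))      // id~=abc123
	fmt.Println(translate("type!", "frontend")) // type!=frontend
	fmt.Println(translate("until", "24h"))      // until==24h
}
```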
commands/rm.go (105 changed lines)
@@ -8,15 +8,16 @@ import (
	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/store"
	"github.com/docker/buildx/store/storeutil"
	"github.com/docker/buildx/util/cobrautil/completion"
	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/moby/buildkit/util/appcontext"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"golang.org/x/sync/errgroup"
)

type rmOptions struct {
	builders    []string
	builder     string
	keepState   bool
	keepDaemon  bool
	allInactive bool
@@ -27,13 +28,11 @@ const (
	rmInactiveWarning = `WARNING! This will remove all builders that are not in running state. Are you sure you want to continue?`
)

func runRm(ctx context.Context, dockerCli command.Cli, in rmOptions) error {
	if in.allInactive && !in.force {
		if ok, err := prompt(ctx, dockerCli.In(), dockerCli.Out(), rmInactiveWarning); err != nil {
			return err
		} else if !ok {
			return nil
		}
func runRm(dockerCli command.Cli, in rmOptions) error {
	ctx := appcontext.Context()

	if in.allInactive && !in.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), rmInactiveWarning) {
		return nil
	}

	txn, release, err := storeutil.GetStore(dockerCli)
@@ -46,52 +45,33 @@ func runRm(ctx context.Context, dockerCli command.Cli, in rmOptions) error {
		return rmAllInactive(ctx, txn, dockerCli, in)
	}

	eg, _ := errgroup.WithContext(ctx)
	for _, name := range in.builders {
		func(name string) {
			eg.Go(func() (err error) {
				defer func() {
					if err == nil {
						_, _ = fmt.Fprintf(dockerCli.Err(), "%s removed\n", name)
					} else {
						_, _ = fmt.Fprintf(dockerCli.Err(), "failed to remove %s: %v\n", name, err)
					}
				}()

				b, err := builder.New(dockerCli,
					builder.WithName(name),
					builder.WithStore(txn),
					builder.WithSkippedValidation(),
				)
				if err != nil {
					return err
				}

				nodes, err := b.LoadNodes(ctx)
				if err != nil {
					return err
				}

				if cb := b.ContextName(); cb != "" {
					return errors.Errorf("context builder cannot be removed, run `docker context rm %s` to remove this context", cb)
				}

				err1 := rm(ctx, nodes, in)
				if err := txn.Remove(b.Name); err != nil {
					return err
				}
				if err1 != nil {
					return err1
				}

				return nil
			})
		}(name)
	b, err := builder.New(dockerCli,
		builder.WithName(in.builder),
		builder.WithStore(txn),
		builder.WithSkippedValidation(),
	)
	if err != nil {
		return err
	}

	if err := eg.Wait(); err != nil {
		return errors.New("failed to remove one or more builders")
	nodes, err := b.LoadNodes(ctx, false)
	if err != nil {
		return err
	}

	if cb := b.ContextName(); cb != "" {
		return errors.Errorf("context builder cannot be removed, run `docker context rm %s` to remove this context", cb)
	}

	err1 := rm(ctx, nodes, in)
	if err := txn.Remove(b.Name); err != nil {
		return err
	}
	if err1 != nil {
		return err1
	}

	_, _ = fmt.Fprintf(dockerCli.Err(), "%s removed\n", b.Name)
	return nil
}

@@ -99,24 +79,24 @@ func rmCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
	var options rmOptions

	cmd := &cobra.Command{
		Use:   "rm [OPTIONS] [NAME] [NAME...]",
		Short: "Remove one or more builder instances",
		Use:   "rm [NAME]",
		Short: "Remove a builder instance",
		Args:  cli.RequiresMaxArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			options.builders = []string{rootOpts.builder}
			options.builder = rootOpts.builder
			if len(args) > 0 {
				if options.allInactive {
					return errors.New("cannot specify builder name when --all-inactive is set")
				}
				options.builders = args
				options.builder = args[0]
			}
			return runRm(cmd.Context(), dockerCli, options)
			return runRm(dockerCli, options)
		},
		ValidArgsFunction: completion.BuilderNames(dockerCli),
	}

	flags := cmd.Flags()
	flags.BoolVar(&options.keepState, "keep-state", false, "Keep BuildKit state")
	flags.BoolVar(&options.keepDaemon, "keep-daemon", false, "Keep the BuildKit daemon running")
	flags.BoolVar(&options.keepDaemon, "keep-daemon", false, "Keep the buildkitd daemon running")
	flags.BoolVar(&options.allInactive, "all-inactive", false, "Remove all inactive builders")
	flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation")

@@ -150,15 +130,14 @@ func rmAllInactive(ctx context.Context, txn *store.Txn, dockerCli command.Cli, i
		return err
	}

	timeoutCtx, cancel := context.WithCancelCause(ctx)
	timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
	defer func() { cancel(errors.WithStack(context.Canceled)) }()
	timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
	defer cancel()

	eg, _ := errgroup.WithContext(timeoutCtx)
	for _, b := range builders {
		func(b *builder.Builder) {
			eg.Go(func() error {
				nodes, err := b.LoadNodes(timeoutCtx, builder.WithData())
				nodes, err := b.LoadNodes(timeoutCtx, true)
				if err != nil {
					return errors.Wrapf(err, "cannot load %s", b.Name)
				}
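Both `runRm` and `rmAllInactive` fan work out per builder with `golang.org/x/sync/errgroup`, which cancels the shared context on the first failure and returns that error from `Wait`. A minimal hedged sketch of the pattern:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	eg, ctx := errgroup.WithContext(context.Background())
	for _, name := range []string{"builder1", "builder2"} {
		name := name // per-iteration copy, the pre-Go 1.22 idiom used in the diff
		eg.Go(func() error {
			select {
			case <-ctx.Done():
				return ctx.Err() // another goroutine already failed
			default:
				fmt.Println("removing", name)
				return nil
			}
		})
	}
	if err := eg.Wait(); err != nil {
		fmt.Println("failed:", err)
	}
}
```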
@@ -1,29 +1,20 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
debugcmd "github.com/docker/buildx/commands/debug"
|
||||
historycmd "github.com/docker/buildx/commands/history"
|
||||
imagetoolscmd "github.com/docker/buildx/commands/imagetools"
|
||||
"github.com/docker/buildx/controller/remote"
|
||||
"github.com/docker/buildx/util/cobrautil/completion"
|
||||
"github.com/docker/buildx/util/confutil"
|
||||
"github.com/docker/buildx/util/logutil"
|
||||
"github.com/docker/cli-docs-tool/annotation"
|
||||
"github.com/docker/cli/cli"
|
||||
"github.com/docker/cli/cli-plugins/plugin"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/debug"
|
||||
"github.com/moby/buildkit/util/appcontext"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Command {
|
||||
var opt rootOptions
|
||||
cmd := &cobra.Command{
|
||||
Short: "Docker Buildx",
|
||||
Long: `Extended build capabilities with BuildKit`,
|
||||
@@ -31,31 +22,12 @@ func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Comman
        Annotations: map[string]string{
            annotation.CodeDelimiter: `"`,
        },
        CompletionOptions: cobra.CompletionOptions{
            HiddenDefaultCmd: true,
        },
        PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
            if opt.debug {
                debug.Enable()
            }
            cmd.SetContext(appcontext.Context())
            if !isPlugin {
                return nil
            }
            return plugin.PersistentPreRunE(cmd, args)
        },
        RunE: func(cmd *cobra.Command, args []string) error {
            if len(args) == 0 {
                return cmd.Help()
            }
            _ = cmd.Help()
            return cli.StatusError{
                StatusCode: 1,
                Status:     fmt.Sprintf("ERROR: unknown command: %q", args[0]),
            }
        },
    }
    if !isPlugin {
    if isPlugin {
        cmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {
            return plugin.PersistentPreRunE(cmd, args)
        }
    } else {
        // match plugin behavior for standalone mode
        // https://github.com/docker/cli/blob/6c9eb708fa6d17765d71965f90e1c59cea686ee9/cli-plugins/plugin/plugin.go#L117-L127
        cmd.SilenceUsage = true
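The `RunE` unknown-command handler appears unchanged on both sides: with no arguments it prints help and exits cleanly, otherwise it prints help and returns a non-zero `cli.StatusError`. The sides differ only in whether `PersistentPreRunE` is declared inline or assigned afterwards under `if isPlugin`. A standalone sketch of the unknown-command pattern, with a plain `error` standing in for `cli.StatusError`:

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use:           "demo",
		SilenceUsage:  true, // as the standalone branch above sets
		SilenceErrors: true, // we print the error ourselves below
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) == 0 {
				return cmd.Help() // bare invocation: show help, exit 0
			}
			_ = cmd.Help() // unknown subcommand: show help, then fail
			return fmt.Errorf("unknown command: %q", args[0])
		},
	}
	if err := cmd.Execute(); err != nil {
		fmt.Fprintln(os.Stderr, "ERROR:", err)
		os.Exit(1)
	}
}
```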
@@ -75,27 +47,33 @@ func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Comman
            "using default config store",
        ))

    if !confutil.IsExperimental() {
        cmd.SetHelpTemplate(cmd.HelpTemplate() + "\nExperimental commands and flags are hidden. Set BUILDX_EXPERIMENTAL=1 to show them.\n")
    }
    // filter out useless commandConn.CloseWrite warning message that can occur
    // when listing builder instances with "buildx ls" for those that are
    // unreachable: "commandConn.CloseWrite: commandconn: failed to wait: signal: killed"
    // https://github.com/docker/cli/blob/3fb4fb83dfb5db0c0753a8316f21aea54dab32c5/cli/connhelper/commandconn/commandconn.go#L203-L214
    logrus.AddHook(logutil.NewFilter([]logrus.Level{
        logrus.WarnLevel,
    },
        "commandConn.CloseWrite:",
        "commandConn.CloseRead:",
    ))

    addCommands(cmd, &opt, dockerCli)
    addCommands(cmd, dockerCli)
    return cmd
}

type rootOptions struct {
    builder string
    debug   bool
}

func addCommands(cmd *cobra.Command, opts *rootOptions, dockerCli command.Cli) {
func addCommands(cmd *cobra.Command, dockerCli command.Cli) {
    opts := &rootOptions{}
    rootFlags(opts, cmd.PersistentFlags())

    cmd.AddCommand(
        buildCmd(dockerCli, opts, nil),
        buildCmd(dockerCli, opts),
        bakeCmd(dockerCli, opts),
        createCmd(dockerCli),
        dialStdioCmd(dockerCli, opts),
        rmCmd(dockerCli, opts),
        lsCmd(dockerCli),
        useCmd(dockerCli, opts),
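Both sides install a `logutil.NewFilter` hook to silence the noisy `commandConn` warnings described in the comment. `logutil` itself is not part of this diff; a hook with the same effect can be sketched as follows (an approximation, not buildx's actual implementation). The trick is that logrus fires hooks before writing an entry, so a hook may swap in a logger that writes to `io.Discard`:

```go
package main

import (
	"io"
	"strings"

	"github.com/sirupsen/logrus"
)

// filterHook suppresses entries whose message contains one of the given
// substrings by rerouting them to a discarding logger.
type filterHook struct {
	levels  []logrus.Level
	filters []string
	discard *logrus.Logger
}

func newFilterHook(levels []logrus.Level, filters ...string) logrus.Hook {
	d := logrus.New()
	d.SetOutput(io.Discard)
	return &filterHook{levels: levels, filters: filters, discard: d}
}

func (h *filterHook) Levels() []logrus.Level { return h.levels }

func (h *filterHook) Fire(entry *logrus.Entry) error {
	for _, f := range h.filters {
		if strings.Contains(entry.Message, f) {
			entry.Logger = h.discard // the write now goes to io.Discard
			return nil
		}
	}
	return nil
}

func main() {
	logrus.AddHook(newFilterHook([]logrus.Level{logrus.WarnLevel},
		"commandConn.CloseWrite:", "commandConn.CloseRead:"))
	logrus.Warn("commandConn.CloseWrite: commandconn: failed to wait") // suppressed
	logrus.Warn("anything else is logged normally")
}
```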
@@ -106,23 +84,10 @@ func addCommands(cmd *cobra.Command, opts *rootOptions, dockerCli command.Cli) {
        versionCmd(dockerCli),
        pruneCmd(dockerCli, opts),
        duCmd(dockerCli, opts),
        imagetoolscmd.RootCmd(cmd, dockerCli, imagetoolscmd.RootOptions{Builder: &opts.builder}),
        historycmd.RootCmd(cmd, dockerCli, historycmd.RootOptions{Builder: &opts.builder}),
    )
    if confutil.IsExperimental() {
        cmd.AddCommand(debugcmd.RootCmd(dockerCli,
            newDebuggableBuild(dockerCli, opts),
        ))
        remote.AddControllerCommands(cmd, dockerCli)
    }

    cmd.RegisterFlagCompletionFunc( //nolint:errcheck
        "builder",
        completion.BuilderNames(dockerCli),
        imagetoolscmd.RootCmd(dockerCli, imagetoolscmd.RootOptions{Builder: &opts.builder}),
    )
}

func rootFlags(options *rootOptions, flags *pflag.FlagSet) {
    flags.StringVar(&options.builder, "builder", os.Getenv("BUILDX_BUILDER"), "Override the configured builder instance")
    flags.BoolVarP(&options.debug, "debug", "D", debug.IsEnabled(), "Enable debug logging")
}
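`rootFlags` seeds `--builder` from the `BUILDX_BUILDER` environment variable, and one side of the hunk additionally registers shell completion for that flag. The two pieces in isolation (completion values are hard-coded here, where buildx uses `completion.BuilderNames` to list real builders):

```go
package main

import (
	"os"

	"github.com/spf13/cobra"
)

func main() {
	var builder string
	var debugEnabled bool

	cmd := &cobra.Command{Use: "demo"}
	flags := cmd.PersistentFlags()
	// The environment variable supplies the default; an explicit flag wins.
	flags.StringVar(&builder, "builder", os.Getenv("BUILDX_BUILDER"), "Override the configured builder instance")
	flags.BoolVarP(&debugEnabled, "debug", "D", false, "Enable debug logging")

	// Static values stand in for a real builder lookup.
	_ = cmd.RegisterFlagCompletionFunc("builder",
		func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return []string{"default", "remote-builder"}, cobra.ShellCompDirectiveNoFileComp
		})

	_ = cmd.Execute()
}
```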
@@ -4,9 +4,9 @@ import (
    "context"

    "github.com/docker/buildx/builder"
    "github.com/docker/buildx/util/cobrautil/completion"
    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/command"
    "github.com/moby/buildkit/util/appcontext"
    "github.com/spf13/cobra"
)
@@ -14,7 +14,9 @@ type stopOptions struct {
    builder string
}

func runStop(ctx context.Context, dockerCli command.Cli, in stopOptions) error {
func runStop(dockerCli command.Cli, in stopOptions) error {
    ctx := appcontext.Context()

    b, err := builder.New(dockerCli,
        builder.WithName(in.builder),
        builder.WithSkippedValidation(),
@@ -22,7 +24,7 @@ func runStop(ctx context.Context, dockerCli command.Cli, in stopOptions) error {
    if err != nil {
        return err
    }
    nodes, err := b.LoadNodes(ctx)
    nodes, err := b.LoadNodes(ctx, false)
    if err != nil {
        return err
    }
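The stop hunks show the same `LoadNodes` signature difference as `rm.go`: a positional bool on one side, a functional option (`builder.WithData()`) on the other. A generic sketch of that API shape, with illustrative names rather than buildx's actual types:

```go
package main

import "fmt"

// loadConfig collects settings that would otherwise be bare positional bools.
type loadConfig struct {
	withData bool
}

// LoadOption is the functional-option type.
type LoadOption func(*loadConfig)

// WithData enables the extra data lookup; the name documents the call site.
func WithData() LoadOption {
	return func(c *loadConfig) { c.withData = true }
}

func LoadNodes(opts ...LoadOption) {
	var cfg loadConfig
	for _, opt := range opts {
		opt(&cfg)
	}
	fmt.Println("withData:", cfg.withData)
}

func main() {
	LoadNodes()           // defaults: withData false
	LoadNodes(WithData()) // reads better than LoadNodes(true)
}
```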
@@ -42,9 +44,8 @@ func stopCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
            if len(args) > 0 {
                options.builder = args[0]
            }
            return runStop(cmd.Context(), dockerCli, options)
            return runStop(dockerCli, options)
        },
        ValidArgsFunction: completion.BuilderNames(dockerCli),
    }

    return cmd
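One side of `runStop` constructs its own context with `appcontext.Context()`; the other receives `cmd.Context()` threaded through from cobra, which keeps the function testable and honors whatever cancellation the caller set up. A minimal sketch of the threaded style (names illustrative):

```go
package main

import (
	"context"
	"fmt"

	"github.com/spf13/cobra"
)

// runStop receives its context instead of building one internally.
func runStop(ctx context.Context, name string) error {
	select {
	case <-ctx.Done():
		return context.Cause(ctx)
	default:
		fmt.Println("stopping", name)
		return nil
	}
}

func main() {
	cmd := &cobra.Command{
		Use: "stop [name]",
		RunE: func(cmd *cobra.Command, args []string) error {
			name := "default"
			if len(args) > 0 {
				name = args[0]
			}
			// cmd.Context() carries the context given to ExecuteContext.
			return runStop(cmd.Context(), name)
		},
	}
	if err := cmd.ExecuteContext(context.Background()); err != nil {
		fmt.Println(err)
	}
}
```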
@@ -4,7 +4,6 @@ import (
    "os"

    "github.com/docker/buildx/util/cobrautil"
    "github.com/docker/buildx/util/cobrautil/completion"
    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/cli/config"
@@ -15,7 +14,7 @@ import (
type uninstallOptions struct {
}

func runUninstall(_ command.Cli, _ uninstallOptions) error {
func runUninstall(dockerCli command.Cli, in uninstallOptions) error {
    dir := config.Dir()
    cfg, err := config.Load(dir)
    if err != nil {
@@ -53,8 +52,7 @@ func uninstallCmd(dockerCli command.Cli) *cobra.Command {
        RunE: func(cmd *cobra.Command, args []string) error {
            return runUninstall(dockerCli, options)
        },
        Hidden: true,
        ValidArgsFunction: completion.Disable,
        Hidden: true,
    }

    // hide builder persistent flag for this command
@@ -4,7 +4,6 @@ import (
    "os"

    "github.com/docker/buildx/store/storeutil"
    "github.com/docker/buildx/util/cobrautil/completion"
    "github.com/docker/buildx/util/dockerutil"
    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/command"
@@ -35,7 +34,10 @@ func runUse(dockerCli command.Cli, in useOptions) error {
            if err != nil {
                return err
            }
            return txn.SetCurrent(ep, "", false, false)
            if err := txn.SetCurrent(ep, "", false, false); err != nil {
                return err
            }
            return nil
        }
        list, err := dockerCli.ContextStore().List()
        if err != nil {
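The `SetCurrent` change above is purely stylistic: returning the callee's error directly and the expanded `if err := ...; err != nil` form behave identically, the longer form simply leaves room for extra steps before returning. For instance:

```go
package main

import "errors"

func setCurrent(name string) error {
	if name == "" {
		return errors.New("empty name")
	}
	return nil
}

// Direct form: hand the callee's error straight back.
func useA(name string) error {
	return setCurrent(name)
}

// Expanded form: identical behavior, just more room to grow.
func useB(name string) error {
	if err := setCurrent(name); err != nil {
		return err
	}
	return nil
}

func main() {
	_ = useA("builder1")
	_ = useB("builder1")
}
```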
@@ -46,6 +48,7 @@ func runUse(dockerCli command.Cli, in useOptions) error {
                return errors.Errorf("run `docker context use %s` to switch to context %s", in.builder, in.builder)
            }
        }

        }
        return errors.Wrapf(err, "failed to find instance %q", in.builder)
    }
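`errors.Wrapf` here comes from `github.com/pkg/errors`. Assuming a v0.9+ vendoring, where its wrapper types implement `Unwrap`, the annotated error still matches the original cause via the standard library's `errors.Is`:

```go
package main

import (
	stderrors "errors"
	"fmt"
	"os"

	"github.com/pkg/errors"
)

func findInstance(name string) error {
	// Wrap the underlying failure with the instance name, keeping the cause.
	return errors.Wrapf(os.ErrNotExist, "failed to find instance %q", name)
}

func main() {
	err := findInstance("mybuilder")
	fmt.Println(err)                               // failed to find instance "mybuilder": file does not exist
	fmt.Println(stderrors.Is(err, os.ErrNotExist)) // true: the chain is preserved
}
```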
@@ -54,7 +57,11 @@ func runUse(dockerCli command.Cli, in useOptions) error {
    if err != nil {
        return err
    }
    return txn.SetCurrent(ep, in.builder, in.isGlobal, in.isDefault)
    if err := txn.SetCurrent(ep, in.builder, in.isGlobal, in.isDefault); err != nil {
        return err
    }

    return nil
}

func useCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
@@ -71,7 +78,6 @@ func useCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
            }
            return runUse(dockerCli, options)
        },
        ValidArgsFunction: completion.BuilderNames(dockerCli),
    }

    flags := cmd.Flags()
Some files were not shown because too many files have changed in this diff.