Compare commits


6 Commits

CrazyMax
2af40b75b7 Merge pull request #1415 from jedevc/cherry-pick-1383-to-0.9
[0.9] driver: don't create tracer delegate opt if tracer is nil
2022-11-21 10:29:09 +01:00
Justin Chadwell
83f3691c15 driver: don't create tracer delegate opt if tracer is nil
The error handling for the cast to client.TracerDelegate was incorrect:
previously, the client would unconditionally append the opt.

This resulted in a scenario where the ClientOpt itself was not nil, but the
tracer delegate inside it was, which is not an error case that buildkit
explicitly handles.

Signed-off-by: Justin Chadwell <me@jedevc.com>
2022-11-18 13:36:31 +00:00
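A minimal sketch of the nil-check pattern this commit message describes: only create the tracer-delegate option when the type assertion actually succeeds, instead of appending it unconditionally. The types below are hypothetical stand-ins for the buildkit client API (client.TracerDelegate, client.ClientOpt), not the actual buildx driver code.

```go
package main

import "fmt"

// Hypothetical stand-ins for the buildkit client types named in the commit
// message (the real ones live in github.com/moby/buildkit/client).
type TracerDelegate interface {
	Tracer() string
}

type ClientOpt func(cfg *clientConfig)

type clientConfig struct {
	tracer TracerDelegate
}

func withTracerDelegate(td TracerDelegate) ClientOpt {
	return func(cfg *clientConfig) { cfg.tracer = td }
}

// clientOpts appends the tracer-delegate opt only when the cast succeeds,
// rather than unconditionally handing the client a ClientOpt that wraps a
// nil tracer.
func clientOpts(exporter interface{}) []ClientOpt {
	var opts []ClientOpt
	if td, ok := exporter.(TracerDelegate); ok {
		opts = append(opts, withTracerDelegate(td))
	}
	return opts
}

func main() {
	// No tracer available: no opt is created at all.
	fmt.Println(len(clientOpts(nil))) // 0
}
```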
CrazyMax
4e93e87991 Merge pull request #1409 from jedevc/cherry-pick-1406-to-0.9
[0.9] Synchronise access to the map when printing
2022-11-18 13:48:30 +01:00
Felix de Souza
3f1516d3fe Synchronise access to the map when printing.
Signed-off-by: Felix de Souza <fdesouza@palantir.com>
2022-11-15 15:54:43 +00:00
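A minimal sketch of the pattern behind "synchronise access to the map when printing": take the same mutex in both the writers and the printing path so a concurrent update cannot race with the range over the map. The statusPrinter type and its fields are hypothetical, not the actual buildx progress printer.

```go
package main

import (
	"fmt"
	"sync"
)

// statusPrinter is a hypothetical stand-in for a progress printer whose
// status map is updated from several goroutines while another prints it.
type statusPrinter struct {
	mu       sync.Mutex
	statuses map[string]string
}

func (p *statusPrinter) Update(name, status string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.statuses == nil {
		p.statuses = map[string]string{}
	}
	p.statuses[name] = status
}

// Print holds the same lock while ranging over the map, so a concurrent
// Update cannot trigger Go's "concurrent map read and map write" fatal error.
func (p *statusPrinter) Print() {
	p.mu.Lock()
	defer p.mu.Unlock()
	for name, status := range p.statuses {
		fmt.Printf("%s: %s\n", name, status)
	}
}

func main() {
	var p statusPrinter
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			p.Update(fmt.Sprintf("step-%d", i), "done")
		}(i)
	}
	wg.Wait()
	p.Print()
}
```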
Tõnis Tiigi
09d1e1ee99 Merge pull request #1348 from AkihiroSuda/gcos-rootless-0.9
[v0.9] kubernetes: rootless: support Google Container-Optimized OS
2022-11-01 22:34:53 -07:00
Akihiro Suda
2e9906ba20 kubernetes: rootless: support Google Container-Optimized OS
Tested with GKE Autopilot 1.24.3-gke.200 (kernel 5.10.123+, containerd 1.6.6).

ref: moby/buildkit PR 3097

Signed-off-by: Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
(cherry picked from commit 33e5f47c6c)
Signed-off-by: Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
2022-10-07 18:57:26 +09:00
1149 changed files with 20942 additions and 93084 deletions

View File

@@ -1 +1,3 @@
bin/
cross-out/
release-out/

View File

@@ -16,19 +16,14 @@ on:
branches:
- 'master'
- 'v[0-9]*'
paths-ignore:
- 'README.md'
- 'docs/**'
env:
BUILDX_VERSION: "v0.10.0-rc1"
BUILDKIT_IMAGE: "moby/buildkit:v0.11.0-rc3"
REPO_SLUG: "docker/buildx-bin"
DESTDIR: "./bin"
RELEASE_OUT: "./release-out"
jobs:
test:
runs-on: ubuntu-22.04
runs-on: ubuntu-latest
steps:
-
name: Checkout
@@ -37,9 +32,7 @@ jobs:
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
version: ${{ env.BUILDX_VERSION }}
driver-opts: image=${{ env.BUILDKIT_IMAGE }}
buildkitd-flags: --debug
version: latest
-
name: Test
uses: docker/bake-action@v2
@@ -52,10 +45,10 @@ jobs:
name: Upload coverage
uses: codecov/codecov-action@v3
with:
directory: ${{ env.DESTDIR }}/coverage
file: ./coverage/coverage.txt
prepare:
runs-on: ubuntu-22.04
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.platforms.outputs.matrix }}
steps:
@@ -66,14 +59,14 @@ jobs:
name: Create matrix
id: platforms
run: |
echo "matrix=$(docker buildx bake binaries-cross --print | jq -cr '.target."binaries-cross".platforms')" >>${GITHUB_OUTPUT}
echo ::set-output name=matrix::$(docker buildx bake binaries-cross --print | jq -cr '.target."binaries-cross".platforms')
-
name: Show matrix
run: |
echo ${{ steps.platforms.outputs.matrix }}
binaries:
runs-on: ubuntu-22.04
runs-on: ubuntu-latest
needs:
- prepare
strategy:
@@ -96,27 +89,26 @@ jobs:
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
version: ${{ env.BUILDX_VERSION }}
driver-opts: image=${{ env.BUILDKIT_IMAGE }}
buildkitd-flags: --debug
version: latest
-
name: Build
run: |
make release
env:
PLATFORMS: ${{ matrix.platform }}
CACHE_FROM: type=gha,scope=binaries-${{ env.PLATFORM_PAIR }}
CACHE_TO: type=gha,scope=binaries-${{ env.PLATFORM_PAIR }},mode=max
uses: docker/bake-action@v2
with:
targets: release
set: |
*.platform=${{ matrix.platform }}
*.cache-from=type=gha,scope=binaries-${{ env.PLATFORM_PAIR }}
*.cache-to=type=gha,scope=binaries-${{ env.PLATFORM_PAIR }},mode=max
-
name: Upload artifacts
uses: actions/upload-artifact@v3
with:
name: buildx
path: ${{ env.DESTDIR }}/*
path: ${{ env.RELEASE_OUT }}/*
if-no-files-found: error
bin-image:
runs-on: ubuntu-22.04
runs-on: ubuntu-latest
if: ${{ github.event_name != 'pull_request' }}
steps:
-
@@ -129,9 +121,7 @@ jobs:
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
version: ${{ env.BUILDX_VERSION }}
driver-opts: image=${{ env.BUILDKIT_IMAGE }}
buildkitd-flags: --debug
version: latest
-
name: Docker meta
id: meta
@@ -163,11 +153,9 @@ jobs:
set: |
*.cache-from=type=gha,scope=bin-image
*.cache-to=type=gha,scope=bin-image,mode=max
*.attest=type=sbom
*.attest=type=provenance,mode=max,builder-id=https://github.com/${{ env.GITHUB_REPOSITORY }}/actions/runs/${{ env.GITHUB_RUN_ID }}
release:
runs-on: ubuntu-22.04
runs-on: ubuntu-latest
needs:
- binaries
steps:
@@ -179,30 +167,30 @@ jobs:
uses: actions/download-artifact@v3
with:
name: buildx
path: ${{ env.DESTDIR }}
path: ${{ env.RELEASE_OUT }}
-
name: Create checksums
run: ./hack/hash-files
-
name: List artifacts
run: |
tree -nh ${{ env.DESTDIR }}
tree -nh ${{ env.RELEASE_OUT }}
-
name: Check artifacts
run: |
find ${{ env.DESTDIR }} -type f -exec file -e ascii -- {} +
find ${{ env.RELEASE_OUT }} -type f -exec file -e ascii -- {} +
-
name: GitHub Release
if: startsWith(github.ref, 'refs/tags/v')
uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v0.1.15
uses: softprops/action-gh-release@1e07f4398721186383de40550babbdf2b84acfc5
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
draft: true
files: ${{ env.DESTDIR }}/*
files: ${{ env.RELEASE_OUT }}/*
buildkit-edge:
runs-on: ubuntu-22.04
runs-on: ubuntu-latest
continue-on-error: true
steps:
-
@@ -215,7 +203,7 @@ jobs:
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
version: ${{ env.BUILDX_VERSION }}
version: latest
driver-opts: image=moby/buildkit:master
buildkitd-flags: --debug
-

View File

@@ -2,21 +2,19 @@ name: docs-release
on:
release:
types:
- releases
types: [ released ]
jobs:
open-pr:
runs-on: ubuntu-22.04
if: "!github.event.release.prerelease"
runs-on: ubuntu-latest
steps:
-
name: Checkout docs repo
uses: actions/checkout@v3
with:
token: ${{ secrets.GHPAT_DOCS_DISPATCH }}
repository: docker/docs
ref: main
repository: docker/docker.github.io
ref: master
-
name: Prepare
run: |
@@ -44,7 +42,7 @@ jobs:
git add -A .
-
name: Create PR on docs repo
uses: peter-evans/create-pull-request@2b011faafdcbc9ceb11414d64d0573f37c774b04
uses: peter-evans/create-pull-request@923ad837f191474af6b1721408744feb989a4c27 # v4.0.4
with:
token: ${{ secrets.GHPAT_DOCS_DISPATCH }}
push-to-fork: docker-tools-robot/docker.github.io

View File

@@ -22,7 +22,7 @@ on:
jobs:
docs-yaml:
runs-on: ubuntu-22.04
runs-on: ubuntu-latest
steps:
-
name: Checkout
@@ -52,10 +52,67 @@ jobs:
retention-days: 1
validate:
uses: docker/docs/.github/workflows/validate-upstream.yml@main
runs-on: ubuntu-latest
needs:
- docs-yaml
with:
repo: https://github.com/${{ github.repository }}
data-files-id: docs-yaml
data-files-folder: buildx
steps:
-
name: Checkout
uses: actions/checkout@v3
with:
repository: docker/docker.github.io
-
name: Install js-yaml
run: npm install js-yaml
-
# use the actual buildx ref that triggers this workflow, so we make
# sure pages fetched by docs repo are still valid
# https://github.com/docker/docker.github.io/blob/98c7c9535063ae4cd2cd0a31478a21d16d2f07a3/_config.yml#L164-L173
name: Set correct ref to fetch remote resources
uses: actions/github-script@v6
with:
script: |
const fs = require('fs');
const yaml = require('js-yaml');
const configFile = '_config.yml'
const config = yaml.load(fs.readFileSync(configFile, 'utf8'));
for (const remote of config['fetch-remote']) {
if (remote['repo'] != 'https://github.com/docker/buildx') {
continue;
}
remote['ref'] = "${{ github.ref }}";
}
try {
fs.writeFileSync(configFile, yaml.dump(config), 'utf8')
} catch (err) {
console.error(err.message)
process.exit(1)
}
-
name: Prepare
run: |
# print docs jekyll config updated in previous step
yq _config.yml
# cleanup reference yaml docs and js-yaml module
rm -rf ./_data/buildx/* ./node_modules
-
name: Download built reference YAML docs
uses: actions/download-artifact@v3
with:
name: docs-yaml
path: ./_data/buildx/
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
version: latest
-
name: Validate
uses: docker/bake-action@v2
with:
targets: validate
set: |
*.cache-from=type=gha,scope=docs-upstream
*.cache-to=type=gha,scope=docs-upstream,mode=max

View File

@@ -14,16 +14,12 @@ on:
branches:
- 'master'
- 'v[0-9]*'
paths-ignore:
- 'README.md'
- 'docs/**'
env:
DESTDIR: "./bin"
jobs:
build:
runs-on: ubuntu-22.04
runs-on: ubuntu-20.04
env:
BIN_OUT: ./bin
steps:
- name: Checkout
uses: actions/checkout@v3
@@ -44,13 +40,13 @@ jobs:
-
name: Rename binary
run: |
mv ${{ env.DESTDIR }}/build/buildx ${{ env.DESTDIR }}/build/docker-buildx
mv ${{ env.BIN_OUT }}/buildx ${{ env.BIN_OUT }}/docker-buildx
-
name: Upload artifacts
uses: actions/upload-artifact@v3
with:
name: binary
path: ${{ env.DESTDIR }}/build
path: ${{ env.BIN_OUT }}
if-no-files-found: error
retention-days: 7

View File

@@ -19,7 +19,7 @@ on:
jobs:
validate:
runs-on: ubuntu-22.04
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:

.gitignore
View File

@@ -1 +1,4 @@
/bin
bin
coverage
cross-out
release-out

View File

@@ -21,7 +21,7 @@ linters:
- revive
- staticcheck
- typecheck
- nolintlint
- structcheck
disable-all: true
linters-settings:

View File

@@ -1,6 +1,6 @@
# syntax=docker/dockerfile-upstream:master
# syntax=docker/dockerfile:1.4
ARG GO_VERSION=1.19
ARG GO_VERSION=1.18
ARG XX_VERSION=1.1.2
ARG DOCKERD_VERSION=20.10.14
@@ -19,24 +19,20 @@ ENV CGO_ENABLED=0
WORKDIR /src
FROM gobase AS buildx-version
RUN --mount=type=bind,target=. <<EOT
set -e
mkdir /buildx-version
echo -n "$(./hack/git-meta version)" | tee /buildx-version/version
echo -n "$(./hack/git-meta revision)" | tee /buildx-version/revision
EOT
RUN --mount=target=. \
PKG=github.com/docker/buildx VERSION=$(git describe --match 'v[0-9]*' --dirty='.m' --always --tags) REVISION=$(git rev-parse HEAD)$(if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi); \
echo "-X ${PKG}/version.Version=${VERSION} -X ${PKG}/version.Revision=${REVISION} -X ${PKG}/version.Package=${PKG}" | tee /tmp/.ldflags; \
echo -n "${VERSION}" | tee /tmp/.version;
FROM gobase AS buildx-build
ARG LDFLAGS="-w -s"
ARG TARGETPLATFORM
RUN --mount=type=bind,target=. \
--mount=type=cache,target=/root/.cache \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,from=buildx-version,source=/buildx-version,target=/buildx-version <<EOT
set -e
xx-go --wrap
DESTDIR=/usr/bin VERSION=$(cat /buildx-version/version) REVISION=$(cat /buildx-version/revision) GO_EXTRA_LDFLAGS="-s -w" ./hack/build
xx-verify --static /usr/bin/docker-buildx
EOT
--mount=type=bind,source=/tmp/.ldflags,target=/tmp/.ldflags,from=buildx-version \
set -x; xx-go build -ldflags "$(cat /tmp/.ldflags) ${LDFLAGS}" -o /usr/bin/buildx ./cmd/buildx && \
xx-verify --static /usr/bin/buildx
FROM gobase AS test
RUN --mount=type=bind,target=. \
@@ -49,28 +45,23 @@ FROM scratch AS test-coverage
COPY --from=test /tmp/coverage.txt /coverage.txt
FROM scratch AS binaries-unix
COPY --link --from=buildx-build /usr/bin/docker-buildx /buildx
COPY --link --from=buildx-build /usr/bin/buildx /
FROM binaries-unix AS binaries-darwin
FROM binaries-unix AS binaries-linux
FROM scratch AS binaries-windows
COPY --link --from=buildx-build /usr/bin/docker-buildx /buildx.exe
COPY --link --from=buildx-build /usr/bin/buildx /buildx.exe
FROM binaries-$TARGETOS AS binaries
# enable scanning for this stage
ARG BUILDKIT_SBOM_SCAN_STAGE=true
# Release
FROM --platform=$BUILDPLATFORM alpine AS releaser
WORKDIR /work
ARG TARGETPLATFORM
RUN --mount=from=binaries \
--mount=type=bind,from=buildx-version,source=/buildx-version,target=/buildx-version <<EOT
set -e
mkdir -p /out
cp buildx* "/out/buildx-$(cat /buildx-version/version).$(echo $TARGETPLATFORM | sed 's/\//-/g')$(ls buildx* | sed -e 's/^buildx//')"
EOT
--mount=type=bind,source=/tmp/.version,target=/tmp/.version,from=buildx-version \
mkdir -p /out && cp buildx* "/out/buildx-$(cat /tmp/.version).$(echo $TARGETPLATFORM | sed 's/\//-/g')$(ls buildx* | sed -e 's/^buildx//')"
FROM scratch AS release
COPY --from=releaser /out/ /

View File

@@ -4,77 +4,59 @@ else ifneq (, $(shell docker buildx version))
export BUILDX_CMD = docker buildx
else ifneq (, $(shell which buildx))
export BUILDX_CMD = $(which buildx)
else
$(error "Buildx is required: https://github.com/docker/buildx#installing")
endif
export BUILDX_CMD ?= docker buildx
export BIN_OUT = ./bin
export RELEASE_OUT = ./release-out
.PHONY: all
all: binaries
.PHONY: build
build:
./hack/build
.PHONY: shell
shell:
./hack/shell
.PHONY: binaries
binaries:
$(BUILDX_CMD) bake binaries
.PHONY: binaries-cross
binaries-cross:
$(BUILDX_CMD) bake binaries-cross
.PHONY: install
install: binaries
mkdir -p ~/.docker/cli-plugins
install bin/build/buildx ~/.docker/cli-plugins/docker-buildx
install bin/buildx ~/.docker/cli-plugins/docker-buildx
.PHONY: release
release:
./hack/release
.PHONY: validate-all
validate-all: lint test validate-vendor validate-docs
.PHONY: lint
lint:
$(BUILDX_CMD) bake lint
.PHONY: test
test:
$(BUILDX_CMD) bake test
.PHONY: validate-vendor
validate-vendor:
$(BUILDX_CMD) bake validate-vendor
.PHONY: validate-docs
validate-docs:
$(BUILDX_CMD) bake validate-docs
.PHONY: validate-authors
validate-authors:
$(BUILDX_CMD) bake validate-authors
.PHONY: test-driver
test-driver:
./hack/test-driver
.PHONY: vendor
vendor:
./hack/update-vendor
.PHONY: docs
docs:
./hack/update-docs
.PHONY: authors
authors:
$(BUILDX_CMD) bake update-authors
.PHONY: mod-outdated
mod-outdated:
$(BUILDX_CMD) bake mod-outdated
.PHONY: shell binaries binaries-cross install release validate-all lint validate-vendor validate-docs validate-authors vendor docs authors

View File

@@ -32,18 +32,15 @@ Key features:
- [Building with buildx](#building-with-buildx)
- [Working with builder instances](#working-with-builder-instances)
- [Building multi-platform images](#building-multi-platform-images)
- [Manuals](docs/manuals)
- [High-level build options with Bake](docs/manuals/bake/index.md)
- [Drivers](docs/manuals/drivers/index.md)
- [Exporters](docs/manuals/exporters/index.md)
- [Cache backends](docs/manuals/cache/backends/index.md)
- [Guides](docs/guides)
- [High-level build options with Bake](docs/guides/bake/index.md)
- [CI/CD](docs/guides/cicd.md)
- [CNI networking](docs/guides/cni-networking.md)
- [Using a custom network](docs/guides/custom-network.md)
- [Using a custom registry configuration](docs/guides/custom-registry-config.md)
- [OpenTelemetry support](docs/guides/opentelemetry.md)
- [Registry mirror](docs/guides/registry-mirror.md)
- [Drivers](docs/guides/drivers/index.md)
- [Resource limiting](docs/guides/resource-limiting.md)
- [Reference](docs/reference/buildx.md)
- [`buildx bake`](docs/reference/buildx_bake.md)
@@ -126,8 +123,7 @@ On Windows:
Here is how to install and use Buildx inside a Dockerfile through the
[`docker/buildx-bin`](https://hub.docker.com/r/docker/buildx-bin) image:
```dockerfile
# syntax=docker/dockerfile:1
```Dockerfile
FROM docker
COPY --from=docker/buildx-bin /buildx /usr/libexec/docker/cli-plugins/docker-buildx
RUN docker buildx version
@@ -194,12 +190,12 @@ through various "drivers". Each driver defines how and where a build should
run, and have different feature sets.
We currently support the following drivers:
- The `docker` driver ([guide](docs/manuals/drivers/docker.md), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
- The `docker-container` driver ([guide](docs/manuals/drivers/docker-container.md), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
- The `kubernetes` driver ([guide](docs/manuals/drivers/kubernetes.md), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
- The `remote` driver ([guide](docs/manuals/drivers/remote.md))
- The `docker` driver ([guide](docs/guides/drivers/docker.md), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
- The `docker-container` driver ([guide](docs/guides/drivers/docker-container.md), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
- The `kubernetes` driver ([guide](docs/guides/drivers/kubernetes.md), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
- The `remote` driver ([guide](docs/guides/drivers/remote.md))
For more information on drivers, see the [drivers guide](docs/manuals/drivers/index.md).
For more information on drivers, see the [drivers guide](docs/guides/drivers/index.md).
## Working with builder instances
@@ -302,7 +298,6 @@ inside your Dockerfile and can be leveraged by the processes running as part
of your build.
```dockerfile
# syntax=docker/dockerfile:1
FROM --platform=$BUILDPLATFORM golang:alpine AS build
ARG TARGETPLATFORM
ARG BUILDPLATFORM
@@ -316,7 +311,7 @@ cross-compilation helpers for more advanced use-cases.
## High-level build options
See [`docs/manuals/bake/index.md`](docs/manuals/bake/index.md) for more details.
See [`docs/guides/bake/index.md`](docs/guides/bake/index.md) for more details.
# Contributing

View File

@@ -84,7 +84,7 @@ func ReadLocalFiles(names []string) ([]File, error) {
return out, nil
}
func ReadTargets(ctx context.Context, files []File, targets, overrides []string, defaults map[string]string) (map[string]*Target, map[string]*Group, error) {
func ReadTargets(ctx context.Context, files []File, targets, overrides []string, defaults map[string]string) (map[string]*Target, []*Group, error) {
c, err := ParseFiles(files, defaults)
if err != nil {
return nil, nil, err
@@ -99,39 +99,42 @@ func ReadTargets(ctx context.Context, files []File, targets, overrides []string,
return nil, nil, err
}
m := map[string]*Target{}
n := map[string]*Group{}
for _, target := range targets {
ts, gs := c.ResolveGroup(target)
for _, tname := range ts {
t, err := c.ResolveTarget(tname, o)
for _, n := range targets {
for _, n := range c.ResolveGroup(n) {
t, err := c.ResolveTarget(n, o)
if err != nil {
return nil, nil, err
}
if t != nil {
m[tname] = t
}
}
for _, gname := range gs {
for _, group := range c.Groups {
if group.Name == gname {
n[gname] = group
break
}
m[n] = t
}
}
}
for _, target := range targets {
if target == "default" {
continue
var g []*Group
if len(targets) == 0 || (len(targets) == 1 && targets[0] == "default") {
for _, group := range c.Groups {
if group.Name != "default" {
continue
}
g = []*Group{{Targets: group.Targets}}
}
if _, ok := n["default"]; !ok {
n["default"] = &Group{Name: "default"}
} else {
var gt []string
for _, target := range targets {
isGroup := false
for _, group := range c.Groups {
if target == group.Name {
gt = append(gt, group.Targets...)
isGroup = true
break
}
}
if !isGroup {
gt = append(gt, target)
}
}
n["default"].Targets = append(n["default"].Targets, target)
}
if g, ok := n["default"]; ok {
g.Targets = dedupSlice(g.Targets)
g = []*Group{{Targets: dedupSlice(gt)}}
}
for name, t := range m {
@@ -140,7 +143,7 @@ func ReadTargets(ctx context.Context, files []File, targets, overrides []string,
}
}
return m, n, nil
return m, g, nil
}
func dedupSlice(s []string) []string {
@@ -270,8 +273,8 @@ func ParseFile(dt []byte, fn string) (*Config, error) {
}
type Config struct {
Groups []*Group `json:"group" hcl:"group,block" cty:"group"`
Targets []*Target `json:"target" hcl:"target,block" cty:"target"`
Groups []*Group `json:"group" hcl:"group,block"`
Targets []*Target `json:"target" hcl:"target,block"`
}
func mergeConfig(c1, c2 Config) Config {
@@ -417,7 +420,7 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
o := t[kk[1]]
switch keys[1] {
case "output", "cache-to", "cache-from", "tags", "platform", "secrets", "ssh", "attest":
case "output", "cache-to", "cache-from", "tags", "platform", "secrets", "ssh":
if len(parts) == 2 {
o.ArrValue = append(o.ArrValue, parts[1])
}
@@ -450,19 +453,13 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
return m, nil
}
func (c Config) ResolveGroup(name string) ([]string, []string) {
targets, groups := c.group(name, map[string]visit{})
return dedupSlice(targets), dedupSlice(groups)
func (c Config) ResolveGroup(name string) []string {
return dedupSlice(c.group(name, map[string][]string{}))
}
type visit struct {
target []string
group []string
}
func (c Config) group(name string, visited map[string]visit) ([]string, []string) {
if v, ok := visited[name]; ok {
return v.target, v.group
func (c Config) group(name string, visited map[string][]string) []string {
if _, ok := visited[name]; ok {
return visited[name]
}
var g *Group
for _, group := range c.Groups {
@@ -472,24 +469,20 @@ func (c Config) group(name string, visited map[string]visit) ([]string, []string
}
}
if g == nil {
return []string{name}, nil
return []string{name}
}
visited[name] = visit{}
visited[name] = []string{}
targets := make([]string, 0, len(g.Targets))
groups := []string{name}
for _, t := range g.Targets {
ttarget, tgroup := c.group(t, visited)
if len(ttarget) > 0 {
targets = append(targets, ttarget...)
tgroup := c.group(t, visited)
if len(tgroup) > 0 {
targets = append(targets, tgroup...)
} else {
targets = append(targets, t)
}
if len(tgroup) > 0 {
groups = append(groups, tgroup...)
}
}
visited[name] = visit{target: targets, group: groups}
return targets, groups
visited[name] = targets
return targets
}
func (c Config) ResolveTarget(name string, overrides map[string]map[string]Override) (*Target, error) {
@@ -547,44 +540,42 @@ func (c Config) target(name string, visited map[string]*Target, overrides map[st
}
type Group struct {
Name string `json:"-" hcl:"name,label" cty:"name"`
Targets []string `json:"targets" hcl:"targets" cty:"targets"`
Name string `json:"-" hcl:"name,label"`
Targets []string `json:"targets" hcl:"targets"`
// Target // TODO?
}
type Target struct {
Name string `json:"-" hcl:"name,label" cty:"name"`
Name string `json:"-" hcl:"name,label"`
// Inherits is the only field that cannot be overridden with --set
Attest []string `json:"attest,omitempty" hcl:"attest,optional" cty:"attest"`
Inherits []string `json:"inherits,omitempty" hcl:"inherits,optional" cty:"inherits"`
Inherits []string `json:"inherits,omitempty" hcl:"inherits,optional"`
Context *string `json:"context,omitempty" hcl:"context,optional" cty:"context"`
Contexts map[string]string `json:"contexts,omitempty" hcl:"contexts,optional" cty:"contexts"`
Dockerfile *string `json:"dockerfile,omitempty" hcl:"dockerfile,optional" cty:"dockerfile"`
DockerfileInline *string `json:"dockerfile-inline,omitempty" hcl:"dockerfile-inline,optional" cty:"dockerfile-inline"`
Args map[string]*string `json:"args,omitempty" hcl:"args,optional" cty:"args"`
Labels map[string]*string `json:"labels,omitempty" hcl:"labels,optional" cty:"labels"`
Tags []string `json:"tags,omitempty" hcl:"tags,optional" cty:"tags"`
CacheFrom []string `json:"cache-from,omitempty" hcl:"cache-from,optional" cty:"cache-from"`
CacheTo []string `json:"cache-to,omitempty" hcl:"cache-to,optional" cty:"cache-to"`
Target *string `json:"target,omitempty" hcl:"target,optional" cty:"target"`
Secrets []string `json:"secret,omitempty" hcl:"secret,optional" cty:"secret"`
SSH []string `json:"ssh,omitempty" hcl:"ssh,optional" cty:"ssh"`
Platforms []string `json:"platforms,omitempty" hcl:"platforms,optional" cty:"platforms"`
Outputs []string `json:"output,omitempty" hcl:"output,optional" cty:"output"`
Pull *bool `json:"pull,omitempty" hcl:"pull,optional" cty:"pull"`
NoCache *bool `json:"no-cache,omitempty" hcl:"no-cache,optional" cty:"no-cache"`
NetworkMode *string `json:"-" hcl:"-" cty:"-"`
NoCacheFilter []string `json:"no-cache-filter,omitempty" hcl:"no-cache-filter,optional" cty:"no-cache-filter"`
// IMPORTANT: if you add more fields here, do not forget to update newOverrides and docs/manuals/bake/file-definition.md.
Context *string `json:"context,omitempty" hcl:"context,optional"`
Contexts map[string]string `json:"contexts,omitempty" hcl:"contexts,optional"`
Dockerfile *string `json:"dockerfile,omitempty" hcl:"dockerfile,optional"`
DockerfileInline *string `json:"dockerfile-inline,omitempty" hcl:"dockerfile-inline,optional"`
Args map[string]string `json:"args,omitempty" hcl:"args,optional"`
Labels map[string]string `json:"labels,omitempty" hcl:"labels,optional"`
Tags []string `json:"tags,omitempty" hcl:"tags,optional"`
CacheFrom []string `json:"cache-from,omitempty" hcl:"cache-from,optional"`
CacheTo []string `json:"cache-to,omitempty" hcl:"cache-to,optional"`
Target *string `json:"target,omitempty" hcl:"target,optional"`
Secrets []string `json:"secret,omitempty" hcl:"secret,optional"`
SSH []string `json:"ssh,omitempty" hcl:"ssh,optional"`
Platforms []string `json:"platforms,omitempty" hcl:"platforms,optional"`
Outputs []string `json:"output,omitempty" hcl:"output,optional"`
Pull *bool `json:"pull,omitempty" hcl:"pull,optional"`
NoCache *bool `json:"no-cache,omitempty" hcl:"no-cache,optional"`
NetworkMode *string `json:"-" hcl:"-"`
NoCacheFilter []string `json:"no-cache-filter,omitempty" hcl:"no-cache-filter,optional"`
// IMPORTANT: if you add more fields here, do not forget to update newOverrides and docs/guides/bake/file-definition.md.
// linked is a private field to mark a target used as a linked one
linked bool
}
func (t *Target) normalize() {
t.Attest = removeDupes(t.Attest)
t.Tags = removeDupes(t.Tags)
t.Secrets = removeDupes(t.Secrets)
t.SSH = removeDupes(t.SSH)
@@ -615,11 +606,8 @@ func (t *Target) Merge(t2 *Target) {
t.DockerfileInline = t2.DockerfileInline
}
for k, v := range t2.Args {
if v == nil {
continue
}
if t.Args == nil {
t.Args = map[string]*string{}
t.Args = map[string]string{}
}
t.Args[k] = v
}
@@ -630,11 +618,8 @@ func (t *Target) Merge(t2 *Target) {
t.Contexts[k] = v
}
for k, v := range t2.Labels {
if v == nil {
continue
}
if t.Labels == nil {
t.Labels = map[string]*string{}
t.Labels = map[string]string{}
}
t.Labels[k] = v
}
@@ -644,9 +629,6 @@ func (t *Target) Merge(t2 *Target) {
if t2.Target != nil {
t.Target = t2.Target
}
if t2.Attest != nil { // merge
t.Attest = append(t.Attest, t2.Attest...)
}
if t2.Secrets != nil { // merge
t.Secrets = append(t.Secrets, t2.Secrets...)
}
@@ -694,9 +676,9 @@ func (t *Target) AddOverrides(overrides map[string]Override) error {
return errors.Errorf("args require name")
}
if t.Args == nil {
t.Args = map[string]*string{}
t.Args = map[string]string{}
}
t.Args[keys[1]] = &value
t.Args[keys[1]] = value
case "contexts":
if len(keys) != 2 {
return errors.Errorf("contexts require name")
@@ -710,9 +692,9 @@ func (t *Target) AddOverrides(overrides map[string]Override) error {
return errors.Errorf("labels require name")
}
if t.Labels == nil {
t.Labels = map[string]*string{}
t.Labels = map[string]string{}
}
t.Labels[keys[1]] = &value
t.Labels[keys[1]] = value
case "tags":
t.Tags = o.ArrValue
case "cache-from":
@@ -729,8 +711,6 @@ func (t *Target) AddOverrides(overrides map[string]Override) error {
t.Platforms = o.ArrValue
case "output":
t.Outputs = o.ArrValue
case "attest":
t.Attest = append(t.Attest, o.ArrValue...)
case "no-cache":
noCache, err := strconv.ParseBool(value)
if err != nil {
@@ -888,22 +868,6 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
dockerfilePath = path.Join(contextPath, dockerfilePath)
}
args := map[string]string{}
for k, v := range t.Args {
if v == nil {
continue
}
args[k] = *v
}
labels := map[string]string{}
for k, v := range t.Labels {
if v == nil {
continue
}
labels[k] = *v
}
noCache := false
if t.NoCache != nil {
noCache = *t.NoCache
@@ -944,8 +908,8 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
bo := &build.Options{
Inputs: bi,
Tags: t.Tags,
BuildArgs: args,
Labels: labels,
BuildArgs: t.Args,
Labels: t.Labels,
NoCache: noCache,
NoCacheFilter: t.NoCacheFilter,
Pull: pull,
@@ -1000,12 +964,6 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
}
bo.Exports = outputs
attests, err := buildflags.ParseAttests(t.Attest)
if err != nil {
return nil, err
}
bo.Attests = attests
return bo, nil
}

View File

@@ -2,8 +2,8 @@ package bake
import (
"context"
"os"
"sort"
"strings"
"testing"
"github.com/stretchr/testify/require"
@@ -41,12 +41,12 @@ target "webapp" {
require.Equal(t, "Dockerfile.webapp", *m["webapp"].Dockerfile)
require.Equal(t, ".", *m["webapp"].Context)
require.Equal(t, ptrstr("webDEP"), m["webapp"].Args["VAR_INHERITED"])
require.Equal(t, "webDEP", m["webapp"].Args["VAR_INHERITED"])
require.Equal(t, true, *m["webapp"].NoCache)
require.Nil(t, m["webapp"].Pull)
require.Equal(t, 1, len(g))
require.Equal(t, []string{"webapp"}, g["default"].Targets)
require.Equal(t, []string{"webapp"}, g[0].Targets)
})
t.Run("InvalidTargetOverrides", func(t *testing.T) {
@@ -57,7 +57,8 @@ target "webapp" {
t.Run("ArgsOverrides", func(t *testing.T) {
t.Run("leaf", func(t *testing.T) {
t.Setenv("VAR_FROMENV"+t.Name(), "fromEnv")
os.Setenv("VAR_FROMENV"+t.Name(), "fromEnv")
defer os.Unsetenv("VAR_FROM_ENV" + t.Name())
m, g, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{
"webapp.args.VAR_UNSET",
@@ -78,15 +79,15 @@ target "webapp" {
_, isSet = m["webapp"].Args["VAR_EMPTY"]
require.True(t, isSet, m["webapp"].Args["VAR_EMPTY"])
require.Equal(t, ptrstr("bananas"), m["webapp"].Args["VAR_SET"])
require.Equal(t, m["webapp"].Args["VAR_SET"], "bananas")
require.Equal(t, ptrstr("fromEnv"), m["webapp"].Args["VAR_FROMENV"+t.Name()])
require.Equal(t, m["webapp"].Args["VAR_FROMENV"+t.Name()], "fromEnv")
require.Equal(t, ptrstr("webapp"), m["webapp"].Args["VAR_BOTH"])
require.Equal(t, ptrstr("override"), m["webapp"].Args["VAR_INHERITED"])
require.Equal(t, m["webapp"].Args["VAR_BOTH"], "webapp")
require.Equal(t, m["webapp"].Args["VAR_INHERITED"], "override")
require.Equal(t, 1, len(g))
require.Equal(t, []string{"webapp"}, g["default"].Targets)
require.Equal(t, []string{"webapp"}, g[0].Targets)
})
// building leaf but overriding parent fields
@@ -97,10 +98,10 @@ target "webapp" {
}, nil)
require.NoError(t, err)
require.Equal(t, ptrstr("override"), m["webapp"].Args["VAR_INHERITED"])
require.Equal(t, ptrstr("webapp"), m["webapp"].Args["VAR_BOTH"])
require.Equal(t, m["webapp"].Args["VAR_INHERITED"], "override")
require.Equal(t, m["webapp"].Args["VAR_BOTH"], "webapp")
require.Equal(t, 1, len(g))
require.Equal(t, []string{"webapp"}, g["default"].Targets)
require.Equal(t, []string{"webapp"}, g[0].Targets)
})
})
@@ -112,7 +113,7 @@ target "webapp" {
require.NoError(t, err)
require.Equal(t, "foo", *m["webapp"].Context)
require.Equal(t, 1, len(g))
require.Equal(t, []string{"webapp"}, g["default"].Targets)
require.Equal(t, []string{"webapp"}, g[0].Targets)
})
t.Run("NoCacheOverride", func(t *testing.T) {
@@ -120,7 +121,7 @@ target "webapp" {
require.NoError(t, err)
require.Equal(t, false, *m["webapp"].NoCache)
require.Equal(t, 1, len(g))
require.Equal(t, []string{"webapp"}, g["default"].Targets)
require.Equal(t, []string{"webapp"}, g[0].Targets)
})
t.Run("PullOverride", func(t *testing.T) {
@@ -128,28 +129,28 @@ target "webapp" {
require.NoError(t, err)
require.Equal(t, false, *m["webapp"].Pull)
require.Equal(t, 1, len(g))
require.Equal(t, []string{"webapp"}, g["default"].Targets)
require.Equal(t, []string{"webapp"}, g[0].Targets)
})
t.Run("PatternOverride", func(t *testing.T) {
// same check for two cases
multiTargetCheck := func(t *testing.T, m map[string]*Target, g map[string]*Group, err error) {
multiTargetCheck := func(t *testing.T, m map[string]*Target, g []*Group, err error) {
require.NoError(t, err)
require.Equal(t, 2, len(m))
require.Equal(t, "foo", *m["webapp"].Dockerfile)
require.Equal(t, ptrstr("webDEP"), m["webapp"].Args["VAR_INHERITED"])
require.Equal(t, "webDEP", m["webapp"].Args["VAR_INHERITED"])
require.Equal(t, "foo", *m["webDEP"].Dockerfile)
require.Equal(t, ptrstr("webDEP"), m["webDEP"].Args["VAR_INHERITED"])
require.Equal(t, "webDEP", m["webDEP"].Args["VAR_INHERITED"])
require.Equal(t, 1, len(g))
sort.Strings(g["default"].Targets)
require.Equal(t, []string{"webDEP", "webapp"}, g["default"].Targets)
sort.Strings(g[0].Targets)
require.Equal(t, []string{"webDEP", "webapp"}, g[0].Targets)
}
cases := []struct {
name string
targets []string
overrides []string
check func(*testing.T, map[string]*Target, map[string]*Group, error)
check func(*testing.T, map[string]*Target, []*Group, error)
}{
{
name: "multi target single pattern",
@@ -167,20 +168,20 @@ target "webapp" {
name: "single target",
targets: []string{"webapp"},
overrides: []string{"web*.dockerfile=foo"},
check: func(t *testing.T, m map[string]*Target, g map[string]*Group, err error) {
check: func(t *testing.T, m map[string]*Target, g []*Group, err error) {
require.NoError(t, err)
require.Equal(t, 1, len(m))
require.Equal(t, "foo", *m["webapp"].Dockerfile)
require.Equal(t, ptrstr("webDEP"), m["webapp"].Args["VAR_INHERITED"])
require.Equal(t, "webDEP", m["webapp"].Args["VAR_INHERITED"])
require.Equal(t, 1, len(g))
require.Equal(t, []string{"webapp"}, g["default"].Targets)
require.Equal(t, []string{"webapp"}, g[0].Targets)
},
},
{
name: "nomatch",
targets: []string{"webapp"},
overrides: []string{"nomatch*.dockerfile=foo"},
check: func(t *testing.T, m map[string]*Target, g map[string]*Group, err error) {
check: func(t *testing.T, m map[string]*Target, g []*Group, err error) {
// NOTE: I am unsure whether failing to match should always error out
// instead of simply skipping that override.
// Let's enforce the error and we can relax it later if users complain.
@@ -298,12 +299,12 @@ services:
require.True(t, ok)
require.Equal(t, "Dockerfile.webapp", *m["webapp"].Dockerfile)
require.Equal(t, ".", *m["webapp"].Context)
require.Equal(t, ptrstr("1"), m["webapp"].Args["buildno"])
require.Equal(t, ptrstr("12"), m["webapp"].Args["buildno2"])
require.Equal(t, "1", m["webapp"].Args["buildno"])
require.Equal(t, "12", m["webapp"].Args["buildno2"])
require.Equal(t, 1, len(g))
sort.Strings(g["default"].Targets)
require.Equal(t, []string{"db", "newservice", "webapp"}, g["default"].Targets)
sort.Strings(g[0].Targets)
require.Equal(t, []string{"db", "newservice", "webapp"}, g[0].Targets)
}
func TestReadTargetsWithDotCompose(t *testing.T) {
@@ -342,7 +343,7 @@ services:
_, ok := m["web_app"]
require.True(t, ok)
require.Equal(t, "Dockerfile.webapp", *m["web_app"].Dockerfile)
require.Equal(t, ptrstr("1"), m["web_app"].Args["buildno"])
require.Equal(t, "1", m["web_app"].Args["buildno"])
m, _, err = ReadTargets(ctx, []File{fp2}, []string{"web_app"}, nil, nil)
require.NoError(t, err)
@@ -350,7 +351,7 @@ services:
_, ok = m["web_app"]
require.True(t, ok)
require.Equal(t, "Dockerfile", *m["web_app"].Dockerfile)
require.Equal(t, ptrstr("12"), m["web_app"].Args["buildno2"])
require.Equal(t, "12", m["web_app"].Args["buildno2"])
m, g, err := ReadTargets(ctx, []File{fp, fp2}, []string{"default"}, nil, nil)
require.NoError(t, err)
@@ -359,12 +360,12 @@ services:
require.True(t, ok)
require.Equal(t, "Dockerfile.webapp", *m["web_app"].Dockerfile)
require.Equal(t, ".", *m["web_app"].Context)
require.Equal(t, ptrstr("1"), m["web_app"].Args["buildno"])
require.Equal(t, ptrstr("12"), m["web_app"].Args["buildno2"])
require.Equal(t, "1", m["web_app"].Args["buildno"])
require.Equal(t, "12", m["web_app"].Args["buildno2"])
require.Equal(t, 1, len(g))
sort.Strings(g["default"].Targets)
require.Equal(t, []string{"web_app"}, g["default"].Targets)
sort.Strings(g[0].Targets)
require.Equal(t, []string{"web_app"}, g[0].Targets)
}
func TestHCLCwdPrefix(t *testing.T) {
@@ -391,7 +392,7 @@ func TestHCLCwdPrefix(t *testing.T) {
require.Equal(t, "foo", *m["app"].Context)
require.Equal(t, 1, len(g))
require.Equal(t, []string{"app"}, g["default"].Targets)
require.Equal(t, []string{"app"}, g[0].Targets)
}
func TestOverrideMerge(t *testing.T) {
@@ -695,7 +696,7 @@ target "image" {
m, g, err := ReadTargets(ctx, []File{f}, []string{"image"}, nil, nil)
require.NoError(t, err)
require.Equal(t, 1, len(g))
require.Equal(t, []string{"image"}, g["default"].Targets)
require.Equal(t, []string{"image"}, g[0].Targets)
require.Equal(t, 1, len(m))
require.Equal(t, "test", *m["image"].Dockerfile)
}
@@ -716,9 +717,8 @@ target "image" {
m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil)
require.NoError(t, err)
require.Equal(t, 2, len(g))
require.Equal(t, []string{"foo"}, g["default"].Targets)
require.Equal(t, []string{"image"}, g["foo"].Targets)
require.Equal(t, 1, len(g))
require.Equal(t, []string{"image"}, g[0].Targets)
require.Equal(t, 1, len(m))
require.Equal(t, "test", *m["image"].Dockerfile)
}
@@ -742,17 +742,15 @@ target "image" {
m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil)
require.NoError(t, err)
require.Equal(t, 2, len(g))
require.Equal(t, []string{"foo"}, g["default"].Targets)
require.Equal(t, []string{"image"}, g["foo"].Targets)
require.Equal(t, 1, len(g))
require.Equal(t, []string{"image"}, g[0].Targets)
require.Equal(t, 1, len(m))
require.Equal(t, "test", *m["image"].Dockerfile)
m, g, err = ReadTargets(ctx, []File{f}, []string{"foo", "foo"}, nil, nil)
require.NoError(t, err)
require.Equal(t, 2, len(g))
require.Equal(t, []string{"foo"}, g["default"].Targets)
require.Equal(t, []string{"image"}, g["foo"].Targets)
require.Equal(t, 1, len(g))
require.Equal(t, []string{"image"}, g[0].Targets)
require.Equal(t, 1, len(m))
require.Equal(t, "test", *m["image"].Dockerfile)
}
@@ -831,7 +829,7 @@ services:
m, g, err := ReadTargets(ctx, []File{fhcl}, []string{"default"}, nil, nil)
require.NoError(t, err)
require.Equal(t, 1, len(g))
require.Equal(t, []string{"image"}, g["default"].Targets)
require.Equal(t, []string{"image"}, g[0].Targets)
require.Equal(t, 1, len(m))
require.Equal(t, 1, len(m["image"].Outputs))
require.Equal(t, "type=docker", m["image"].Outputs[0])
@@ -839,7 +837,7 @@ services:
m, g, err = ReadTargets(ctx, []File{fhcl}, []string{"image-release"}, nil, nil)
require.NoError(t, err)
require.Equal(t, 1, len(g))
require.Equal(t, []string{"image-release"}, g["default"].Targets)
require.Equal(t, []string{"image-release"}, g[0].Targets)
require.Equal(t, 1, len(m))
require.Equal(t, 1, len(m["image-release"].Outputs))
require.Equal(t, "type=image,push=true", m["image-release"].Outputs[0])
@@ -847,7 +845,7 @@ services:
m, g, err = ReadTargets(ctx, []File{fhcl}, []string{"image", "image-release"}, nil, nil)
require.NoError(t, err)
require.Equal(t, 1, len(g))
require.Equal(t, []string{"image", "image-release"}, g["default"].Targets)
require.Equal(t, []string{"image", "image-release"}, g[0].Targets)
require.Equal(t, 2, len(m))
require.Equal(t, ".", *m["image"].Context)
require.Equal(t, 1, len(m["image-release"].Outputs))
@@ -856,22 +854,22 @@ services:
m, g, err = ReadTargets(ctx, []File{fyml, fhcl}, []string{"default"}, nil, nil)
require.NoError(t, err)
require.Equal(t, 1, len(g))
require.Equal(t, []string{"image"}, g["default"].Targets)
require.Equal(t, []string{"image"}, g[0].Targets)
require.Equal(t, 1, len(m))
require.Equal(t, ".", *m["image"].Context)
m, g, err = ReadTargets(ctx, []File{fjson}, []string{"default"}, nil, nil)
require.NoError(t, err)
require.Equal(t, 1, len(g))
require.Equal(t, []string{"image"}, g["default"].Targets)
require.Equal(t, []string{"image"}, g[0].Targets)
require.Equal(t, 1, len(m))
require.Equal(t, ".", *m["image"].Context)
m, g, err = ReadTargets(ctx, []File{fyml}, []string{"default"}, nil, nil)
require.NoError(t, err)
require.Equal(t, 1, len(g))
sort.Strings(g["default"].Targets)
require.Equal(t, []string{"addon", "aws"}, g["default"].Targets)
sort.Strings(g[0].Targets)
require.Equal(t, []string{"addon", "aws"}, g[0].Targets)
require.Equal(t, 2, len(m))
require.Equal(t, "./Dockerfile", *m["addon"].Dockerfile)
require.Equal(t, "./aws.Dockerfile", *m["aws"].Dockerfile)
@@ -879,8 +877,8 @@ services:
m, g, err = ReadTargets(ctx, []File{fyml, fhcl}, []string{"addon", "aws"}, nil, nil)
require.NoError(t, err)
require.Equal(t, 1, len(g))
sort.Strings(g["default"].Targets)
require.Equal(t, []string{"addon", "aws"}, g["default"].Targets)
sort.Strings(g[0].Targets)
require.Equal(t, []string{"addon", "aws"}, g[0].Targets)
require.Equal(t, 2, len(m))
require.Equal(t, "./Dockerfile", *m["addon"].Dockerfile)
require.Equal(t, "./aws.Dockerfile", *m["aws"].Dockerfile)
@@ -888,8 +886,8 @@ services:
m, g, err = ReadTargets(ctx, []File{fyml, fhcl}, []string{"addon", "aws", "image"}, nil, nil)
require.NoError(t, err)
require.Equal(t, 1, len(g))
sort.Strings(g["default"].Targets)
require.Equal(t, []string{"addon", "aws", "image"}, g["default"].Targets)
sort.Strings(g[0].Targets)
require.Equal(t, []string{"addon", "aws", "image"}, g[0].Targets)
require.Equal(t, 3, len(m))
require.Equal(t, ".", *m["image"].Context)
require.Equal(t, "./Dockerfile", *m["addon"].Dockerfile)
@@ -915,17 +913,15 @@ target "image" {
m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil)
require.NoError(t, err)
require.Equal(t, 2, len(g))
require.Equal(t, []string{"foo"}, g["default"].Targets)
require.Equal(t, []string{"foo"}, g["foo"].Targets)
require.Equal(t, 1, len(g))
require.Equal(t, []string{"foo"}, g[0].Targets)
require.Equal(t, 1, len(m))
require.Equal(t, "bar", *m["foo"].Dockerfile)
m, g, err = ReadTargets(ctx, []File{f}, []string{"foo", "foo"}, nil, nil)
require.NoError(t, err)
require.Equal(t, 2, len(g))
require.Equal(t, []string{"foo"}, g["default"].Targets)
require.Equal(t, []string{"foo"}, g["foo"].Targets)
require.Equal(t, 1, len(g))
require.Equal(t, []string{"foo"}, g[0].Targets)
require.Equal(t, 1, len(m))
require.Equal(t, "bar", *m["foo"].Dockerfile)
}
@@ -949,18 +945,16 @@ target "image" {
m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil)
require.NoError(t, err)
require.Equal(t, 2, len(g))
require.Equal(t, []string{"foo"}, g["default"].Targets)
require.Equal(t, []string{"foo", "image"}, g["foo"].Targets)
require.Equal(t, 1, len(g))
require.Equal(t, []string{"foo", "image"}, g[0].Targets)
require.Equal(t, 2, len(m))
require.Equal(t, "bar", *m["foo"].Dockerfile)
require.Equal(t, "type=docker", m["image"].Outputs[0])
m, g, err = ReadTargets(ctx, []File{f}, []string{"foo", "image"}, nil, nil)
require.NoError(t, err)
require.Equal(t, 2, len(g))
require.Equal(t, []string{"foo", "image"}, g["default"].Targets)
require.Equal(t, []string{"foo", "image"}, g["foo"].Targets)
require.Equal(t, 1, len(g))
require.Equal(t, []string{"foo", "image"}, g[0].Targets)
require.Equal(t, 2, len(m))
require.Equal(t, "bar", *m["foo"].Dockerfile)
require.Equal(t, "type=docker", m["image"].Outputs[0])
@@ -997,22 +991,22 @@ target "d" {
cases := []struct {
name string
overrides []string
want map[string]*string
want map[string]string
}{
{
name: "nested simple",
overrides: nil,
want: map[string]*string{"bar": ptrstr("234"), "baz": ptrstr("890"), "foo": ptrstr("123")},
want: map[string]string{"bar": "234", "baz": "890", "foo": "123"},
},
{
name: "nested with overrides first",
overrides: []string{"a.args.foo=321", "b.args.bar=432"},
want: map[string]*string{"bar": ptrstr("234"), "baz": ptrstr("890"), "foo": ptrstr("321")},
want: map[string]string{"bar": "234", "baz": "890", "foo": "321"},
},
{
name: "nested with overrides last",
overrides: []string{"a.args.foo=321", "c.args.bar=432"},
want: map[string]*string{"bar": ptrstr("432"), "baz": ptrstr("890"), "foo": ptrstr("321")},
want: map[string]string{"bar": "432", "baz": "890", "foo": "321"},
},
}
for _, tt := range cases {
@@ -1021,7 +1015,7 @@ target "d" {
m, g, err := ReadTargets(ctx, []File{f}, []string{"d"}, tt.overrides, nil)
require.NoError(t, err)
require.Equal(t, 1, len(g))
require.Equal(t, []string{"d"}, g["default"].Targets)
require.Equal(t, []string{"d"}, g[0].Targets)
require.Equal(t, 1, len(m))
require.Equal(t, tt.want, m["d"].Args)
})
@@ -1065,26 +1059,26 @@ group "default" {
cases := []struct {
name string
overrides []string
wantch1 map[string]*string
wantch2 map[string]*string
wantch1 map[string]string
wantch2 map[string]string
}{
{
name: "nested simple",
overrides: nil,
wantch1: map[string]*string{"BAR": ptrstr("fuu"), "FOO": ptrstr("bar")},
wantch2: map[string]*string{"BAR": ptrstr("fuu"), "FOO": ptrstr("bar"), "FOO2": ptrstr("bar2")},
wantch1: map[string]string{"BAR": "fuu", "FOO": "bar"},
wantch2: map[string]string{"BAR": "fuu", "FOO": "bar", "FOO2": "bar2"},
},
{
name: "nested with overrides first",
overrides: []string{"grandparent.args.BAR=fii", "child1.args.FOO=baaar"},
wantch1: map[string]*string{"BAR": ptrstr("fii"), "FOO": ptrstr("baaar")},
wantch2: map[string]*string{"BAR": ptrstr("fii"), "FOO": ptrstr("bar"), "FOO2": ptrstr("bar2")},
wantch1: map[string]string{"BAR": "fii", "FOO": "baaar"},
wantch2: map[string]string{"BAR": "fii", "FOO": "bar", "FOO2": "bar2"},
},
{
name: "nested with overrides last",
overrides: []string{"grandparent.args.BAR=fii", "child2.args.FOO=baaar"},
wantch1: map[string]*string{"BAR": ptrstr("fii"), "FOO": ptrstr("bar")},
wantch2: map[string]*string{"BAR": ptrstr("fii"), "FOO": ptrstr("baaar"), "FOO2": ptrstr("bar2")},
wantch1: map[string]string{"BAR": "fii", "FOO": "bar"},
wantch2: map[string]string{"BAR": "fii", "FOO": "baaar", "FOO2": "bar2"},
},
}
for _, tt := range cases {
@@ -1093,7 +1087,7 @@ group "default" {
m, g, err := ReadTargets(ctx, []File{f}, []string{"default"}, tt.overrides, nil)
require.NoError(t, err)
require.Equal(t, 1, len(g))
require.Equal(t, []string{"child1", "child2"}, g["default"].Targets)
require.Equal(t, []string{"child1", "child2"}, g[0].Targets)
require.Equal(t, 2, len(m))
require.Equal(t, tt.wantch1, m["child1"].Args)
require.Equal(t, []string{"type=docker"}, m["child1"].Outputs)
@@ -1190,67 +1184,44 @@ target "f" {
}`)}
cases := []struct {
names []string
targets []string
groups []string
count int
name string
targets []string
ntargets int
}{
{
names: []string{"a"},
targets: []string{"a"},
groups: []string{"default", "a", "b", "c"},
count: 1,
name: "a",
targets: []string{"b", "c"},
ntargets: 1,
},
{
names: []string{"b"},
targets: []string{"b"},
groups: []string{"default", "b"},
count: 1,
name: "b",
targets: []string{"d"},
ntargets: 1,
},
{
names: []string{"c"},
targets: []string{"c"},
groups: []string{"default", "b", "c"},
count: 1,
name: "c",
targets: []string{"b"},
ntargets: 1,
},
{
names: []string{"d"},
targets: []string{"d"},
groups: []string{"default"},
count: 1,
name: "d",
targets: []string{"d"},
ntargets: 1,
},
{
names: []string{"e"},
targets: []string{"e"},
groups: []string{"default", "a", "b", "c", "e"},
count: 2,
},
{
names: []string{"a", "e"},
targets: []string{"a", "e"},
groups: []string{"default", "a", "b", "c", "e"},
count: 2,
name: "e",
targets: []string{"a", "f"},
ntargets: 2,
},
}
for _, tt := range cases {
tt := tt
t.Run(strings.Join(tt.names, "+"), func(t *testing.T) {
m, g, err := ReadTargets(ctx, []File{f}, tt.names, nil, nil)
t.Run(tt.name, func(t *testing.T) {
m, g, err := ReadTargets(ctx, []File{f}, []string{tt.name}, nil, nil)
require.NoError(t, err)
var gnames []string
for _, g := range g {
gnames = append(gnames, g.Name)
}
sort.Strings(gnames)
sort.Strings(tt.groups)
require.Equal(t, tt.groups, gnames)
sort.Strings(g["default"].Targets)
sort.Strings(tt.targets)
require.Equal(t, tt.targets, g["default"].Targets)
require.Equal(t, tt.count, len(m))
require.Equal(t, 1, len(g))
require.Equal(t, tt.targets, g[0].Targets)
require.Equal(t, tt.ntargets, len(m))
require.Equal(t, ".", *m["d"].Context)
require.Equal(t, "./testdockerfile", *m["d"].Dockerfile)
})
@@ -1283,78 +1254,8 @@ services:
require.Equal(t, 1, len(c.Targets))
require.Equal(t, "app", c.Targets[0].Name)
require.Equal(t, ptrstr("foo"), c.Targets[0].Args["v1"])
require.Equal(t, ptrstr("bar"), c.Targets[0].Args["v2"])
require.Equal(t, "foo", c.Targets[0].Args["v1"])
require.Equal(t, "bar", c.Targets[0].Args["v2"])
require.Equal(t, "dir", *c.Targets[0].Context)
require.Equal(t, "Dockerfile-alternate", *c.Targets[0].Dockerfile)
}
func TestHCLNullVars(t *testing.T) {
fp := File{
Name: "docker-bake.hcl",
Data: []byte(
`variable "FOO" {
default = null
}
variable "BAR" {
default = null
}
target "default" {
args = {
foo = FOO
bar = "baz"
}
labels = {
"com.docker.app.bar" = BAR
"com.docker.app.baz" = "foo"
}
}`),
}
ctx := context.TODO()
m, _, err := ReadTargets(ctx, []File{fp}, []string{"default"}, nil, nil)
require.NoError(t, err)
require.Equal(t, 1, len(m))
_, ok := m["default"]
require.True(t, ok)
_, err = TargetsToBuildOpt(m, &Input{})
require.NoError(t, err)
require.Equal(t, map[string]*string{"bar": ptrstr("baz")}, m["default"].Args)
require.Equal(t, map[string]*string{"com.docker.app.baz": ptrstr("foo")}, m["default"].Labels)
}
func TestJSONNullVars(t *testing.T) {
fp := File{
Name: "docker-bake.json",
Data: []byte(
`{
"variable": {
"FOO": {
"default": null
}
},
"target": {
"default": {
"args": {
"foo": "${FOO}",
"bar": "baz"
}
}
}
}`),
}
ctx := context.TODO()
m, _, err := ReadTargets(ctx, []File{fp}, []string{"default"}, nil, nil)
require.NoError(t, err)
require.Equal(t, 1, len(m))
_, ok := m["default"]
require.True(t, ok)
_, err = TargetsToBuildOpt(m, &Input{})
require.NoError(t, err)
require.Equal(t, map[string]*string{"bar": ptrstr("baz")}, m["default"].Args)
}

View File

@@ -75,19 +75,13 @@ func ParseCompose(cfgs []compose.ConfigFile, envs map[string]string) (*Config, e
secrets = append(secrets, secret)
}
// compose does not support nil values for labels
labels := map[string]*string{}
for k, v := range s.Build.Labels {
labels[k] = &v
}
g.Targets = append(g.Targets, targetName)
t := &Target{
Name: targetName,
Context: contextPathP,
Dockerfile: dockerfilePathP,
Tags: s.Build.Tags,
Labels: labels,
Labels: s.Build.Labels,
Args: flatten(s.Build.Args.Resolve(func(val string) (string, bool) {
if val, ok := s.Environment[val]; ok && val != nil {
return *val, true
@@ -199,16 +193,16 @@ func loadDotEnv(curenv map[string]string, workingDir string) (map[string]string,
return curenv, nil
}
func flatten(in compose.MappingWithEquals) map[string]*string {
func flatten(in compose.MappingWithEquals) compose.Mapping {
if len(in) == 0 {
return nil
}
out := map[string]*string{}
out := compose.Mapping{}
for k, v := range in {
if v == nil {
continue
}
out[k] = v
out[k] = *v
}
return out
}
@@ -228,7 +222,7 @@ type xbake struct {
NoCacheFilter stringArray `yaml:"no-cache-filter,omitempty"`
Contexts stringMap `yaml:"contexts,omitempty"`
// don't forget to update documentation if you add a new field:
// docs/manuals/bake/compose-file.md#extension-field-with-x-bake
// docs/guides/bake/compose-file.md#extension-field-with-x-bake
}
type stringMap map[string]string

View File

@@ -60,7 +60,7 @@ secrets:
require.Equal(t, "./dir", *c.Targets[1].Context)
require.Equal(t, "Dockerfile-alternate", *c.Targets[1].Dockerfile)
require.Equal(t, 1, len(c.Targets[1].Args))
require.Equal(t, ptrstr("123"), c.Targets[1].Args["buildno"])
require.Equal(t, "123", c.Targets[1].Args["buildno"])
require.Equal(t, []string{"type=local,src=path/to/cache"}, c.Targets[1].CacheFrom)
require.Equal(t, []string{"type=local,dest=path/to/cache"}, c.Targets[1].CacheTo)
require.Equal(t, "none", *c.Targets[1].NetworkMode)
@@ -149,15 +149,18 @@ services:
BRB: FOO
`)
t.Setenv("FOO", "bar")
t.Setenv("BAR", "foo")
t.Setenv("ZZZ_BAR", "zzz_foo")
os.Setenv("FOO", "bar")
defer os.Unsetenv("FOO")
os.Setenv("BAR", "foo")
defer os.Unsetenv("BAR")
os.Setenv("ZZZ_BAR", "zzz_foo")
defer os.Unsetenv("ZZZ_BAR")
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, sliceToMap(os.Environ()))
require.NoError(t, err)
require.Equal(t, ptrstr("bar"), c.Targets[0].Args["FOO"])
require.Equal(t, ptrstr("zzz_foo"), c.Targets[0].Args["BAR"])
require.Equal(t, ptrstr("FOO"), c.Targets[0].Args["BRB"])
require.Equal(t, "bar", c.Targets[0].Args["FOO"])
require.Equal(t, "zzz_foo", c.Targets[0].Args["BAR"])
require.Equal(t, "FOO", c.Targets[0].Args["BRB"])
}
func TestInconsistentComposeFile(t *testing.T) {
@@ -305,7 +308,7 @@ services:
sort.Slice(c.Targets, func(i, j int) bool {
return c.Targets[i].Name < c.Targets[j].Name
})
require.Equal(t, map[string]*string{"CT_ECR": ptrstr("foo"), "CT_TAG": ptrstr("bar")}, c.Targets[0].Args)
require.Equal(t, map[string]string{"CT_ECR": "foo", "CT_TAG": "bar"}, c.Targets[0].Args)
require.Equal(t, []string{"ct-addon:baz", "ct-addon:foo", "ct-addon:alp"}, c.Targets[0].Tags)
require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[0].Platforms)
require.Equal(t, []string{"user/app:cache", "type=local,src=path/to/cache"}, c.Targets[0].CacheFrom)
@@ -378,7 +381,7 @@ services:
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
require.Equal(t, map[string]*string{"CT_ECR": ptrstr("foo"), "FOO": ptrstr("bsdf -csdf"), "NODE_ENV": ptrstr("test")}, c.Targets[0].Args)
require.Equal(t, map[string]string{"CT_ECR": "foo", "FOO": "bsdf -csdf", "NODE_ENV": "test"}, c.Targets[0].Args)
}
func TestDotEnv(t *testing.T) {
@@ -402,7 +405,7 @@ services:
Data: dt,
}})
require.NoError(t, err)
require.Equal(t, map[string]*string{"FOO": ptrstr("bar")}, c.Targets[0].Args)
require.Equal(t, map[string]string{"FOO": "bar"}, c.Targets[0].Args)
}
func TestPorts(t *testing.T) {
@@ -626,22 +629,6 @@ target "default" {
}
}
func TestComposeNullArgs(t *testing.T) {
var dt = []byte(`
services:
scratch:
build:
context: .
args:
FOO: null
bar: "baz"
`)
c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
require.Equal(t, map[string]*string{"bar": ptrstr("baz")}, c.Targets[0].Args)
}
// chdir changes the current working directory to the named directory,
// and then restore the original working directory at the end of the test.
func chdir(t *testing.T, dir string) {

View File

@@ -1,7 +1,7 @@
package bake
import (
"reflect"
"os"
"testing"
"github.com/stretchr/testify/require"
@@ -54,7 +54,7 @@ func TestHCLBasic(t *testing.T) {
require.Equal(t, c.Targets[1].Name, "webapp")
require.Equal(t, 1, len(c.Targets[1].Args))
require.Equal(t, ptrstr("123"), c.Targets[1].Args["buildno"])
require.Equal(t, "123", c.Targets[1].Args["buildno"])
require.Equal(t, c.Targets[2].Name, "cross")
require.Equal(t, 2, len(c.Targets[2].Platforms))
@@ -62,7 +62,7 @@ func TestHCLBasic(t *testing.T) {
require.Equal(t, c.Targets[3].Name, "webapp-plus")
require.Equal(t, 1, len(c.Targets[3].Args))
require.Equal(t, map[string]*string{"IAMCROSS": ptrstr("true")}, c.Targets[3].Args)
require.Equal(t, map[string]string{"IAMCROSS": "true"}, c.Targets[3].Args)
}
func TestHCLBasicInJSON(t *testing.T) {
@@ -114,7 +114,7 @@ func TestHCLBasicInJSON(t *testing.T) {
require.Equal(t, c.Targets[1].Name, "webapp")
require.Equal(t, 1, len(c.Targets[1].Args))
require.Equal(t, ptrstr("123"), c.Targets[1].Args["buildno"])
require.Equal(t, "123", c.Targets[1].Args["buildno"])
require.Equal(t, c.Targets[2].Name, "cross")
require.Equal(t, 2, len(c.Targets[2].Platforms))
@@ -122,7 +122,7 @@ func TestHCLBasicInJSON(t *testing.T) {
require.Equal(t, c.Targets[3].Name, "webapp-plus")
require.Equal(t, 1, len(c.Targets[3].Args))
require.Equal(t, map[string]*string{"IAMCROSS": ptrstr("true")}, c.Targets[3].Args)
require.Equal(t, map[string]string{"IAMCROSS": "true"}, c.Targets[3].Args)
}
func TestHCLWithFunctions(t *testing.T) {
@@ -147,7 +147,7 @@ func TestHCLWithFunctions(t *testing.T) {
require.Equal(t, 1, len(c.Targets))
require.Equal(t, c.Targets[0].Name, "webapp")
require.Equal(t, ptrstr("124"), c.Targets[0].Args["buildno"])
require.Equal(t, "124", c.Targets[0].Args["buildno"])
}
func TestHCLWithUserDefinedFunctions(t *testing.T) {
@@ -177,7 +177,7 @@ func TestHCLWithUserDefinedFunctions(t *testing.T) {
require.Equal(t, 1, len(c.Targets))
require.Equal(t, c.Targets[0].Name, "webapp")
require.Equal(t, ptrstr("124"), c.Targets[0].Args["buildno"])
require.Equal(t, "124", c.Targets[0].Args["buildno"])
}
func TestHCLWithVariables(t *testing.T) {
@@ -206,9 +206,9 @@ func TestHCLWithVariables(t *testing.T) {
require.Equal(t, 1, len(c.Targets))
require.Equal(t, c.Targets[0].Name, "webapp")
require.Equal(t, ptrstr("123"), c.Targets[0].Args["buildno"])
require.Equal(t, "123", c.Targets[0].Args["buildno"])
t.Setenv("BUILD_NUMBER", "456")
os.Setenv("BUILD_NUMBER", "456")
c, err = ParseFile(dt, "docker-bake.hcl")
require.NoError(t, err)
@@ -219,7 +219,7 @@ func TestHCLWithVariables(t *testing.T) {
require.Equal(t, 1, len(c.Targets))
require.Equal(t, c.Targets[0].Name, "webapp")
require.Equal(t, ptrstr("456"), c.Targets[0].Args["buildno"])
require.Equal(t, "456", c.Targets[0].Args["buildno"])
}
func TestHCLWithVariablesInFunctions(t *testing.T) {
@@ -244,7 +244,7 @@ func TestHCLWithVariablesInFunctions(t *testing.T) {
require.Equal(t, c.Targets[0].Name, "webapp")
require.Equal(t, []string{"user/repo:v1"}, c.Targets[0].Tags)
t.Setenv("REPO", "docker/buildx")
os.Setenv("REPO", "docker/buildx")
c, err = ParseFile(dt, "docker-bake.hcl")
require.NoError(t, err)
@@ -280,10 +280,10 @@ func TestHCLMultiFileSharedVariables(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 1, len(c.Targets))
require.Equal(t, c.Targets[0].Name, "app")
require.Equal(t, ptrstr("pre-abc"), c.Targets[0].Args["v1"])
require.Equal(t, ptrstr("abc-post"), c.Targets[0].Args["v2"])
require.Equal(t, "pre-abc", c.Targets[0].Args["v1"])
require.Equal(t, "abc-post", c.Targets[0].Args["v2"])
t.Setenv("FOO", "def")
os.Setenv("FOO", "def")
c, err = ParseFiles([]File{
{Data: dt, Name: "c1.hcl"},
@@ -293,11 +293,12 @@ func TestHCLMultiFileSharedVariables(t *testing.T) {
require.Equal(t, 1, len(c.Targets))
require.Equal(t, c.Targets[0].Name, "app")
require.Equal(t, ptrstr("pre-def"), c.Targets[0].Args["v1"])
require.Equal(t, ptrstr("def-post"), c.Targets[0].Args["v2"])
require.Equal(t, "pre-def", c.Targets[0].Args["v1"])
require.Equal(t, "def-post", c.Targets[0].Args["v2"])
}
func TestHCLVarsWithVars(t *testing.T) {
os.Unsetenv("FOO")
dt := []byte(`
variable "FOO" {
default = upper("${BASE}def")
@@ -329,10 +330,10 @@ func TestHCLVarsWithVars(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 1, len(c.Targets))
require.Equal(t, c.Targets[0].Name, "app")
require.Equal(t, ptrstr("pre--ABCDEF-"), c.Targets[0].Args["v1"])
require.Equal(t, ptrstr("ABCDEF-post"), c.Targets[0].Args["v2"])
require.Equal(t, "pre--ABCDEF-", c.Targets[0].Args["v1"])
require.Equal(t, "ABCDEF-post", c.Targets[0].Args["v2"])
t.Setenv("BASE", "new")
os.Setenv("BASE", "new")
c, err = ParseFiles([]File{
{Data: dt, Name: "c1.hcl"},
@@ -342,11 +343,12 @@ func TestHCLVarsWithVars(t *testing.T) {
require.Equal(t, 1, len(c.Targets))
require.Equal(t, c.Targets[0].Name, "app")
require.Equal(t, ptrstr("pre--NEWDEF-"), c.Targets[0].Args["v1"])
require.Equal(t, ptrstr("NEWDEF-post"), c.Targets[0].Args["v2"])
require.Equal(t, "pre--NEWDEF-", c.Targets[0].Args["v1"])
require.Equal(t, "NEWDEF-post", c.Targets[0].Args["v2"])
}
func TestHCLTypedVariables(t *testing.T) {
os.Unsetenv("FOO")
dt := []byte(`
variable "FOO" {
default = 3
@@ -367,80 +369,33 @@ func TestHCLTypedVariables(t *testing.T) {
require.Equal(t, 1, len(c.Targets))
require.Equal(t, c.Targets[0].Name, "app")
require.Equal(t, ptrstr("lower"), c.Targets[0].Args["v1"])
require.Equal(t, ptrstr("yes"), c.Targets[0].Args["v2"])
require.Equal(t, "lower", c.Targets[0].Args["v1"])
require.Equal(t, "yes", c.Targets[0].Args["v2"])
t.Setenv("FOO", "5.1")
t.Setenv("IS_FOO", "0")
os.Setenv("FOO", "5.1")
os.Setenv("IS_FOO", "0")
c, err = ParseFile(dt, "docker-bake.hcl")
require.NoError(t, err)
require.Equal(t, 1, len(c.Targets))
require.Equal(t, c.Targets[0].Name, "app")
require.Equal(t, ptrstr("higher"), c.Targets[0].Args["v1"])
require.Equal(t, ptrstr("no"), c.Targets[0].Args["v2"])
require.Equal(t, "higher", c.Targets[0].Args["v1"])
require.Equal(t, "no", c.Targets[0].Args["v2"])
t.Setenv("FOO", "NaN")
os.Setenv("FOO", "NaN")
_, err = ParseFile(dt, "docker-bake.hcl")
require.Error(t, err)
require.Contains(t, err.Error(), "failed to parse FOO as number")
t.Setenv("FOO", "0")
t.Setenv("IS_FOO", "maybe")
os.Setenv("FOO", "0")
os.Setenv("IS_FOO", "maybe")
_, err = ParseFile(dt, "docker-bake.hcl")
require.Error(t, err)
require.Contains(t, err.Error(), "failed to parse IS_FOO as bool")
}
func TestHCLNullVariables(t *testing.T) {
dt := []byte(`
variable "FOO" {
default = null
}
target "default" {
args = {
foo = FOO
}
}`)
c, err := ParseFile(dt, "docker-bake.hcl")
require.NoError(t, err)
require.Equal(t, ptrstr(nil), c.Targets[0].Args["foo"])
t.Setenv("FOO", "bar")
c, err = ParseFile(dt, "docker-bake.hcl")
require.NoError(t, err)
require.Equal(t, ptrstr("bar"), c.Targets[0].Args["foo"])
}
func TestJSONNullVariables(t *testing.T) {
dt := []byte(`{
"variable": {
"FOO": {
"default": null
}
},
"target": {
"default": {
"args": {
"foo": "${FOO}"
}
}
}
}`)
c, err := ParseFile(dt, "docker-bake.json")
require.NoError(t, err)
require.Equal(t, ptrstr(nil), c.Targets[0].Args["foo"])
t.Setenv("FOO", "bar")
c, err = ParseFile(dt, "docker-bake.json")
require.NoError(t, err)
require.Equal(t, ptrstr("bar"), c.Targets[0].Args["foo"])
}
func TestHCLVariableCycle(t *testing.T) {
dt := []byte(`
variable "FOO" {
@@ -476,107 +431,19 @@ func TestHCLAttrs(t *testing.T) {
require.Equal(t, 1, len(c.Targets))
require.Equal(t, c.Targets[0].Name, "app")
require.Equal(t, ptrstr("attr-abcdef"), c.Targets[0].Args["v1"])
require.Equal(t, "attr-abcdef", c.Targets[0].Args["v1"])
// env does not apply if no variable
t.Setenv("FOO", "bar")
os.Setenv("FOO", "bar")
c, err = ParseFile(dt, "docker-bake.hcl")
require.NoError(t, err)
require.Equal(t, 1, len(c.Targets))
require.Equal(t, c.Targets[0].Name, "app")
require.Equal(t, ptrstr("attr-abcdef"), c.Targets[0].Args["v1"])
require.Equal(t, "attr-abcdef", c.Targets[0].Args["v1"])
// attr-multifile
}
func TestHCLTargetAttrs(t *testing.T) {
dt := []byte(`
target "foo" {
dockerfile = "xxx"
context = target.bar.context
target = target.foo.dockerfile
}
target "bar" {
dockerfile = target.foo.dockerfile
context = "yyy"
target = target.bar.context
}
`)
c, err := ParseFile(dt, "docker-bake.hcl")
require.NoError(t, err)
require.Equal(t, 2, len(c.Targets))
require.Equal(t, "foo", c.Targets[0].Name)
require.Equal(t, "bar", c.Targets[1].Name)
require.Equal(t, "xxx", *c.Targets[0].Dockerfile)
require.Equal(t, "yyy", *c.Targets[0].Context)
require.Equal(t, "xxx", *c.Targets[0].Target)
require.Equal(t, "xxx", *c.Targets[1].Dockerfile)
require.Equal(t, "yyy", *c.Targets[1].Context)
require.Equal(t, "yyy", *c.Targets[1].Target)
}
func TestHCLTargetGlobal(t *testing.T) {
dt := []byte(`
target "foo" {
dockerfile = "x"
}
x = target.foo.dockerfile
y = x
target "bar" {
dockerfile = y
}
`)
c, err := ParseFile(dt, "docker-bake.hcl")
require.NoError(t, err)
require.Equal(t, 2, len(c.Targets))
require.Equal(t, "foo", c.Targets[0].Name)
require.Equal(t, "bar", c.Targets[1].Name)
require.Equal(t, "x", *c.Targets[0].Dockerfile)
require.Equal(t, "x", *c.Targets[1].Dockerfile)
}
func TestHCLTargetAttrName(t *testing.T) {
dt := []byte(`
target "foo" {
dockerfile = target.foo.name
}
`)
c, err := ParseFile(dt, "docker-bake.hcl")
require.NoError(t, err)
require.Equal(t, 1, len(c.Targets))
require.Equal(t, "foo", c.Targets[0].Name)
require.Equal(t, "foo", *c.Targets[0].Dockerfile)
}
func TestHCLTargetAttrEmptyChain(t *testing.T) {
dt := []byte(`
target "foo" {
# dockerfile = Dockerfile
context = target.foo.dockerfile
target = target.foo.context
}
`)
c, err := ParseFile(dt, "docker-bake.hcl")
require.NoError(t, err)
require.Equal(t, 1, len(c.Targets))
require.Equal(t, "foo", c.Targets[0].Name)
require.Nil(t, c.Targets[0].Dockerfile)
require.Nil(t, c.Targets[0].Context)
require.Nil(t, c.Targets[0].Target)
}
func TestHCLAttrsCustomType(t *testing.T) {
dt := []byte(`
platforms=["linux/arm64", "linux/amd64"]
@@ -594,10 +461,11 @@ func TestHCLAttrsCustomType(t *testing.T) {
require.Equal(t, 1, len(c.Targets))
require.Equal(t, c.Targets[0].Name, "app")
require.Equal(t, []string{"linux/arm64", "linux/amd64"}, c.Targets[0].Platforms)
require.Equal(t, ptrstr("linux/arm64"), c.Targets[0].Args["v1"])
require.Equal(t, "linux/arm64", c.Targets[0].Args["v1"])
}
func TestHCLMultiFileAttrs(t *testing.T) {
os.Unsetenv("FOO")
dt := []byte(`
variable "FOO" {
default = "abc"
@@ -619,9 +487,9 @@ func TestHCLMultiFileAttrs(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 1, len(c.Targets))
require.Equal(t, c.Targets[0].Name, "app")
require.Equal(t, ptrstr("pre-def"), c.Targets[0].Args["v1"])
require.Equal(t, "pre-def", c.Targets[0].Args["v1"])
t.Setenv("FOO", "ghi")
os.Setenv("FOO", "ghi")
c, err = ParseFiles([]File{
{Data: dt, Name: "c1.hcl"},
@@ -631,7 +499,7 @@ func TestHCLMultiFileAttrs(t *testing.T) {
require.Equal(t, 1, len(c.Targets))
require.Equal(t, c.Targets[0].Name, "app")
require.Equal(t, ptrstr("pre-ghi"), c.Targets[0].Args["v1"])
require.Equal(t, "pre-ghi", c.Targets[0].Args["v1"])
}
func TestJSONAttributes(t *testing.T) {
@@ -642,7 +510,7 @@ func TestJSONAttributes(t *testing.T) {
require.Equal(t, 1, len(c.Targets))
require.Equal(t, c.Targets[0].Name, "app")
require.Equal(t, ptrstr("pre-abc-def"), c.Targets[0].Args["v1"])
require.Equal(t, "pre-abc-def", c.Targets[0].Args["v1"])
}
func TestJSONFunctions(t *testing.T) {
@@ -667,7 +535,7 @@ func TestJSONFunctions(t *testing.T) {
require.Equal(t, 1, len(c.Targets))
require.Equal(t, c.Targets[0].Name, "app")
require.Equal(t, ptrstr("pre-<FOO-abc>"), c.Targets[0].Args["v1"])
require.Equal(t, "pre-<FOO-abc>", c.Targets[0].Args["v1"])
}
func TestHCLFunctionInAttr(t *testing.T) {
@@ -695,7 +563,7 @@ func TestHCLFunctionInAttr(t *testing.T) {
require.Equal(t, 1, len(c.Targets))
require.Equal(t, c.Targets[0].Name, "app")
require.Equal(t, ptrstr("FOO <> [baz]"), c.Targets[0].Args["v1"])
require.Equal(t, "FOO <> [baz]", c.Targets[0].Args["v1"])
}
func TestHCLCombineCompose(t *testing.T) {
@@ -726,8 +594,8 @@ services:
require.Equal(t, 1, len(c.Targets))
require.Equal(t, c.Targets[0].Name, "app")
require.Equal(t, ptrstr("foo"), c.Targets[0].Args["v1"])
require.Equal(t, ptrstr("bar"), c.Targets[0].Args["v2"])
require.Equal(t, "foo", c.Targets[0].Args["v1"])
require.Equal(t, "bar", c.Targets[0].Args["v2"])
require.Equal(t, "dir", *c.Targets[0].Context)
require.Equal(t, "Dockerfile-alternate", *c.Targets[0].Dockerfile)
}
@@ -872,10 +740,10 @@ target "two" {
require.Equal(t, 2, len(c.Targets))
require.Equal(t, c.Targets[0].Name, "one")
require.Equal(t, map[string]*string{"a": ptrstr("pre-ghi-jkl")}, c.Targets[0].Args)
require.Equal(t, map[string]string{"a": "pre-ghi-jkl"}, c.Targets[0].Args)
require.Equal(t, c.Targets[1].Name, "two")
require.Equal(t, map[string]*string{"b": ptrstr("pre-jkl")}, c.Targets[1].Args)
require.Equal(t, map[string]string{"b": "pre-jkl"}, c.Targets[1].Args)
}
func TestEmptyVariableJSON(t *testing.T) {
@@ -914,24 +782,3 @@ func TestFunctionNoResult(t *testing.T) {
_, err := ParseFile(dt, "docker-bake.hcl")
require.Error(t, err)
}
func TestVarUnsupportedType(t *testing.T) {
dt := []byte(`
variable "FOO" {
default = []
}
target "default" {}`)
t.Setenv("FOO", "bar")
_, err := ParseFile(dt, "docker-bake.hcl")
require.Error(t, err)
}
func ptrstr(s interface{}) *string {
var n *string = nil
if reflect.ValueOf(s).Kind() == reflect.String {
ss := s.(string)
n = &ss
}
return n
}
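Note: the paired assertions above differ only in whether a target arg is compared as a plain string or through the ptrstr helper, i.e. whether Args is map[string]string or map[string]*string on the two sides of this comparison. Below is a minimal standalone sketch (not part of the diff; names are illustrative) of why the pointer form is useful: a nil entry can record an arg that was explicitly set to null.

package main

import "fmt"

func main() {
	val := "123"
	// With *string values, a nil entry records an arg that was set to null,
	// which a plain map[string]string cannot distinguish from "unset".
	args := map[string]*string{
		"buildno": &val,
		"foo":     nil,
	}
	for _, k := range []string{"buildno", "foo"} {
		if v := args[k]; v == nil {
			fmt.Printf("%s: <null>\n", k)
		} else {
			fmt.Printf("%s: %s\n", k, *v)
		}
	}
}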

View File

@@ -1,103 +0,0 @@
package hclparser
import (
"github.com/hashicorp/hcl/v2"
)
type filterBody struct {
body hcl.Body
schema *hcl.BodySchema
exclude bool
}
func FilterIncludeBody(body hcl.Body, schema *hcl.BodySchema) hcl.Body {
return &filterBody{
body: body,
schema: schema,
}
}
func FilterExcludeBody(body hcl.Body, schema *hcl.BodySchema) hcl.Body {
return &filterBody{
body: body,
schema: schema,
exclude: true,
}
}
func (b *filterBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
if b.exclude {
schema = subtractSchemas(schema, b.schema)
} else {
schema = intersectSchemas(schema, b.schema)
}
content, _, diag := b.body.PartialContent(schema)
return content, diag
}
func (b *filterBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
if b.exclude {
schema = subtractSchemas(schema, b.schema)
} else {
schema = intersectSchemas(schema, b.schema)
}
return b.body.PartialContent(schema)
}
func (b *filterBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
return b.body.JustAttributes()
}
func (b *filterBody) MissingItemRange() hcl.Range {
return b.body.MissingItemRange()
}
func intersectSchemas(a, b *hcl.BodySchema) *hcl.BodySchema {
result := &hcl.BodySchema{}
for _, blockA := range a.Blocks {
for _, blockB := range b.Blocks {
if blockA.Type == blockB.Type {
result.Blocks = append(result.Blocks, blockA)
break
}
}
}
for _, attrA := range a.Attributes {
for _, attrB := range b.Attributes {
if attrA.Name == attrB.Name {
result.Attributes = append(result.Attributes, attrA)
break
}
}
}
return result
}
func subtractSchemas(a, b *hcl.BodySchema) *hcl.BodySchema {
result := &hcl.BodySchema{}
for _, blockA := range a.Blocks {
found := false
for _, blockB := range b.Blocks {
if blockA.Type == blockB.Type {
found = true
break
}
}
if !found {
result.Blocks = append(result.Blocks, blockA)
}
}
for _, attrA := range a.Attributes {
found := false
for _, attrB := range b.Attributes {
if attrA.Name == attrB.Name {
found = true
break
}
}
if !found {
result.Attributes = append(result.Attributes, attrA)
}
}
return result
}

View File

@@ -83,11 +83,11 @@ func appendJSONFuncCalls(exp hcl.Expression, m map[string]struct{}) error {
// hcl/v2/json/ast#stringVal
val := src.FieldByName("Value")
if !val.IsValid() || val.IsZero() {
if val.IsZero() {
return nil
}
rng := src.FieldByName("SrcRange")
if rng.IsZero() {
if val.IsZero() {
return nil
}
var stringVal struct {
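Note: the hunk above differs in whether the reflect.FieldByName result is guarded with IsValid before IsZero is called. A small standalone illustration (not part of the diff) of why that guard matters: IsZero panics when the looked-up field does not exist and the returned reflect.Value is invalid.

package main

import (
	"fmt"
	"reflect"
)

func main() {
	type node struct{ Value string }
	src := reflect.ValueOf(node{})

	missing := src.FieldByName("SrcRange")   // field not present on this type
	fmt.Println("valid:", missing.IsValid()) // false

	defer func() {
		if r := recover(); r != nil {
			fmt.Println("IsZero on an invalid value panics:", r)
		}
	}()
	fmt.Println(missing.IsZero()) // panics before printing anything
}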

View File

@@ -13,7 +13,6 @@ import (
"github.com/hashicorp/hcl/v2/gohcl"
"github.com/pkg/errors"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/gocty"
)
type Opt struct {
@@ -49,17 +48,11 @@ type parser struct {
attrs map[string]*hcl.Attribute
funcs map[string]*functionDef
blocks map[string]map[string][]*hcl.Block
blockValues map[*hcl.Block]reflect.Value
blockTypes map[string]reflect.Type
ectx *hcl.EvalContext
progress map[string]struct{}
progressF map[string]struct{}
progressB map[*hcl.Block]map[string]struct{}
doneF map[string]struct{}
doneB map[*hcl.Block]map[string]struct{}
}
func (p *parser) loadDeps(exp hcl.Expression, exclude map[string]struct{}) hcl.Diagnostics {
@@ -86,69 +79,15 @@ func (p *parser) loadDeps(exp hcl.Expression, exclude map[string]struct{}) hcl.D
if _, ok := exclude[v.RootName()]; ok {
continue
}
if _, ok := p.blockTypes[v.RootName()]; ok {
blockType := v.RootName()
split := v.SimpleSplit().Rel
if len(split) == 0 {
return hcl.Diagnostics{
&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid expression",
Detail: fmt.Sprintf("cannot access %s as a variable", blockType),
Subject: exp.Range().Ptr(),
Context: exp.Range().Ptr(),
},
}
}
blockName, ok := split[0].(hcl.TraverseAttr)
if !ok {
return hcl.Diagnostics{
&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid expression",
Detail: fmt.Sprintf("cannot traverse %s without attribute", blockType),
Subject: exp.Range().Ptr(),
Context: exp.Range().Ptr(),
},
}
}
blocks := p.blocks[blockType][blockName.Name]
if len(blocks) == 0 {
continue
}
var target *hcl.BodySchema
if len(split) > 1 {
if attr, ok := split[1].(hcl.TraverseAttr); ok {
target = &hcl.BodySchema{
Attributes: []hcl.AttributeSchema{{Name: attr.Name}},
Blocks: []hcl.BlockHeaderSchema{{Type: attr.Name}},
}
}
}
if err := p.resolveBlock(blocks[0], target); err != nil {
return hcl.Diagnostics{
&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid expression",
Detail: err.Error(),
Subject: v.SourceRange().Ptr(),
Context: v.SourceRange().Ptr(),
},
}
}
} else {
if err := p.resolveValue(v.RootName()); err != nil {
return hcl.Diagnostics{
&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid expression",
Detail: err.Error(),
Subject: v.SourceRange().Ptr(),
Context: v.SourceRange().Ptr(),
},
}
if err := p.resolveValue(v.RootName()); err != nil {
return hcl.Diagnostics{
&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid expression",
Detail: err.Error(),
Subject: v.SourceRange().Ptr(),
Context: v.SourceRange().Ptr(),
},
}
}
}
@@ -156,8 +95,6 @@ func (p *parser) loadDeps(exp hcl.Expression, exclude map[string]struct{}) hcl.D
return nil
}
// resolveFunction forces evaluation of a function, storing the result into the
// parser.
func (p *parser) resolveFunction(name string) error {
if _, ok := p.doneF[name]; ok {
return nil
@@ -233,8 +170,6 @@ func (p *parser) resolveFunction(name string) error {
return nil
}
// resolveValue forces evaluation of a named value, storing the result into the
// parser.
func (p *parser) resolveValue(name string) (err error) {
if _, ok := p.ectx.Variables[name]; ok {
return nil
@@ -281,16 +216,19 @@ func (p *parser) resolveValue(name string) (err error) {
_, isVar := p.vars[name]
if envv, ok := p.opt.LookupVar(name); ok && isVar {
switch {
case vv.Type().Equals(cty.Bool):
if vv.Type().Equals(cty.Bool) {
b, err := strconv.ParseBool(envv)
if err != nil {
return errors.Wrapf(err, "failed to parse %s as bool", name)
}
vv = cty.BoolVal(b)
case vv.Type().Equals(cty.String), vv.Type().Equals(cty.DynamicPseudoType):
vv = cty.StringVal(envv)
case vv.Type().Equals(cty.Number):
vv := cty.BoolVal(b)
v = &vv
return nil
} else if vv.Type().Equals(cty.String) {
vv := cty.StringVal(envv)
v = &vv
return nil
} else if vv.Type().Equals(cty.Number) {
n, err := strconv.ParseFloat(envv, 64)
if err == nil && (math.IsNaN(n) || math.IsInf(n, 0)) {
err = errors.Errorf("invalid number value")
@@ -298,167 +236,18 @@ func (p *parser) resolveValue(name string) (err error) {
if err != nil {
return errors.Wrapf(err, "failed to parse %s as number", name)
}
vv = cty.NumberVal(big.NewFloat(n))
default:
vv := cty.NumberVal(big.NewFloat(n))
v = &vv
return nil
} else {
// TODO: support lists with csv values
return errors.Errorf("unsupported type %s for variable %s", vv.Type().FriendlyName(), name)
return errors.Errorf("unsupported type %s for variable %s", v.Type(), name)
}
}
v = &vv
return nil
}
// resolveBlock force evaluates a block, storing the result in the parser. If a
// target schema is provided, only the attributes and blocks present in the
// schema will be evaluated.
func (p *parser) resolveBlock(block *hcl.Block, target *hcl.BodySchema) (err error) {
name := block.Labels[0]
if err := p.opt.ValidateLabel(name); err != nil {
return hcl.Diagnostics{
&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid name",
Detail: err.Error(),
Subject: &block.LabelRanges[0],
},
}
}
if _, ok := p.doneB[block]; !ok {
p.doneB[block] = map[string]struct{}{}
}
if _, ok := p.progressB[block]; !ok {
p.progressB[block] = map[string]struct{}{}
}
if target != nil {
// filter out attributes and blocks that are already evaluated
original := target
target = &hcl.BodySchema{}
for _, a := range original.Attributes {
if _, ok := p.doneB[block][a.Name]; !ok {
target.Attributes = append(target.Attributes, a)
}
}
for _, b := range original.Blocks {
if _, ok := p.doneB[block][b.Type]; !ok {
target.Blocks = append(target.Blocks, b)
}
}
if len(target.Attributes) == 0 && len(target.Blocks) == 0 {
return nil
}
}
if target != nil {
// detect reference cycles
for _, a := range target.Attributes {
if _, ok := p.progressB[block][a.Name]; ok {
return errors.Errorf("reference cycle not allowed for %s.%s.%s", block.Type, name, a.Name)
}
}
for _, b := range target.Blocks {
if _, ok := p.progressB[block][b.Type]; ok {
return errors.Errorf("reference cycle not allowed for %s.%s.%s", block.Type, name, b.Type)
}
}
for _, a := range target.Attributes {
p.progressB[block][a.Name] = struct{}{}
}
for _, b := range target.Blocks {
p.progressB[block][b.Type] = struct{}{}
}
}
// create a filtered body that contains only the target properties
body := func() hcl.Body {
if target != nil {
return FilterIncludeBody(block.Body, target)
}
filter := &hcl.BodySchema{}
for k := range p.doneB[block] {
filter.Attributes = append(filter.Attributes, hcl.AttributeSchema{Name: k})
filter.Blocks = append(filter.Blocks, hcl.BlockHeaderSchema{Type: k})
}
return FilterExcludeBody(block.Body, filter)
}
// load dependencies from all targeted properties
t, ok := p.blockTypes[block.Type]
if !ok {
return nil
}
schema, _ := gohcl.ImpliedBodySchema(reflect.New(t).Interface())
content, _, diag := body().PartialContent(schema)
if diag.HasErrors() {
return diag
}
for _, a := range content.Attributes {
diag := p.loadDeps(a.Expr, nil)
if diag.HasErrors() {
return diag
}
}
for _, b := range content.Blocks {
err := p.resolveBlock(b, nil)
if err != nil {
return err
}
}
// decode!
var output reflect.Value
if prev, ok := p.blockValues[block]; ok {
output = prev
} else {
output = reflect.New(t)
setLabel(output, block.Labels[0]) // early attach labels, so we can reference them
}
diag = gohcl.DecodeBody(body(), p.ectx, output.Interface())
if diag.HasErrors() {
return diag
}
p.blockValues[block] = output
// mark all targeted properties as done
for _, a := range content.Attributes {
p.doneB[block][a.Name] = struct{}{}
}
for _, b := range content.Blocks {
p.doneB[block][b.Type] = struct{}{}
}
if target != nil {
for _, a := range target.Attributes {
p.doneB[block][a.Name] = struct{}{}
}
for _, b := range target.Blocks {
p.doneB[block][b.Type] = struct{}{}
}
}
// store the result into the evaluation context (so it can be referenced)
outputType, err := gocty.ImpliedType(output.Interface())
if err != nil {
return err
}
outputValue, err := gocty.ToCtyValue(output.Interface(), outputType)
if err != nil {
return err
}
var m map[string]cty.Value
if m2, ok := p.ectx.Variables[block.Type]; ok {
m = m2.AsValueMap()
}
if m == nil {
m = map[string]cty.Value{}
}
m[name] = outputValue
p.ectx.Variables[block.Type] = cty.MapVal(m)
return nil
}
func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
reserved := map[string]struct{}{}
schema, _ := gohcl.ImpliedBodySchema(val)
@@ -495,16 +284,9 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
attrs: map[string]*hcl.Attribute{},
funcs: map[string]*functionDef{},
blocks: map[string]map[string][]*hcl.Block{},
blockValues: map[*hcl.Block]reflect.Value{},
blockTypes: map[string]reflect.Type{},
progress: map[string]struct{}{},
progressF: map[string]struct{}{},
progressB: map[*hcl.Block]map[string]struct{}{},
doneF: map[string]struct{}{},
doneB: map[*hcl.Block]map[string]struct{}{},
doneF: map[string]struct{}{},
ectx: &hcl.EvalContext{
Variables: map[string]cty.Value{},
Functions: stdlibFunctions,
@@ -555,15 +337,20 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
_ = p.resolveValue(k)
}
for _, a := range content.Attributes {
return hcl.Diagnostics{
&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid attribute",
Detail: "global attributes currently not supported",
Subject: &a.Range,
Context: &a.Range,
},
for k := range p.attrs {
if err := p.resolveValue(k); err != nil {
if diags, ok := err.(hcl.Diagnostics); ok {
return diags
}
return hcl.Diagnostics{
&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid attribute",
Detail: err.Error(),
Subject: &p.attrs[k].Range,
Context: &p.attrs[k].Range,
},
}
}
}
@@ -616,6 +403,19 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
}
}
for _, a := range content.Attributes {
return hcl.Diagnostics{
&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid attribute",
Detail: "global attributes currently not supported",
Subject: &a.Range,
Context: &a.Range,
},
}
}
m := map[string]map[string][]*hcl.Block{}
for _, b := range content.Blocks {
if len(b.Labels) == 0 || len(b.Labels) > 1 {
return hcl.Diagnostics{
@@ -628,16 +428,19 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
},
}
}
bm, ok := p.blocks[b.Type]
bm, ok := m[b.Type]
if !ok {
bm = map[string][]*hcl.Block{}
p.blocks[b.Type] = bm
m[b.Type] = bm
}
lbl := b.Labels[0]
bm[lbl] = append(bm[lbl], b)
}
vt := reflect.ValueOf(val).Elem().Type()
numFields := vt.NumField()
type value struct {
reflect.Value
idx int
@@ -649,11 +452,9 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
}
types := map[string]field{}
vt := reflect.ValueOf(val).Elem().Type()
for i := 0; i < vt.NumField(); i++ {
for i := 0; i < numFields; i++ {
tags := strings.Split(vt.Field(i).Tag.Get("hcl"), ",")
p.blockTypes[tags[0]] = vt.Field(i).Type.Elem().Elem()
types[tags[0]] = field{
idx: i,
typ: vt.Field(i).Type,
@@ -665,29 +466,29 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
for _, b := range content.Blocks {
v := reflect.ValueOf(val)
err := p.resolveBlock(b, nil)
if err != nil {
if diag, ok := err.(hcl.Diagnostics); ok {
if diag.HasErrors() {
diags = append(diags, diag...)
continue
}
} else {
return hcl.Diagnostics{
&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid attribute",
Detail: err.Error(),
Subject: &b.LabelRanges[0],
Context: &b.DefRange,
},
}
t, ok := types[b.Type]
if !ok {
continue
}
vv := reflect.New(t.typ.Elem().Elem())
diag := gohcl.DecodeBody(b.Body, p.ectx, vv.Interface())
if diag.HasErrors() {
diags = append(diags, diag...)
continue
}
if err := opt.ValidateLabel(b.Labels[0]); err != nil {
return hcl.Diagnostics{
&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid name",
Detail: err.Error(),
Subject: &b.LabelRanges[0],
},
}
}
vv := p.blockValues[b]
t := types[b.Type]
lblIndex := setLabel(vv, b.Labels[0])
oldValue, exists := t.values[b.Labels[0]]
@@ -701,6 +502,7 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
}
}
}
}
if exists {
if m := oldValue.Value.MethodByName("Merge"); m.IsValid() {
@@ -721,23 +523,6 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
return diags
}
for k := range p.attrs {
if err := p.resolveValue(k); err != nil {
if diags, ok := err.(hcl.Diagnostics); ok {
return diags
}
return hcl.Diagnostics{
&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid attribute",
Detail: err.Error(),
Subject: &p.attrs[k].Range,
Context: &p.attrs[k].Range,
},
}
}
}
return nil
}
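Note: one behavioural difference visible in resolveValue above is how an environment override is coerced to the declared variable type (bool, string, number) before being stored as a cty value. Below is a standalone sketch of that coercion (not part of the diff; the helper name is made up).

package main

import (
	"fmt"
	"math"
	"math/big"
	"strconv"

	"github.com/zclconf/go-cty/cty"
)

// coerce parses an environment string according to the declared HCL
// variable type, mirroring the env handling in resolveValue.
func coerce(declared cty.Type, env string) (cty.Value, error) {
	switch {
	case declared.Equals(cty.Bool):
		b, err := strconv.ParseBool(env)
		if err != nil {
			return cty.NilVal, fmt.Errorf("failed to parse %q as bool: %w", env, err)
		}
		return cty.BoolVal(b), nil
	case declared.Equals(cty.Number):
		n, err := strconv.ParseFloat(env, 64)
		if err != nil || math.IsNaN(n) || math.IsInf(n, 0) {
			return cty.NilVal, fmt.Errorf("failed to parse %q as number", env)
		}
		return cty.NumberVal(big.NewFloat(n)), nil
	case declared.Equals(cty.String):
		return cty.StringVal(env), nil
	default:
		// lists and other types are rejected, as in both versions above
		return cty.NilVal, fmt.Errorf("unsupported type %s", declared.FriendlyName())
	}
}

func main() {
	v, _ := coerce(cty.Number, "5.1")
	fmt.Println(v.AsBigFloat()) // 5.1
	if _, err := coerce(cty.Bool, "maybe"); err != nil {
		fmt.Println(err) // failed to parse "maybe" as bool: ...
	}
}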

View File

@@ -6,7 +6,7 @@ import (
"context"
"strings"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/build"
"github.com/docker/buildx/driver"
"github.com/docker/buildx/util/progress"
"github.com/moby/buildkit/client"
@@ -20,7 +20,7 @@ type Input struct {
URL string
}
func ReadRemoteFiles(ctx context.Context, nodes []builder.Node, url string, names []string, pw progress.Writer) ([]File, *Input, error) {
func ReadRemoteFiles(ctx context.Context, dis []build.DriverInfo, url string, names []string, pw progress.Writer) ([]File, *Input, error) {
var filename string
st, ok := detectGitContext(url)
if !ok {
@@ -33,18 +33,18 @@ func ReadRemoteFiles(ctx context.Context, nodes []builder.Node, url string, name
inp := &Input{State: st, URL: url}
var files []File
var node *builder.Node
for _, n := range nodes {
if n.Err == nil {
node = &n
var di *build.DriverInfo
for _, d := range dis {
if d.Err == nil {
di = &d
continue
}
}
if node == nil {
if di == nil {
return nil, nil, nil
}
c, err := driver.Boot(ctx, ctx, node.Driver, pw)
c, err := driver.Boot(ctx, ctx, di.Driver, pw)
if err != nil {
return nil, nil, err
}

View File

@@ -22,9 +22,7 @@ import (
"github.com/containerd/containerd/content/local"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/platforms"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/driver"
"github.com/docker/buildx/util/dockerutil"
"github.com/docker/buildx/util/imagetools"
"github.com/docker/buildx/util/progress"
"github.com/docker/buildx/util/resolver"
@@ -33,6 +31,7 @@ import (
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/builder/remotecontext/urlutil"
dockerclient "github.com/docker/docker/client"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/client/llb"
@@ -67,7 +66,6 @@ type Options struct {
Inputs Inputs
Allow []entitlements.Entitlement
Attests map[string]*string
BuildArgs map[string]string
CacheFrom []client.CacheOptionsEntry
CacheTo []client.CacheOptionsEntry
@@ -111,15 +109,28 @@ type NamedContext struct {
State *llb.State
}
func filterAvailableNodes(nodes []builder.Node) ([]builder.Node, error) {
out := make([]builder.Node, 0, len(nodes))
type DriverInfo struct {
Driver driver.Driver
Name string
Platform []specs.Platform
Err error
ImageOpt imagetools.Opt
ProxyConfig map[string]string
}
type DockerAPI interface {
DockerAPI(name string) (dockerclient.APIClient, error)
}
func filterAvailableDrivers(drivers []DriverInfo) ([]DriverInfo, error) {
out := make([]DriverInfo, 0, len(drivers))
err := errors.Errorf("no drivers found")
for _, n := range nodes {
if n.Err == nil && n.Driver != nil {
out = append(out, n)
for _, di := range drivers {
if di.Err == nil && di.Driver != nil {
out = append(out, di)
}
if n.Err != nil {
err = n.Err
if di.Err != nil {
err = di.Err
}
}
if len(out) > 0 {
@@ -158,8 +169,8 @@ func allIndexes(l int) []int {
return out
}
func ensureBooted(ctx context.Context, nodes []builder.Node, idxs []int, pw progress.Writer) ([]*client.Client, error) {
clients := make([]*client.Client, len(nodes))
func ensureBooted(ctx context.Context, drivers []DriverInfo, idxs []int, pw progress.Writer) ([]*client.Client, error) {
clients := make([]*client.Client, len(drivers))
baseCtx := ctx
eg, ctx := errgroup.WithContext(ctx)
@@ -167,7 +178,7 @@ func ensureBooted(ctx context.Context, nodes []builder.Node, idxs []int, pw prog
for _, i := range idxs {
func(i int) {
eg.Go(func() error {
c, err := driver.Boot(ctx, baseCtx, nodes[i].Driver, pw)
c, err := driver.Boot(ctx, baseCtx, drivers[i].Driver, pw)
if err != nil {
return err
}
@@ -208,8 +219,8 @@ func splitToDriverPairs(availablePlatforms map[string]int, opt map[string]Option
return m
}
func resolveDrivers(ctx context.Context, nodes []builder.Node, opt map[string]Options, pw progress.Writer) (map[string][]driverPair, []*client.Client, error) {
dps, clients, err := resolveDriversBase(ctx, nodes, opt, pw)
func resolveDrivers(ctx context.Context, drivers []DriverInfo, opt map[string]Options, pw progress.Writer) (map[string][]driverPair, []*client.Client, error) {
dps, clients, err := resolveDriversBase(ctx, drivers, opt, pw)
if err != nil {
return nil, nil, err
}
@@ -249,10 +260,10 @@ func resolveDrivers(ctx context.Context, nodes []builder.Node, opt map[string]Op
return dps, clients, nil
}
func resolveDriversBase(ctx context.Context, nodes []builder.Node, opt map[string]Options, pw progress.Writer) (map[string][]driverPair, []*client.Client, error) {
func resolveDriversBase(ctx context.Context, drivers []DriverInfo, opt map[string]Options, pw progress.Writer) (map[string][]driverPair, []*client.Client, error) {
availablePlatforms := map[string]int{}
for i, node := range nodes {
for _, p := range node.Platforms {
for i, d := range drivers {
for _, p := range d.Platform {
availablePlatforms[platforms.Format(p)] = i
}
}
@@ -270,12 +281,12 @@ func resolveDriversBase(ctx context.Context, nodes []builder.Node, opt map[strin
}
// fast path
if len(nodes) == 1 || len(allPlatforms) == 0 {
if len(drivers) == 1 || len(allPlatforms) == 0 {
m := map[string][]driverPair{}
for k, opt := range opt {
m[k] = []driverPair{{driverIndex: 0, platforms: opt.Platforms}}
}
clients, err := ensureBooted(ctx, nodes, driverIndexes(m), pw)
clients, err := ensureBooted(ctx, drivers, driverIndexes(m), pw)
if err != nil {
return nil, nil, err
}
@@ -285,7 +296,7 @@ func resolveDriversBase(ctx context.Context, nodes []builder.Node, opt map[strin
// map based on existing platforms
if !undetectedPlatform {
m := splitToDriverPairs(availablePlatforms, opt)
clients, err := ensureBooted(ctx, nodes, driverIndexes(m), pw)
clients, err := ensureBooted(ctx, drivers, driverIndexes(m), pw)
if err != nil {
return nil, nil, err
}
@@ -293,7 +304,7 @@ func resolveDriversBase(ctx context.Context, nodes []builder.Node, opt map[strin
}
// boot all drivers in k
clients, err := ensureBooted(ctx, nodes, allIndexes(len(nodes)), pw)
clients, err := ensureBooted(ctx, drivers, allIndexes(len(drivers)), pw)
if err != nil {
return nil, nil, err
}
@@ -355,8 +366,8 @@ func toRepoOnly(in string) (string, error) {
return strings.Join(out, ","), nil
}
func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Options, bopts gateway.BuildOpts, configDir string, pw progress.Writer, dl dockerLoadCallback) (solveOpt *client.SolveOpt, release func(), err error) {
nodeDriver := node.Driver
func toSolveOpt(ctx context.Context, di DriverInfo, multiDriver bool, opt Options, bopts gateway.BuildOpts, configDir string, pw progress.Writer, dl dockerLoadCallback) (solveOpt *client.SolveOpt, release func(), err error) {
d := di.Driver
defers := make([]func(), 0, 2)
releaseF := func() {
for _, f := range defers {
@@ -388,8 +399,8 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
}
for _, e := range opt.CacheTo {
if e.Type != "inline" && !nodeDriver.Features()[driver.CacheExport] {
return nil, nil, notSupported(nodeDriver, driver.CacheExport)
if e.Type != "inline" && !d.Features()[driver.CacheExport] {
return nil, nil, notSupported(d, driver.CacheExport)
}
}
@@ -399,10 +410,6 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
if !bopts.LLBCaps.Contains(apicaps.CapID("cache.gha")) {
continue
}
} else if e.Type == "s3" {
if !bopts.LLBCaps.Contains(apicaps.CapID("cache.s3")) {
continue
}
}
cacheTo = append(cacheTo, e)
}
@@ -413,10 +420,6 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
if !bopts.LLBCaps.Contains(apicaps.CapID("cache.gha")) {
continue
}
} else if e.Type == "s3" {
if !bopts.LLBCaps.Contains(apicaps.CapID("cache.s3")) {
continue
}
}
cacheFrom = append(cacheFrom, e)
}
@@ -449,7 +452,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
case 1:
// valid
case 0:
if nodeDriver.IsMobyDriver() && !noDefaultLoad() {
if d.IsMobyDriver() && !noDefaultLoad() {
// backwards compat for docker driver only:
// this ensures the build results in a docker image.
opt.Exports = []client.ExportEntry{{Type: "image", Attrs: map[string]string{}}}
@@ -498,15 +501,15 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
if (e.Type == "local" || e.Type == "tar") && opt.ImageIDFile != "" {
return nil, nil, errors.Errorf("local and tar exporters are incompatible with image ID file")
}
if e.Type == "oci" && !nodeDriver.Features()[driver.OCIExporter] {
return nil, nil, notSupported(nodeDriver, driver.OCIExporter)
if e.Type == "oci" && !d.Features()[driver.OCIExporter] {
return nil, nil, notSupported(d, driver.OCIExporter)
}
if e.Type == "docker" {
if len(opt.Platforms) > 1 {
return nil, nil, errors.Errorf("docker exporter does not currently support exporting manifest lists")
}
if e.Output == nil {
if nodeDriver.IsMobyDriver() {
if d.IsMobyDriver() {
e.Type = "image"
} else {
w, cancel, err := dl(e.Attrs["context"])
@@ -516,11 +519,11 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
defers = append(defers, cancel)
opt.Exports[i].Output = wrapWriteCloser(w)
}
} else if !nodeDriver.Features()[driver.DockerExporter] {
return nil, nil, notSupported(nodeDriver, driver.DockerExporter)
} else if !d.Features()[driver.DockerExporter] {
return nil, nil, notSupported(d, driver.DockerExporter)
}
}
if e.Type == "image" && nodeDriver.IsMobyDriver() {
if e.Type == "image" && d.IsMobyDriver() {
opt.Exports[i].Type = "moby"
if e.Attrs["push"] != "" {
if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok {
@@ -541,7 +544,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
so.Exports = opt.Exports
so.Session = opt.Session
releaseLoad, err := LoadInputs(ctx, nodeDriver, opt.Inputs, pw, &so)
releaseLoad, err := LoadInputs(ctx, d, opt.Inputs, pw, &so)
if err != nil {
return nil, nil, err
}
@@ -573,35 +576,20 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
so.FrontendAttrs["label:"+k] = v
}
for k, v := range node.ProxyConfig {
for k, v := range di.ProxyConfig {
if _, ok := opt.BuildArgs[k]; !ok {
so.FrontendAttrs["build-arg:"+k] = v
}
}
if len(opt.Attests) > 0 {
if !bopts.LLBCaps.Contains(apicaps.CapID("exporter.image.attestations")) {
return nil, nil, errors.Errorf("attestations are not supported by the current buildkitd")
}
for k, v := range opt.Attests {
if v == nil {
continue
}
so.FrontendAttrs[k] = *v
}
}
if _, ok := opt.Attests["attest:provenance"]; !ok {
so.FrontendAttrs["attest:provenance"] = "mode=min,inline-only=true"
}
// set platforms
if len(opt.Platforms) != 0 {
pp := make([]string, len(opt.Platforms))
for i, p := range opt.Platforms {
pp[i] = platforms.Format(p)
}
if len(pp) > 1 && !nodeDriver.Features()[driver.MultiPlatform] {
return nil, nil, notSupported(nodeDriver, driver.MultiPlatform)
if len(pp) > 1 && !d.Features()[driver.MultiPlatform] {
return nil, nil, notSupported(d, driver.MultiPlatform)
}
so.FrontendAttrs["platform"] = strings.Join(pp, ",")
}
@@ -619,13 +607,11 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
}
// setup extrahosts
extraHosts, err := toBuildkitExtraHosts(opt.ExtraHosts, nodeDriver.IsMobyDriver())
extraHosts, err := toBuildkitExtraHosts(opt.ExtraHosts)
if err != nil {
return nil, nil, err
}
if len(extraHosts) > 0 {
so.FrontendAttrs["add-hosts"] = extraHosts
}
so.FrontendAttrs["add-hosts"] = extraHosts
// setup shm size
if opt.ShmSize.Value() > 0 {
@@ -646,17 +632,14 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
// ContainerConfig is configuration for a container to run.
type ContainerConfig struct {
ResultCtx *ResultContext
Stdin io.ReadCloser
Stdout io.WriteCloser
Stderr io.WriteCloser
Tty bool
Entrypoint []string
Cmd []string
Env []string
User *string
Cwd *string
Args []string
Env []string
User string
Cwd string
Tty bool
Stdin io.ReadCloser
Stdout io.WriteCloser
Stderr io.WriteCloser
}
// ResultContext is a build result with the client that built it.
@@ -712,53 +695,11 @@ func Invoke(ctx context.Context, cfg ContainerConfig) error {
}
defer ctr.Release(context.TODO())
imgData := res.Metadata[exptypes.ExporterImageConfigKey]
var img *specs.Image
if len(imgData) > 0 {
img = &specs.Image{}
if err := json.Unmarshal(imgData, img); err != nil {
fmt.Println(err)
return nil, err
}
}
user := ""
if cfg.User != nil {
user = *cfg.User
} else if img != nil {
user = img.Config.User
}
cwd := ""
if cfg.Cwd != nil {
cwd = *cfg.Cwd
} else if img != nil {
cwd = img.Config.WorkingDir
}
env := []string{}
if img != nil {
env = append(env, img.Config.Env...)
}
env = append(env, cfg.Env...)
args := []string{}
if cfg.Entrypoint != nil {
args = append(args, cfg.Entrypoint...)
} else if img != nil {
args = append(args, img.Config.Entrypoint...)
}
if cfg.Cmd != nil {
args = append(args, cfg.Cmd...)
} else if img != nil {
args = append(args, img.Config.Cmd...)
}
proc, err := ctr.Start(ctx, gateway.StartRequest{
Args: args,
Env: env,
User: user,
Cwd: cwd,
Args: cfg.Args,
Env: cfg.Env,
User: cfg.User,
Cwd: cfg.Cwd,
Tty: cfg.Tty,
Stdin: cfg.Stdin,
Stdout: cfg.Stdout,
@@ -788,24 +729,24 @@ func Invoke(ctx context.Context, cfg ContainerConfig) error {
return err
}
func Build(ctx context.Context, nodes []builder.Node, opt map[string]Options, docker *dockerutil.Client, configDir string, w progress.Writer) (resp map[string]*client.SolveResponse, err error) {
return BuildWithResultHandler(ctx, nodes, opt, docker, configDir, w, nil, false)
func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, docker DockerAPI, configDir string, w progress.Writer) (resp map[string]*client.SolveResponse, err error) {
return BuildWithResultHandler(ctx, drivers, opt, docker, configDir, w, nil, false)
}
func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[string]Options, docker *dockerutil.Client, configDir string, w progress.Writer, resultHandleFunc func(driverIndex int, rCtx *ResultContext), allowNoOutput bool) (resp map[string]*client.SolveResponse, err error) {
if len(nodes) == 0 {
func BuildWithResultHandler(ctx context.Context, drivers []DriverInfo, opt map[string]Options, docker DockerAPI, configDir string, w progress.Writer, resultHandleFunc func(driverIndex int, rCtx *ResultContext), allowNoOutput bool) (resp map[string]*client.SolveResponse, err error) {
if len(drivers) == 0 {
return nil, errors.Errorf("driver required for build")
}
nodes, err = filterAvailableNodes(nodes)
drivers, err = filterAvailableDrivers(drivers)
if err != nil {
return nil, errors.Wrapf(err, "no valid drivers found")
}
var noMobyDriver driver.Driver
for _, n := range nodes {
if !n.Driver.IsMobyDriver() {
noMobyDriver = n.Driver
for _, d := range drivers {
if !d.Driver.IsMobyDriver() {
noMobyDriver = d.Driver
break
}
}
@@ -829,7 +770,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
}
}
m, clients, err := resolveDrivers(ctx, nodes, opt, w)
m, clients, err := resolveDrivers(ctx, drivers, opt, w)
if err != nil {
return nil, err
}
@@ -848,22 +789,15 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
for k, opt := range opt {
multiDriver := len(m[k]) > 1
hasMobyDriver := false
gitattrs, err := getGitAttributes(ctx, opt.Inputs.ContextPath, opt.Inputs.DockerfilePath)
if err != nil {
logrus.Warn(err)
}
for i, np := range m[k] {
node := nodes[np.driverIndex]
if node.Driver.IsMobyDriver() {
for i, dp := range m[k] {
di := drivers[dp.driverIndex]
if di.Driver.IsMobyDriver() {
hasMobyDriver = true
}
opt.Platforms = np.platforms
so, release, err := toSolveOpt(ctx, node, multiDriver, opt, np.bopts, configDir, w, func(name string) (io.WriteCloser, func(), error) {
return docker.LoadImage(ctx, name, w)
opt.Platforms = dp.platforms
so, release, err := toSolveOpt(ctx, di, multiDriver, opt, dp.bopts, configDir, w, func(name string) (io.WriteCloser, func(), error) {
return newDockerLoader(ctx, docker, name, w)
})
for k, v := range gitattrs {
so.FrontendAttrs[k] = v
}
if err != nil {
return nil, err
}
@@ -990,7 +924,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
if len(descs) > 0 {
var imageopt imagetools.Opt
for _, dp := range dps {
imageopt = nodes[dp.driverIndex].ImageOpt
imageopt = drivers[dp.driverIndex].ImageOpt
break
}
names := strings.Split(pushNames, ",")
@@ -1115,13 +1049,11 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
req := gateway.SolveRequest{
Frontend: so.Frontend,
FrontendOpt: so.FrontendAttrs,
FrontendInputs: frontendInputs,
FrontendOpt: make(map[string]string),
}
for k, v := range so.FrontendAttrs {
req.FrontendOpt[k] = v
}
so.Frontend = ""
so.FrontendAttrs = nil
so.FrontendInputs = nil
ch, done := progress.NewChannel(pw)
@@ -1191,8 +1123,8 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
rr.ExporterResponse[k] = string(v)
}
node := nodes[dp.driverIndex].Driver
if node.IsMobyDriver() {
d := drivers[dp.driverIndex].Driver
if d.IsMobyDriver() {
for _, e := range so.Exports {
if e.Type == "moby" && e.Attrs["push"] != "" {
if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok {
@@ -1204,12 +1136,12 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
pushList := strings.Split(pushNames, ",")
for _, name := range pushList {
if err := progress.Wrap(fmt.Sprintf("pushing %s with docker", name), pw.Write, func(l progress.SubLogger) error {
return pushWithMoby(ctx, node, name, l)
return pushWithMoby(ctx, d, name, l)
}); err != nil {
return err
}
}
remoteDigest, err := remoteDigestWithMoby(ctx, node, pushList[0])
remoteDigest, err := remoteDigestWithMoby(ctx, d, pushList[0])
if err == nil && remoteDigest != "" {
// old daemons might not have containerimage.config.digest set
// in response so use containerimage.digest value for it if available
@@ -1631,6 +1563,40 @@ func notSupported(d driver.Driver, f driver.Feature) error {
type dockerLoadCallback func(name string) (io.WriteCloser, func(), error)
func newDockerLoader(ctx context.Context, d DockerAPI, name string, status progress.Writer) (io.WriteCloser, func(), error) {
c, err := d.DockerAPI(name)
if err != nil {
return nil, nil, err
}
pr, pw := io.Pipe()
done := make(chan struct{})
ctx, cancel := context.WithCancel(ctx)
var w *waitingWriter
w = &waitingWriter{
PipeWriter: pw,
f: func() {
resp, err := c.ImageLoad(ctx, pr, false)
defer close(done)
if err != nil {
pr.CloseWithError(err)
w.mu.Lock()
w.err = err
w.mu.Unlock()
return
}
prog := progress.WithPrefix(status, "", false)
progress.FromReader(prog, "importing to docker", resp.Body)
},
done: done,
cancel: cancel,
}
return w, func() {
pr.Close()
}, nil
}
func noDefaultLoad() bool {
v, ok := os.LookupEnv("BUILDX_NO_DEFAULT_LOAD")
if !ok {
@@ -1643,6 +1609,34 @@ func noDefaultLoad() bool {
return b
}
type waitingWriter struct {
*io.PipeWriter
f func()
once sync.Once
mu sync.Mutex
err error
done chan struct{}
cancel func()
}
func (w *waitingWriter) Write(dt []byte) (int, error) {
w.once.Do(func() {
go w.f()
})
return w.PipeWriter.Write(dt)
}
func (w *waitingWriter) Close() error {
err := w.PipeWriter.Close()
<-w.done
if err == nil {
w.mu.Lock()
defer w.mu.Unlock()
return w.err
}
return err
}
// handle https://github.com/moby/moby/pull/10858
func handleLowercaseDockerfile(dir, p string) string {
if filepath.Base(p) != "Dockerfile" {
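Note: newDockerLoader and waitingWriter above implement a writer whose consuming goroutine (the ImageLoad call) is started only on the first Write, and whose Close blocks until that goroutine has drained the pipe. A cut-down standalone sketch of the same pattern, with no Docker client involved (not part of the diff):

package main

import (
	"fmt"
	"io"
	"strings"
	"sync"
)

// lazyPipe starts its consumer on the first Write and waits for it on Close.
type lazyPipe struct {
	*io.PipeWriter
	start func()
	once  sync.Once
	done  chan struct{}
}

func (w *lazyPipe) Write(p []byte) (int, error) {
	w.once.Do(func() { go w.start() })
	return w.PipeWriter.Write(p)
}

func (w *lazyPipe) Close() error {
	err := w.PipeWriter.Close()
	<-w.done // wait until the consumer has finished reading
	return err
}

func main() {
	pr, pw := io.Pipe()
	done := make(chan struct{})
	w := &lazyPipe{
		PipeWriter: pw,
		done:       done,
		start: func() {
			defer close(done)
			var sb strings.Builder
			io.Copy(&sb, pr) // stands in for ImageLoad consuming the tar stream
			fmt.Println("consumed:", sb.String())
		},
	}
	io.WriteString(w, "hello")
	w.Close()
}

Starting the consumer lazily means nothing is sent to the daemon if the exporter never writes, which appears to be the reason for the sync.Once in the original.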

View File

@@ -1,111 +0,0 @@
package build
import (
"context"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"github.com/docker/buildx/util/gitutil"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
const DockerfileLabel = "com.docker.image.source.entrypoint"
func getGitAttributes(ctx context.Context, contextPath string, dockerfilePath string) (res map[string]string, _ error) {
res = make(map[string]string)
if contextPath == "" {
return
}
setGitLabels := false
if v, ok := os.LookupEnv("BUILDX_GIT_LABELS"); ok {
if v == "full" { // backward compatibility with old "full" mode
setGitLabels = true
} else if v, err := strconv.ParseBool(v); err == nil {
setGitLabels = v
}
}
setGitInfo := true
if v, ok := os.LookupEnv("BUILDX_GIT_INFO"); ok {
if v, err := strconv.ParseBool(v); err == nil {
setGitInfo = v
}
}
if !setGitLabels && !setGitInfo {
return
}
// figure out in which directory the git command needs to run
var wd string
if filepath.IsAbs(contextPath) {
wd = contextPath
} else {
cwd, _ := os.Getwd()
wd, _ = filepath.Abs(filepath.Join(cwd, contextPath))
}
gitc, err := gitutil.New(gitutil.WithContext(ctx), gitutil.WithWorkingDir(wd))
if err != nil {
if st, err := os.Stat(path.Join(wd, ".git")); err == nil && st.IsDir() {
return res, errors.New("git was not found in the system. Current commit information was not captured by the build")
}
return
}
if !gitc.IsInsideWorkTree() {
if st, err := os.Stat(path.Join(wd, ".git")); err == nil && st.IsDir() {
return res, errors.New("failed to read current commit information with git rev-parse --is-inside-work-tree")
}
return res, nil
}
if sha, err := gitc.FullCommit(); err != nil {
return res, errors.Wrapf(err, "failed to get git commit")
} else if sha != "" {
if gitc.IsDirty() {
sha += "-dirty"
}
if setGitLabels {
res["label:"+specs.AnnotationRevision] = sha
}
if setGitInfo {
res["vcs:revision"] = sha
}
}
if rurl, err := gitc.RemoteURL(); err != nil {
return res, errors.Wrapf(err, "failed to get git remote url")
} else if rurl != "" {
if setGitLabels {
res["label:"+specs.AnnotationSource] = rurl
}
if setGitInfo {
res["vcs:source"] = rurl
}
}
if setGitLabels {
if root, err := gitc.RootDir(); err != nil {
return res, errors.Wrapf(err, "failed to get git root dir")
} else if root != "" {
if dockerfilePath == "" {
dockerfilePath = filepath.Join(wd, "Dockerfile")
}
if !filepath.IsAbs(dockerfilePath) {
cwd, _ := os.Getwd()
dockerfilePath = filepath.Join(cwd, dockerfilePath)
}
dockerfilePath, _ = filepath.Rel(root, dockerfilePath)
if !strings.HasPrefix(dockerfilePath, "..") {
res["label:"+DockerfileLabel] = dockerfilePath
}
}
}
return
}
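Note: getGitAttributes above is driven by the BUILDX_GIT_LABELS and BUILDX_GIT_INFO environment variables, and the labels flag additionally accepts the literal "full" for backward compatibility. A small standalone sketch of just that flag handling (not part of the diff; the function name is made up):

package main

import (
	"fmt"
	"strconv"
)

// parseGitLabelsEnv mirrors the BUILDX_GIT_LABELS handling: "full" is
// accepted for backward compatibility, otherwise the value is parsed as a
// bool and ignored when it does not parse.
func parseGitLabelsEnv(v string, ok bool) bool {
	if !ok {
		return false // variable unset: no labels by default
	}
	if v == "full" {
		return true
	}
	if b, err := strconv.ParseBool(v); err == nil {
		return b
	}
	return false
}

func main() {
	fmt.Println(parseGitLabelsEnv("full", true)) // true
	fmt.Println(parseGitLabelsEnv("1", true))    // true
	fmt.Println(parseGitLabelsEnv("nope", true)) // false
	fmt.Println(parseGitLabelsEnv("", false))    // false
}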

View File

@@ -1,155 +0,0 @@
package build
import (
"context"
"os"
"path"
"path/filepath"
"strings"
"testing"
"github.com/docker/buildx/util/gitutil"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func setupTest(tb testing.TB) {
gitutil.Mktmp(tb)
c, err := gitutil.New()
require.NoError(tb, err)
gitutil.GitInit(c, tb)
df := []byte("FROM alpine:latest\n")
assert.NoError(tb, os.WriteFile("Dockerfile", df, 0644))
gitutil.GitAdd(c, tb, "Dockerfile")
gitutil.GitCommit(c, tb, "initial commit")
gitutil.GitSetRemote(c, tb, "git@github.com:docker/buildx.git")
}
func TestGetGitAttributesNotGitRepo(t *testing.T) {
_, err := getGitAttributes(context.Background(), t.TempDir(), "Dockerfile")
assert.NoError(t, err)
}
func TestGetGitAttributesBadGitRepo(t *testing.T) {
tmp := t.TempDir()
require.NoError(t, os.MkdirAll(path.Join(tmp, ".git"), 0755))
_, err := getGitAttributes(context.Background(), tmp, "Dockerfile")
assert.Error(t, err)
}
func TestGetGitAttributesNoContext(t *testing.T) {
setupTest(t)
gitattrs, err := getGitAttributes(context.Background(), "", "Dockerfile")
assert.NoError(t, err)
assert.Empty(t, gitattrs)
}
func TestGetGitAttributes(t *testing.T) {
cases := []struct {
name string
envGitLabels string
envGitInfo string
expected []string
}{
{
name: "default",
envGitLabels: "",
envGitInfo: "",
expected: []string{
"vcs:revision",
"vcs:source",
},
},
{
name: "none",
envGitLabels: "false",
envGitInfo: "false",
expected: []string{},
},
{
name: "gitinfo",
envGitLabels: "false",
envGitInfo: "true",
expected: []string{
"vcs:revision",
"vcs:source",
},
},
{
name: "gitlabels",
envGitLabels: "true",
envGitInfo: "false",
expected: []string{
"label:" + DockerfileLabel,
"label:" + specs.AnnotationRevision,
"label:" + specs.AnnotationSource,
},
},
{
name: "both",
envGitLabels: "true",
envGitInfo: "",
expected: []string{
"label:" + DockerfileLabel,
"label:" + specs.AnnotationRevision,
"label:" + specs.AnnotationSource,
"vcs:revision",
"vcs:source",
},
},
}
for _, tt := range cases {
tt := tt
t.Run(tt.name, func(t *testing.T) {
setupTest(t)
if tt.envGitLabels != "" {
t.Setenv("BUILDX_GIT_LABELS", tt.envGitLabels)
}
if tt.envGitInfo != "" {
t.Setenv("BUILDX_GIT_INFO", tt.envGitInfo)
}
gitattrs, err := getGitAttributes(context.Background(), ".", "Dockerfile")
require.NoError(t, err)
for _, e := range tt.expected {
assert.Contains(t, gitattrs, e)
assert.NotEmpty(t, gitattrs[e])
if e == "label:"+DockerfileLabel {
assert.Equal(t, "Dockerfile", gitattrs[e])
} else if e == "label:"+specs.AnnotationSource || e == "vcs:source" {
assert.Equal(t, "git@github.com:docker/buildx.git", gitattrs[e])
}
}
})
}
}
func TestGetGitAttributesDirty(t *testing.T) {
setupTest(t)
// make a change to test dirty flag
df := []byte("FROM alpine:edge\n")
require.NoError(t, os.Mkdir("dir", 0755))
require.NoError(t, os.WriteFile(filepath.Join("dir", "Dockerfile"), df, 0644))
t.Setenv("BUILDX_GIT_LABELS", "true")
gitattrs, _ := getGitAttributes(context.Background(), ".", "Dockerfile")
assert.Equal(t, 5, len(gitattrs))
assert.Contains(t, gitattrs, "label:"+DockerfileLabel)
assert.Equal(t, "Dockerfile", gitattrs["label:"+DockerfileLabel])
assert.Contains(t, gitattrs, "label:"+specs.AnnotationSource)
assert.Equal(t, "git@github.com:docker/buildx.git", gitattrs["label:"+specs.AnnotationSource])
assert.Contains(t, gitattrs, "label:"+specs.AnnotationRevision)
assert.True(t, strings.HasSuffix(gitattrs["label:"+specs.AnnotationRevision], "-dirty"))
assert.Contains(t, gitattrs, "vcs:source")
assert.Equal(t, "git@github.com:docker/buildx.git", gitattrs["vcs:source"])
assert.Contains(t, gitattrs, "vcs:revision")
assert.True(t, strings.HasSuffix(gitattrs["vcs:revision"], "-dirty"))
}

View File

@@ -11,14 +11,8 @@ import (
"github.com/pkg/errors"
)
const (
// archiveHeaderSize is the number of bytes in an archive header
archiveHeaderSize = 512
// mobyHostGatewayName defines a special string which users can append to
// --add-host to add an extra entry in /etc/hosts that maps
// host.docker.internal to the host IP
mobyHostGatewayName = "host-gateway"
)
// archiveHeaderSize is the number of bytes in an archive header
const archiveHeaderSize = 512
func isLocalDir(c string) bool {
st, err := os.Stat(c)
@@ -45,23 +39,18 @@ func isArchive(header []byte) bool {
}
// toBuildkitExtraHosts converts hosts from docker key:value format to buildkit's csv format
func toBuildkitExtraHosts(inp []string, mobyDriver bool) (string, error) {
func toBuildkitExtraHosts(inp []string) (string, error) {
if len(inp) == 0 {
return "", nil
}
hosts := make([]string, 0, len(inp))
for _, h := range inp {
host, ip, ok := strings.Cut(h, ":")
if !ok || host == "" || ip == "" {
parts := strings.Split(h, ":")
if len(parts) != 2 || parts[0] == "" || net.ParseIP(parts[1]) == nil {
return "", errors.Errorf("invalid host %s", h)
}
// Skip IP address validation for "host-gateway" string with moby driver
if !mobyDriver || ip != mobyHostGatewayName {
if net.ParseIP(ip) == nil {
return "", errors.Errorf("invalid host %s", h)
}
}
hosts = append(hosts, host+"="+ip)
hosts = append(hosts, parts[0]+"="+parts[1])
}
return strings.Join(hosts, ","), nil
}
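Note: both versions of toBuildkitExtraHosts above turn docker-style "host:ip" entries into buildkit's "host=ip" CSV; the longer version also lets the literal "host-gateway" through when the moby driver is in use. A standalone sketch of that behaviour (not part of the diff; the mobyGateway flag name is illustrative):

package main

import (
	"fmt"
	"net"
	"strings"
)

// toExtraHosts converts "host:ip" entries to buildkit's "host=ip" CSV form.
// When mobyGateway is true, the literal "host-gateway" is accepted in place
// of an IP address.
func toExtraHosts(inp []string, mobyGateway bool) (string, error) {
	hosts := make([]string, 0, len(inp))
	for _, h := range inp {
		host, ip, ok := strings.Cut(h, ":")
		if !ok || host == "" || ip == "" {
			return "", fmt.Errorf("invalid host %s", h)
		}
		if !(mobyGateway && ip == "host-gateway") && net.ParseIP(ip) == nil {
			return "", fmt.Errorf("invalid host %s", h)
		}
		hosts = append(hosts, host+"="+ip)
	}
	return strings.Join(hosts, ","), nil
}

func main() {
	s, _ := toExtraHosts([]string{"example.test:10.0.0.1", "host.docker.internal:host-gateway"}, true)
	fmt.Println(s) // example.test=10.0.0.1,host.docker.internal=host-gateway
}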

View File

@@ -1,292 +0,0 @@
package builder
import (
"context"
"os"
"sort"
"sync"
"github.com/docker/buildx/driver"
"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/dockerutil"
"github.com/docker/buildx/util/imagetools"
"github.com/docker/buildx/util/progress"
"github.com/docker/cli/cli/command"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
// Builder represents an active builder object
type Builder struct {
*store.NodeGroup
driverFactory driverFactory
nodes []Node
opts builderOpts
err error
}
type builderOpts struct {
dockerCli command.Cli
name string
txn *store.Txn
contextPathHash string
validate bool
}
// Option provides a variadic option for configuring the builder.
type Option func(b *Builder)
// WithName sets builder name.
func WithName(name string) Option {
return func(b *Builder) {
b.opts.name = name
}
}
// WithStore sets a store instance used at init.
func WithStore(txn *store.Txn) Option {
return func(b *Builder) {
b.opts.txn = txn
}
}
// WithContextPathHash is used for determining pods in k8s driver instance.
func WithContextPathHash(contextPathHash string) Option {
return func(b *Builder) {
b.opts.contextPathHash = contextPathHash
}
}
// WithSkippedValidation skips builder context validation.
func WithSkippedValidation() Option {
return func(b *Builder) {
b.opts.validate = false
}
}
// New initializes a new builder client
func New(dockerCli command.Cli, opts ...Option) (_ *Builder, err error) {
b := &Builder{
opts: builderOpts{
dockerCli: dockerCli,
validate: true,
},
}
for _, opt := range opts {
opt(b)
}
if b.opts.txn == nil {
// if store instance is nil we create a short-lived one using the
// default store and ensure we release it on completion
var release func()
b.opts.txn, release, err = storeutil.GetStore(dockerCli)
if err != nil {
return nil, err
}
defer release()
}
if b.opts.name != "" {
if b.NodeGroup, err = storeutil.GetNodeGroup(b.opts.txn, dockerCli, b.opts.name); err != nil {
return nil, err
}
} else {
if b.NodeGroup, err = storeutil.GetCurrentInstance(b.opts.txn, dockerCli); err != nil {
return nil, err
}
}
if b.opts.validate {
if err = b.Validate(); err != nil {
return nil, err
}
}
return b, nil
}
// Validate validates builder context
func (b *Builder) Validate() error {
if b.NodeGroup.DockerContext {
list, err := b.opts.dockerCli.ContextStore().List()
if err != nil {
return err
}
currentContext := b.opts.dockerCli.CurrentContext()
for _, l := range list {
if l.Name == b.Name && l.Name != currentContext {
return errors.Errorf("use `docker --context=%s buildx` to switch to context %q", l.Name, l.Name)
}
}
}
return nil
}
// ContextName returns builder context name if available.
func (b *Builder) ContextName() string {
ctxbuilders, err := b.opts.dockerCli.ContextStore().List()
if err != nil {
return ""
}
for _, cb := range ctxbuilders {
if b.NodeGroup.Driver == "docker" && len(b.NodeGroup.Nodes) == 1 && b.NodeGroup.Nodes[0].Endpoint == cb.Name {
return cb.Name
}
}
return ""
}
// ImageOpt returns registry auth configuration
func (b *Builder) ImageOpt() (imagetools.Opt, error) {
return storeutil.GetImageConfig(b.opts.dockerCli, b.NodeGroup)
}
// Boot bootstraps a builder
func (b *Builder) Boot(ctx context.Context) (bool, error) {
toBoot := make([]int, 0, len(b.nodes))
for idx, d := range b.nodes {
if d.Err != nil || d.Driver == nil || d.DriverInfo == nil {
continue
}
if d.DriverInfo.Status != driver.Running {
toBoot = append(toBoot, idx)
}
}
if len(toBoot) == 0 {
return false, nil
}
printer, err := progress.NewPrinter(context.TODO(), os.Stderr, os.Stderr, progress.PrinterModeAuto)
if err != nil {
return false, err
}
baseCtx := ctx
eg, _ := errgroup.WithContext(ctx)
for _, idx := range toBoot {
func(idx int) {
eg.Go(func() error {
pw := progress.WithPrefix(printer, b.NodeGroup.Nodes[idx].Name, len(toBoot) > 1)
_, err := driver.Boot(ctx, baseCtx, b.nodes[idx].Driver, pw)
if err != nil {
b.nodes[idx].Err = err
}
return nil
})
}(idx)
}
err = eg.Wait()
err1 := printer.Wait()
if err == nil {
err = err1
}
return true, err
}
// Inactive checks if all nodes are inactive for this builder.
func (b *Builder) Inactive() bool {
for _, d := range b.nodes {
if d.DriverInfo != nil && d.DriverInfo.Status == driver.Running {
return false
}
}
return true
}
// Err returns error if any.
func (b *Builder) Err() error {
return b.err
}
type driverFactory struct {
driver.Factory
once sync.Once
}
// Factory returns the driver factory.
func (b *Builder) Factory(ctx context.Context) (_ driver.Factory, err error) {
b.driverFactory.once.Do(func() {
if b.Driver != "" {
b.driverFactory.Factory, err = driver.GetFactory(b.Driver, true)
if err != nil {
return
}
} else {
// empty driver means nodegroup was implicitly created as a default
// driver for a docker context and allows falling back to a
// docker-container driver for an older daemon that doesn't support
// buildkit (< 18.06).
ep := b.NodeGroup.Nodes[0].Endpoint
var dockerapi *dockerutil.ClientAPI
dockerapi, err = dockerutil.NewClientAPI(b.opts.dockerCli, b.NodeGroup.Nodes[0].Endpoint)
if err != nil {
return
}
// checking if the endpoint is healthy is needed to determine the driver type.
// if this fails then we can't continue with driver selection.
if _, err = dockerapi.Ping(ctx); err != nil {
return
}
b.driverFactory.Factory, err = driver.GetDefaultFactory(ctx, ep, dockerapi, false)
if err != nil {
return
}
b.Driver = b.driverFactory.Factory.Name()
}
})
return b.driverFactory.Factory, err
}
// GetBuilders returns all builders
func GetBuilders(dockerCli command.Cli, txn *store.Txn) ([]*Builder, error) {
storeng, err := txn.List()
if err != nil {
return nil, err
}
builders := make([]*Builder, len(storeng))
seen := make(map[string]struct{})
for i, ng := range storeng {
b, err := New(dockerCli,
WithName(ng.Name),
WithStore(txn),
WithSkippedValidation(),
)
if err != nil {
return nil, err
}
builders[i] = b
seen[b.NodeGroup.Name] = struct{}{}
}
contexts, err := dockerCli.ContextStore().List()
if err != nil {
return nil, err
}
sort.Slice(contexts, func(i, j int) bool {
return contexts[i].Name < contexts[j].Name
})
for _, c := range contexts {
// if a context has the same name as an instance from the store, do not
// add it to the builders list. An instance from the store takes
// precedence over context builders.
if _, ok := seen[c.Name]; ok {
continue
}
b, err := New(dockerCli,
WithName(c.Name),
WithStore(txn),
WithSkippedValidation(),
)
if err != nil {
return nil, err
}
builders = append(builders, b)
}
return builders, nil
}

View File

@@ -1,201 +0,0 @@
package builder
import (
"context"
"github.com/docker/buildx/driver"
ctxkube "github.com/docker/buildx/driver/kubernetes/context"
"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/dockerutil"
"github.com/docker/buildx/util/imagetools"
"github.com/docker/buildx/util/platformutil"
"github.com/moby/buildkit/util/grpcerrors"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc/codes"
)
type Node struct {
store.Node
Driver driver.Driver
DriverInfo *driver.Info
Platforms []ocispecs.Platform
ImageOpt imagetools.Opt
ProxyConfig map[string]string
Version string
Err error
}
// Nodes returns nodes for this builder.
func (b *Builder) Nodes() []Node {
return b.nodes
}
// LoadNodes loads and returns nodes for this builder.
// TODO: this should be a method on a Node object and lazy load data for each driver.
func (b *Builder) LoadNodes(ctx context.Context, withData bool) (_ []Node, err error) {
eg, _ := errgroup.WithContext(ctx)
b.nodes = make([]Node, len(b.NodeGroup.Nodes))
defer func() {
if b.err == nil && err != nil {
b.err = err
}
}()
factory, err := b.Factory(ctx)
if err != nil {
return nil, err
}
imageopt, err := b.ImageOpt()
if err != nil {
return nil, err
}
for i, n := range b.NodeGroup.Nodes {
func(i int, n store.Node) {
eg.Go(func() error {
node := Node{
Node: n,
ProxyConfig: storeutil.GetProxyConfig(b.opts.dockerCli),
}
defer func() {
b.nodes[i] = node
}()
dockerapi, err := dockerutil.NewClientAPI(b.opts.dockerCli, n.Endpoint)
if err != nil {
node.Err = err
return nil
}
contextStore := b.opts.dockerCli.ContextStore()
var kcc driver.KubeClientConfig
kcc, err = ctxkube.ConfigFromContext(n.Endpoint, contextStore)
if err != nil {
// err is returned if n.Endpoint is a non-context name like "unix:///var/run/docker.sock".
// try again with name="default".
// FIXME(@AkihiroSuda): n should retain real context name.
kcc, err = ctxkube.ConfigFromContext("default", contextStore)
if err != nil {
logrus.Error(err)
}
}
tryToUseKubeConfigInCluster := false
if kcc == nil {
tryToUseKubeConfigInCluster = true
} else {
if _, err := kcc.ClientConfig(); err != nil {
tryToUseKubeConfigInCluster = true
}
}
if tryToUseKubeConfigInCluster {
kccInCluster := driver.KubeClientConfigInCluster{}
if _, err := kccInCluster.ClientConfig(); err == nil {
logrus.Debug("using kube config in cluster")
kcc = kccInCluster
}
}
d, err := driver.GetDriver(ctx, "buildx_buildkit_"+n.Name, factory, n.Endpoint, dockerapi, imageopt.Auth, kcc, n.Flags, n.Files, n.DriverOpts, n.Platforms, b.opts.contextPathHash)
if err != nil {
node.Err = err
return nil
}
node.Driver = d
node.ImageOpt = imageopt
if withData {
if err := node.loadData(ctx); err != nil {
node.Err = err
}
}
return nil
})
}(i, n)
}
if err := eg.Wait(); err != nil {
return nil, err
}
// TODO: This should be done in the routine loading driver data
if withData {
kubernetesDriverCount := 0
for _, d := range b.nodes {
if d.DriverInfo != nil && len(d.DriverInfo.DynamicNodes) > 0 {
kubernetesDriverCount++
}
}
isAllKubernetesDrivers := len(b.nodes) == kubernetesDriverCount
if isAllKubernetesDrivers {
var nodes []Node
var dynamicNodes []store.Node
for _, di := range b.nodes {
// dynamic nodes are used in Kubernetes driver.
// Kubernetes' pods are dynamically mapped to BuildKit Nodes.
if di.DriverInfo != nil && len(di.DriverInfo.DynamicNodes) > 0 {
for i := 0; i < len(di.DriverInfo.DynamicNodes); i++ {
diClone := di
if pl := di.DriverInfo.DynamicNodes[i].Platforms; len(pl) > 0 {
diClone.Platforms = pl
}
nodes = append(nodes, diClone)
}
dynamicNodes = append(dynamicNodes, di.DriverInfo.DynamicNodes...)
}
}
// replace rather than append (this removes the static nodes from the store)
b.NodeGroup.Nodes = dynamicNodes
b.nodes = nodes
b.NodeGroup.Dynamic = true
}
}
return b.nodes, nil
}
func (n *Node) loadData(ctx context.Context) error {
if n.Driver == nil {
return nil
}
info, err := n.Driver.Info(ctx)
if err != nil {
return err
}
n.DriverInfo = info
if n.DriverInfo.Status == driver.Running {
driverClient, err := n.Driver.Client(ctx)
if err != nil {
return err
}
workers, err := driverClient.ListWorkers(ctx)
if err != nil {
return errors.Wrap(err, "listing workers")
}
for _, w := range workers {
n.Platforms = append(n.Platforms, w.Platforms...)
}
n.Platforms = platformutil.Dedupe(n.Platforms)
inf, err := driverClient.Info(ctx)
if err != nil {
if st, ok := grpcerrors.AsGRPCStatus(err); ok && st.Code() == codes.Unimplemented {
n.Version, err = n.Driver.Version(ctx)
if err != nil {
return errors.Wrap(err, "getting version")
}
}
} else {
n.Version = inf.BuildkitVersion.Version
}
}
return nil
}

View File

@@ -9,10 +9,7 @@ import (
"github.com/containerd/containerd/platforms"
"github.com/docker/buildx/bake"
"github.com/docker/buildx/build"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/util/buildflags"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/dockerutil"
"github.com/docker/buildx/util/progress"
"github.com/docker/buildx/util/tracing"
"github.com/docker/cli/cli/command"
@@ -74,20 +71,11 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
if in.pull != nil {
overrides = append(overrides, fmt.Sprintf("*.pull=%t", *in.pull))
}
if in.sbom != "" {
overrides = append(overrides, fmt.Sprintf("*.attest=%s", buildflags.CanonicalizeAttest("sbom", in.sbom)))
}
if in.provenance != "" {
overrides = append(overrides, fmt.Sprintf("*.attest=%s", buildflags.CanonicalizeAttest("provenance", in.provenance)))
}
contextPathHash, _ := os.Getwd()
ctx2, cancel := context.WithCancel(context.TODO())
defer cancel()
printer, err := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, in.progress)
if err != nil {
return err
}
printer := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, in.progress)
defer func() {
if printer != nil {
@@ -98,30 +86,16 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
}
}()
var nodes []builder.Node
dis, err := getInstanceOrDefault(ctx, dockerCli, in.builder, contextPathHash)
if err != nil {
return err
}
var files []bake.File
var inp *bake.Input
// instance only needed for reading remote bake files or building
if url != "" || !in.printOnly {
b, err := builder.New(dockerCli,
builder.WithName(in.builder),
builder.WithContextPathHash(contextPathHash),
)
if err != nil {
return err
}
if err = updateLastActivity(dockerCli, b.NodeGroup); err != nil {
return errors.Wrapf(err, "failed to update builder last activity time")
}
nodes, err = b.LoadNodes(ctx, false)
if err != nil {
return err
}
}
if url != "" {
files, inp, err = bake.ReadRemoteFiles(ctx, nodes, url, in.files, printer)
files, inp, err = bake.ReadRemoteFiles(ctx, dis, url, in.files, printer)
} else {
files, err = bake.ReadLocalFiles(in.files)
}
@@ -131,7 +105,7 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
tgts, grps, err := bake.ReadTargets(ctx, files, targets, overrides, map[string]string{
// don't forget to update documentation if you add a new
// built-in variable: docs/manuals/bake/file-definition.md#built-in-variables
// built-in variable: docs/guides/bake/file-definition.md#built-in-variables
"BAKE_CMD_CONTEXT": cmdContext,
"BAKE_LOCAL_PLATFORM": platforms.DefaultString(),
})
@@ -146,11 +120,17 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
}
if in.printOnly {
var defg map[string]*bake.Group
if len(grps) == 1 {
defg = map[string]*bake.Group{
"default": grps[0],
}
}
dt, err := json.MarshalIndent(struct {
Group map[string]*bake.Group `json:"group,omitempty"`
Target map[string]*bake.Target `json:"target"`
}{
grps,
defg,
tgts,
}, "", " ")
if err != nil {
@@ -165,7 +145,7 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
return nil
}
resp, err := build.Build(ctx, nodes, bo, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), printer)
resp, err := build.Build(ctx, dis, bo, dockerAPI(dockerCli), confutil.ConfigDir(dockerCli), printer)
if err != nil {
return wrapBuildError(err, true)
}
@@ -209,8 +189,6 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
flags.BoolVar(&options.exportLoad, "load", false, `Shorthand for "--set=*.output=type=docker"`)
flags.BoolVar(&options.printOnly, "print", false, "Print the options without building")
flags.BoolVar(&options.exportPush, "push", false, `Shorthand for "--set=*.output=type=registry"`)
flags.StringVar(&options.sbom, "sbom", "", `Shorthand for "--set=*.attest=type=sbom"`)
flags.StringVar(&options.provenance, "provenance", "", `Shorthand for "--set=*.attest=type=provenance"`)
flags.StringArrayVar(&options.overrides, "set", nil, `Override target value (e.g., "targetpattern.key=value")`)
commonBuildFlags(&options.commonOptions, flags)

View File

@@ -16,13 +16,9 @@ import (
"github.com/containerd/console"
"github.com/docker/buildx/build"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/monitor"
"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/buildflags"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/dockerutil"
"github.com/docker/buildx/util/platformutil"
"github.com/docker/buildx/util/progress"
"github.com/docker/buildx/util/tracing"
@@ -55,7 +51,6 @@ type buildOptions struct {
printFunc string
allow []string
attests []string
buildArgs []string
cacheFrom []string
cacheTo []string
@@ -63,7 +58,6 @@ type buildOptions struct {
contexts []string
extraHosts []string
imageIDFile string
invoke string
labels []string
networkMode string
noCacheFilter []string
@@ -76,6 +70,7 @@ type buildOptions struct {
tags []string
target string
ulimits *dockeropts.UlimitOpt
invoke string
commonOptions
}
@@ -86,11 +81,11 @@ type commonOptions struct {
progress string
pull *bool
// golangci-lint#826
// nolint:structcheck
exportPush bool
// nolint:structcheck
exportLoad bool
sbom string
provenance string
}
func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
@@ -117,7 +112,7 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
return errors.Errorf("--no-cache and --no-cache-filter cannot currently be used together")
}
if in.quiet && in.progress != progress.PrinterModeAuto && in.progress != progress.PrinterModeQuiet {
if in.quiet && in.progress != "auto" && in.progress != "quiet" {
return errors.Errorf("progress=%s and quiet cannot be used together", in.progress)
} else if in.quiet {
in.progress = "quiet"
@@ -218,19 +213,8 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
}
}
}
opts.Exports = outputs
inAttests := append([]string{}, in.attests...)
if in.provenance != "" {
inAttests = append(inAttests, buildflags.CanonicalizeAttest("provenance", in.provenance))
}
if in.sbom != "" {
inAttests = append(inAttests, buildflags.CanonicalizeAttest("sbom", in.sbom))
}
opts.Attests, err = buildflags.ParseAttests(inAttests)
if err != nil {
return err
}
opts.Exports = outputs
cacheImports, err := buildflags.ParseCacheEntry(in.cacheFrom)
if err != nil {
@@ -256,22 +240,7 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
contextPathHash = in.contextPath
}
b, err := builder.New(dockerCli,
builder.WithName(in.builder),
builder.WithContextPathHash(contextPathHash),
)
if err != nil {
return err
}
if err = updateLastActivity(dockerCli, b.NodeGroup); err != nil {
return errors.Wrapf(err, "failed to update builder last activity time")
}
nodes, err := b.LoadNodes(ctx, false)
if err != nil {
return err
}
imageID, res, err := buildTargets(ctx, dockerCli, nodes, map[string]build.Options{defaultTargetName: opts}, in.progress, in.metadataFile, in.invoke != "")
imageID, res, err := buildTargets(ctx, dockerCli, map[string]build.Options{defaultTargetName: opts}, in.progress, contextPathHash, in.builder, in.metadataFile, in.invoke != "")
err = wrapBuildError(err, false)
if err != nil {
return err
@@ -288,7 +257,7 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
return errors.Errorf("failed to configure terminal: %v", err)
}
err = monitor.RunMonitor(ctx, cfg, func(ctx context.Context) (*build.ResultContext, error) {
_, rr, err := buildTargets(ctx, dockerCli, nodes, map[string]build.Options{defaultTargetName: opts}, in.progress, in.metadataFile, true)
_, rr, err := buildTargets(ctx, dockerCli, map[string]build.Options{defaultTargetName: opts}, in.progress, contextPathHash, in.builder, in.metadataFile, true)
return rr, err
}, io.NopCloser(os.Stdin), nopCloser{os.Stdout}, nopCloser{os.Stderr})
if err != nil {
@@ -309,18 +278,20 @@ type nopCloser struct {
func (c nopCloser) Close() error { return nil }
func buildTargets(ctx context.Context, dockerCli command.Cli, nodes []builder.Node, opts map[string]build.Options, progressMode string, metadataFile string, allowNoOutput bool) (imageID string, res *build.ResultContext, err error) {
ctx2, cancel := context.WithCancel(context.TODO())
defer cancel()
printer, err := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, progressMode)
func buildTargets(ctx context.Context, dockerCli command.Cli, opts map[string]build.Options, progressMode, contextPathHash, instance string, metadataFile string, allowNoOutput bool) (imageID string, res *build.ResultContext, err error) {
dis, err := getInstanceOrDefault(ctx, dockerCli, instance, contextPathHash)
if err != nil {
return "", nil, err
}
ctx2, cancel := context.WithCancel(context.TODO())
defer cancel()
printer := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, progressMode)
var mu sync.Mutex
var idx int
resp, err := build.BuildWithResultHandler(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), printer, func(driverIndex int, gotRes *build.ResultContext) {
resp, err := build.BuildWithResultHandler(ctx, dis, opts, dockerAPI(dockerCli), confutil.ConfigDir(dockerCli), printer, func(driverIndex int, gotRes *build.ResultContext) {
mu.Lock()
defer mu.Unlock()
if res == nil || driverIndex < idx {
@@ -355,20 +326,18 @@ func buildTargets(ctx context.Context, dockerCli command.Cli, nodes []builder.No
}
func parseInvokeConfig(invoke string) (cfg build.ContainerConfig, err error) {
cfg.Tty = true
if invoke == "default" {
return cfg, nil
}
csvReader := csv.NewReader(strings.NewReader(invoke))
fields, err := csvReader.Read()
if err != nil {
return cfg, err
}
cfg.Tty = true
if len(fields) == 1 && !strings.Contains(fields[0], "=") {
cfg.Cmd = []string{fields[0]}
cfg.Args = []string{fields[0]}
return cfg, nil
}
var entrypoint string
var args []string
for _, field := range fields {
parts := strings.SplitN(field, "=", 2)
if len(parts) != 2 {
@@ -378,15 +347,15 @@ func parseInvokeConfig(invoke string) (cfg build.ContainerConfig, err error) {
value := parts[1]
switch key {
case "args":
cfg.Cmd = append(cfg.Cmd, value) // TODO: support JSON
args = append(args, value) // TODO: support JSON
case "entrypoint":
cfg.Entrypoint = append(cfg.Entrypoint, value) // TODO: support JSON
entrypoint = value // TODO: support JSON
case "env":
cfg.Env = append(cfg.Env, value)
case "user":
cfg.User = &value
cfg.User = value
case "cwd":
cfg.Cwd = &value
cfg.Cwd = value
case "tty":
cfg.Tty, err = strconv.ParseBool(value)
if err != nil {
@@ -396,6 +365,13 @@ func parseInvokeConfig(invoke string) (cfg build.ContainerConfig, err error) {
return cfg, errors.Errorf("unknown key %q", key)
}
}
cfg.Args = args
if entrypoint != "" {
cfg.Args = append([]string{entrypoint}, cfg.Args...)
}
if len(cfg.Args) == 0 {
cfg.Args = []string{"sh"}
}
return cfg, nil
}
@@ -524,10 +500,6 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
flags.Var(options.ulimits, "ulimit", "Ulimit options")
flags.StringArrayVar(&options.attests, "attest", []string{}, `Attestation parameters (format: "type=sbom,generator=image")`)
flags.StringVar(&options.sbom, "sbom", "", `Shorthand for "--attest=type=sbom"`)
flags.StringVar(&options.provenance, "provenance", "", `Shorthand for "--attest=type=provenance"`)
if isExperimental() {
flags.StringVar(&options.invoke, "invoke", "", "Invoke a command after the build [experimental]")
}
@@ -735,12 +707,3 @@ func isExperimental() bool {
}
return false
}
func updateLastActivity(dockerCli command.Cli, ng *store.NodeGroup) error {
txn, release, err := storeutil.GetStore(dockerCli)
if err != nil {
return err
}
defer release()
return txn.UpdateLastActivity(ng)
}

View File

@@ -10,17 +10,13 @@ import (
"strings"
"time"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/driver"
remoteutil "github.com/docker/buildx/driver/remote/util"
"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/cobrautil"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/dockerutil"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
dopts "github.com/docker/cli/opts"
"github.com/google/shlex"
"github.com/moby/buildkit/util/appcontext"
"github.com/pkg/errors"
@@ -208,7 +204,7 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
if dockerCli.CurrentContext() == "default" && dockerCli.DockerEndpoint().TLSData != nil {
return errors.Errorf("could not create a builder instance with TLS data loaded from environment. Please use `docker context create <context-name>` to create a context for current environment and then create a builder instance with `docker buildx create <context-name>`")
}
ep, err = dockerutil.GetCurrentEndpoint(dockerCli)
ep, err = storeutil.GetCurrentEndpoint(dockerCli)
if err != nil {
return err
}
@@ -238,26 +234,17 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
return err
}
b, err := builder.New(dockerCli,
builder.WithName(ng.Name),
builder.WithStore(txn),
builder.WithSkippedValidation(),
)
if err != nil {
return err
}
ngi := &nginfo{ng: ng}
timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
defer cancel()
nodes, err := b.LoadNodes(timeoutCtx, true)
if err != nil {
if err = loadNodeGroupData(timeoutCtx, dockerCli, ngi); err != nil {
return err
}
for _, node := range nodes {
if err := node.Err; err != nil {
err := errors.Errorf("failed to initialize builder %s (%s): %s", ng.Name, node.Name, err)
for _, info := range ngi.drivers {
if err := info.di.Err; err != nil {
err := errors.Errorf("failed to initialize builder %s (%s): %s", ng.Name, info.di.Name, err)
var err2 error
if ngOriginal == nil {
err2 = txn.Remove(ng.Name)
@@ -272,7 +259,7 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
}
if in.use && ep != "" {
current, err := dockerutil.GetCurrentEndpoint(dockerCli)
current, err := storeutil.GetCurrentEndpoint(dockerCli)
if err != nil {
return err
}
@@ -282,7 +269,7 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
}
if in.bootstrap {
if _, err = b.Boot(ctx); err != nil {
if _, err = boot(ctx, ngi); err != nil {
return err
}
}
@@ -353,27 +340,3 @@ func csvToMap(in []string) (map[string]string, error) {
}
return m, nil
}
// validateEndpoint validates that endpoint is either a context or a docker host
func validateEndpoint(dockerCli command.Cli, ep string) (string, error) {
dem, err := dockerutil.GetDockerEndpoint(dockerCli, ep)
if err == nil && dem != nil {
if ep == "default" {
return dem.Host, nil
}
return ep, nil
}
h, err := dopts.ParseHost(true, ep)
if err != nil {
return "", errors.Wrapf(err, "failed to parse endpoint %s", ep)
}
return h, nil
}
// validateBuildkitEndpoint validates that endpoint is a valid buildkit host
func validateBuildkitEndpoint(ep string) (string, error) {
if err := remoteutil.IsValidEndpoint(ep); err != nil {
return "", err
}
return ep, nil
}

View File

@@ -8,7 +8,7 @@ import (
"text/tabwriter"
"time"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/build"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/opts"
@@ -33,29 +33,25 @@ func runDiskUsage(dockerCli command.Cli, opts duOptions) error {
return err
}
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
dis, err := getInstanceOrDefault(ctx, dockerCli, opts.builder, "")
if err != nil {
return err
}
nodes, err := b.LoadNodes(ctx, false)
if err != nil {
return err
}
for _, node := range nodes {
if node.Err != nil {
return node.Err
for _, di := range dis {
if di.Err != nil {
return err
}
}
out := make([][]*client.UsageInfo, len(nodes))
out := make([][]*client.UsageInfo, len(dis))
eg, ctx := errgroup.WithContext(ctx)
for i, node := range nodes {
func(i int, node builder.Node) {
for i, di := range dis {
func(i int, di build.DriverInfo) {
eg.Go(func() error {
if node.Driver != nil {
c, err := node.Driver.Client(ctx)
if di.Driver != nil {
c, err := di.Driver.Client(ctx)
if err != nil {
return err
}
@@ -68,7 +64,7 @@ func runDiskUsage(dockerCli command.Cli, opts duOptions) error {
}
return nil
})
}(i, node)
}(i, di)
}
if err := eg.Wait(); err != nil {

View File

@@ -7,7 +7,8 @@ import (
"os"
"strings"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/imagetools"
"github.com/docker/buildx/util/progress"
"github.com/docker/cli/cli/command"
@@ -89,34 +90,47 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
}
for i, s := range srcs {
if s.Ref == nil {
if s.Ref == nil && s.Desc.MediaType == "" && s.Desc.Digest != "" {
if defaultRepo == nil {
return errors.Errorf("multiple repositories specified, cannot infer repository for %q", args[i])
}
n, err := reference.ParseNormalizedNamed(*defaultRepo)
if err != nil {
return err
}
if s.Desc.MediaType == "" && s.Desc.Digest != "" {
r, err := reference.WithDigest(n, s.Desc.Digest)
if err != nil {
return err
}
srcs[i].Ref = r
sourceRefs = true
} else {
srcs[i].Ref = reference.TagNameOnly(n)
r, err := reference.WithDigest(n, s.Desc.Digest)
if err != nil {
return err
}
srcs[i].Ref = r
sourceRefs = true
}
}
ctx := appcontext.Context()
b, err := builder.New(dockerCli, builder.WithName(in.builder))
txn, release, err := storeutil.GetStore(dockerCli)
if err != nil {
return err
}
imageopt, err := b.ImageOpt()
defer release()
var ng *store.NodeGroup
if in.builder != "" {
ng, err = storeutil.GetNodeGroup(txn, dockerCli, in.builder)
if err != nil {
return err
}
} else {
ng, err = storeutil.GetCurrentInstance(txn, dockerCli)
if err != nil {
return err
}
}
imageopt, err := storeutil.GetImageConfig(dockerCli, ng)
if err != nil {
return err
}
@@ -168,10 +182,7 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
ctx2, cancel := context.WithCancel(context.TODO())
defer cancel()
printer, err := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, in.progress)
if err != nil {
return err
}
printer := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, in.progress)
eg, _ := errgroup.WithContext(ctx)
pw := progress.WithPrefix(printer, "internal", true)

View File

@@ -1,7 +1,8 @@
package commands
import (
"github.com/docker/buildx/builder"
"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/imagetools"
"github.com/docker/cli-docs-tool/annotation"
"github.com/docker/cli/cli"
@@ -24,11 +25,27 @@ func runInspect(dockerCli command.Cli, in inspectOptions, name string) error {
return errors.Errorf("format and raw cannot be used together")
}
b, err := builder.New(dockerCli, builder.WithName(in.builder))
txn, release, err := storeutil.GetStore(dockerCli)
if err != nil {
return err
}
imageopt, err := b.ImageOpt()
defer release()
var ng *store.NodeGroup
if in.builder != "" {
ng, err = storeutil.GetNodeGroup(txn, dockerCli, in.builder)
if err != nil {
return err
}
} else {
ng, err = storeutil.GetCurrentInstance(txn, dockerCli)
if err != nil {
return err
}
}
imageopt, err := storeutil.GetImageConfig(dockerCli, ng)
if err != nil {
return err
}

View File

@@ -8,7 +8,8 @@ import (
"text/tabwriter"
"time"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/platformutil"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
@@ -24,46 +25,71 @@ type inspectOptions struct {
func runInspect(dockerCli command.Cli, in inspectOptions) error {
ctx := appcontext.Context()
b, err := builder.New(dockerCli,
builder.WithName(in.builder),
builder.WithSkippedValidation(),
)
txn, release, err := storeutil.GetStore(dockerCli)
if err != nil {
return err
}
defer release()
var ng *store.NodeGroup
if in.builder != "" {
ng, err = storeutil.GetNodeGroup(txn, dockerCli, in.builder)
if err != nil {
return err
}
} else {
ng, err = storeutil.GetCurrentInstance(txn, dockerCli)
if err != nil {
return err
}
}
if ng == nil {
ng = &store.NodeGroup{
Name: "default",
Nodes: []store.Node{{
Name: "default",
Endpoint: "default",
}},
}
}
ngi := &nginfo{ng: ng}
timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
defer cancel()
nodes, err := b.LoadNodes(timeoutCtx, true)
err = loadNodeGroupData(timeoutCtx, dockerCli, ngi)
var bootNgi *nginfo
if in.bootstrap {
var ok bool
ok, err = b.Boot(ctx)
ok, err = boot(ctx, ngi)
if err != nil {
return err
}
bootNgi = ngi
if ok {
nodes, err = b.LoadNodes(timeoutCtx, true)
ngi = &nginfo{ng: ng}
err = loadNodeGroupData(ctx, dockerCli, ngi)
}
}
w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
fmt.Fprintf(w, "Name:\t%s\n", b.Name)
fmt.Fprintf(w, "Driver:\t%s\n", b.Driver)
if !b.NodeGroup.LastActivity.IsZero() {
fmt.Fprintf(w, "Last Activity:\t%v\n", b.NodeGroup.LastActivity)
}
fmt.Fprintf(w, "Name:\t%s\n", ngi.ng.Name)
fmt.Fprintf(w, "Driver:\t%s\n", ngi.ng.Driver)
if err != nil {
fmt.Fprintf(w, "Error:\t%s\n", err.Error())
} else if b.Err() != nil {
fmt.Fprintf(w, "Error:\t%s\n", b.Err().Error())
} else if ngi.err != nil {
fmt.Fprintf(w, "Error:\t%s\n", ngi.err.Error())
}
if err == nil {
fmt.Fprintln(w, "")
fmt.Fprintln(w, "Nodes:")
for i, n := range nodes {
for i, n := range ngi.ng.Nodes {
if i != 0 {
fmt.Fprintln(w, "")
}
@@ -78,17 +104,21 @@ func runInspect(dockerCli command.Cli, in inspectOptions) error {
fmt.Fprintf(w, "Driver Options:\t%s\n", strings.Join(driverOpts, " "))
}
if err := n.Err; err != nil {
if err := ngi.drivers[i].di.Err; err != nil {
fmt.Fprintf(w, "Error:\t%s\n", err.Error())
} else if err := ngi.drivers[i].err; err != nil {
fmt.Fprintf(w, "Error:\t%s\n", err.Error())
} else if bootNgi != nil && len(bootNgi.drivers) > i && bootNgi.drivers[i].err != nil {
fmt.Fprintf(w, "Error:\t%s\n", bootNgi.drivers[i].err.Error())
} else {
fmt.Fprintf(w, "Status:\t%s\n", nodes[i].DriverInfo.Status)
fmt.Fprintf(w, "Status:\t%s\n", ngi.drivers[i].info.Status)
if len(n.Flags) > 0 {
fmt.Fprintf(w, "Flags:\t%s\n", strings.Join(n.Flags, " "))
}
if nodes[i].Version != "" {
fmt.Fprintf(w, "Buildkit:\t%s\n", nodes[i].Version)
if ngi.drivers[i].version != "" {
fmt.Fprintf(w, "Buildkit:\t%s\n", ngi.drivers[i].version)
}
fmt.Fprintf(w, "Platforms:\t%s\n", strings.Join(platformutil.FormatInGroups(n.Node.Platforms, n.Platforms), ", "))
fmt.Fprintf(w, "Platforms:\t%s\n", strings.Join(platformutil.FormatInGroups(n.Platforms, ngi.drivers[i].platforms), ", "))
}
}
}

View File

@@ -4,11 +4,12 @@ import (
"context"
"fmt"
"io"
"sort"
"strings"
"text/tabwriter"
"time"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/cobrautil"
"github.com/docker/buildx/util/platformutil"
@@ -31,24 +32,52 @@ func runLs(dockerCli command.Cli, in lsOptions) error {
}
defer release()
current, err := storeutil.GetCurrentInstance(txn, dockerCli)
if err != nil {
return err
}
builders, err := builder.GetBuilders(dockerCli, txn)
if err != nil {
return err
}
timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
ctx, cancel := context.WithTimeout(ctx, 20*time.Second)
defer cancel()
eg, _ := errgroup.WithContext(timeoutCtx)
ll, err := txn.List()
if err != nil {
return err
}
builders := make([]*nginfo, len(ll))
for i, ng := range ll {
builders[i] = &nginfo{ng: ng}
}
contexts, err := dockerCli.ContextStore().List()
if err != nil {
return err
}
sort.Slice(contexts, func(i, j int) bool {
return contexts[i].Name < contexts[j].Name
})
for _, c := range contexts {
ngi := &nginfo{ng: &store.NodeGroup{
Name: c.Name,
Nodes: []store.Node{{
Name: c.Name,
Endpoint: c.Name,
}},
}}
// if a context has the same name as an instance from the store, do not
// add it to the builders list. An instance from the store takes
// precedence over context builders.
if hasNodeGroup(builders, ngi) {
continue
}
builders = append(builders, ngi)
}
eg, _ := errgroup.WithContext(ctx)
for _, b := range builders {
func(b *builder.Builder) {
func(b *nginfo) {
eg.Go(func() error {
_, _ = b.LoadNodes(timeoutCtx, true)
err = loadNodeGroupData(ctx, dockerCli, b)
if b.err == nil && err != nil {
b.err = err
}
return nil
})
}(b)
@@ -58,15 +87,29 @@ func runLs(dockerCli command.Cli, in lsOptions) error {
return err
}
currentName := "default"
current, err := storeutil.GetCurrentInstance(txn, dockerCli)
if err != nil {
return err
}
if current != nil {
currentName = current.Name
if current.Name == "default" {
currentName = current.Nodes[0].Endpoint
}
}
w := tabwriter.NewWriter(dockerCli.Out(), 0, 0, 1, ' ', 0)
fmt.Fprintf(w, "NAME/NODE\tDRIVER/ENDPOINT\tSTATUS\tBUILDKIT\tPLATFORMS\n")
currentSet := false
printErr := false
for _, b := range builders {
if current.Name == b.Name {
b.Name += " *"
if !currentSet && b.ng.Name == currentName {
b.ng.Name += " *"
currentSet = true
}
if ok := printBuilder(w, b); !ok {
if ok := printngi(w, b); !ok {
printErr = true
}
}
@@ -76,12 +119,19 @@ func runLs(dockerCli command.Cli, in lsOptions) error {
if printErr {
_, _ = fmt.Fprintf(dockerCli.Err(), "\n")
for _, b := range builders {
if b.Err() != nil {
_, _ = fmt.Fprintf(dockerCli.Err(), "Cannot load builder %s: %s\n", b.Name, strings.TrimSpace(b.Err().Error()))
if b.err != nil {
_, _ = fmt.Fprintf(dockerCli.Err(), "Cannot load builder %s: %s\n", b.ng.Name, strings.TrimSpace(b.err.Error()))
} else {
for _, d := range b.Nodes() {
if d.Err != nil {
_, _ = fmt.Fprintf(dockerCli.Err(), "Failed to get status for %s (%s): %s\n", b.Name, d.Name, strings.TrimSpace(d.Err.Error()))
for idx, n := range b.ng.Nodes {
d := b.drivers[idx]
var nodeErr string
if d.err != nil {
nodeErr = d.err.Error()
} else if d.di.Err != nil {
nodeErr = d.di.Err.Error()
}
if nodeErr != "" {
_, _ = fmt.Fprintf(dockerCli.Err(), "Failed to get status for %s (%s): %s\n", b.ng.Name, n.Name, strings.TrimSpace(nodeErr))
}
}
}
@@ -91,25 +141,26 @@ func runLs(dockerCli command.Cli, in lsOptions) error {
return nil
}
func printBuilder(w io.Writer, b *builder.Builder) (ok bool) {
func printngi(w io.Writer, ngi *nginfo) (ok bool) {
ok = true
var err string
if b.Err() != nil {
if ngi.err != nil {
ok = false
err = "error"
}
fmt.Fprintf(w, "%s\t%s\t%s\t\t\n", b.Name, b.Driver, err)
if b.Err() == nil {
for _, n := range b.Nodes() {
fmt.Fprintf(w, "%s\t%s\t%s\t\t\n", ngi.ng.Name, ngi.ng.Driver, err)
if ngi.err == nil {
for idx, n := range ngi.ng.Nodes {
d := ngi.drivers[idx]
var status string
if n.DriverInfo != nil {
status = n.DriverInfo.Status.String()
if d.info != nil {
status = d.info.Status.String()
}
if n.Err != nil {
if d.err != nil || d.di.Err != nil {
ok = false
fmt.Fprintf(w, " %s\t%s\t%s\t\t\n", n.Name, n.Endpoint, "error")
} else {
fmt.Fprintf(w, " %s\t%s\t%s\t%s\t%s\n", n.Name, n.Endpoint, status, n.Version, strings.Join(platformutil.FormatInGroups(n.Node.Platforms, n.Platforms), ", "))
fmt.Fprintf(w, " %s\t%s\t%s\t%s\t%s\n", n.Name, n.Endpoint, status, d.version, strings.Join(platformutil.FormatInGroups(n.Platforms, d.platforms), ", "))
}
}
}

View File

@@ -7,7 +7,7 @@ import (
"text/tabwriter"
"time"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/build"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/opts"
@@ -54,18 +54,14 @@ func runPrune(dockerCli command.Cli, opts pruneOptions) error {
return nil
}
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
dis, err := getInstanceOrDefault(ctx, dockerCli, opts.builder, "")
if err != nil {
return err
}
nodes, err := b.LoadNodes(ctx, false)
if err != nil {
return err
}
for _, node := range nodes {
if node.Err != nil {
return node.Err
for _, di := range dis {
if di.Err != nil {
return err
}
}
@@ -94,11 +90,11 @@ func runPrune(dockerCli command.Cli, opts pruneOptions) error {
}()
eg, ctx := errgroup.WithContext(ctx)
for _, node := range nodes {
func(node builder.Node) {
for _, di := range dis {
func(di build.DriverInfo) {
eg.Go(func() error {
if node.Driver != nil {
c, err := node.Driver.Client(ctx)
if di.Driver != nil {
c, err := di.Driver.Client(ctx)
if err != nil {
return err
}
@@ -113,7 +109,7 @@ func runPrune(dockerCli command.Cli, opts pruneOptions) error {
}
return nil
})
}(node)
}(di)
}
if err := eg.Wait(); err != nil {

View File

@@ -5,7 +5,6 @@ import (
"fmt"
"time"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/cli/cli"
@@ -45,33 +44,41 @@ func runRm(dockerCli command.Cli, in rmOptions) error {
return rmAllInactive(ctx, txn, dockerCli, in)
}
b, err := builder.New(dockerCli,
builder.WithName(in.builder),
builder.WithStore(txn),
builder.WithSkippedValidation(),
)
var ng *store.NodeGroup
if in.builder != "" {
ng, err = storeutil.GetNodeGroup(txn, dockerCli, in.builder)
if err != nil {
return err
}
} else {
ng, err = storeutil.GetCurrentInstance(txn, dockerCli)
if err != nil {
return err
}
}
if ng == nil {
return nil
}
ctxbuilders, err := dockerCli.ContextStore().List()
if err != nil {
return err
}
nodes, err := b.LoadNodes(ctx, false)
if err != nil {
return err
for _, cb := range ctxbuilders {
if ng.Driver == "docker" && len(ng.Nodes) == 1 && ng.Nodes[0].Endpoint == cb.Name {
return errors.Errorf("context builder cannot be removed, run `docker context rm %s` to remove this context", cb.Name)
}
}
if cb := b.ContextName(); cb != "" {
return errors.Errorf("context builder cannot be removed, run `docker context rm %s` to remove this context", cb)
}
err1 := rm(ctx, nodes, in)
if err := txn.Remove(b.Name); err != nil {
err1 := rm(ctx, dockerCli, in, ng)
if err := txn.Remove(ng.Name); err != nil {
return err
}
if err1 != nil {
return err1
}
_, _ = fmt.Fprintf(dockerCli.Err(), "%s removed\n", b.Name)
_, _ = fmt.Fprintf(dockerCli.Err(), "%s removed\n", ng.Name)
return nil
}
@@ -103,56 +110,61 @@ func rmCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
return cmd
}
func rm(ctx context.Context, nodes []builder.Node, in rmOptions) (err error) {
for _, node := range nodes {
if node.Driver == nil {
func rm(ctx context.Context, dockerCli command.Cli, in rmOptions, ng *store.NodeGroup) error {
dis, err := driversForNodeGroup(ctx, dockerCli, ng, "")
if err != nil {
return err
}
for _, di := range dis {
if di.Driver == nil {
continue
}
// Do not stop the buildkitd daemon when --keep-daemon is provided
if !in.keepDaemon {
if err := node.Driver.Stop(ctx, true); err != nil {
if err := di.Driver.Stop(ctx, true); err != nil {
return err
}
}
if err := node.Driver.Rm(ctx, true, !in.keepState, !in.keepDaemon); err != nil {
if err := di.Driver.Rm(ctx, true, !in.keepState, !in.keepDaemon); err != nil {
return err
}
if node.Err != nil {
err = node.Err
if di.Err != nil {
err = di.Err
}
}
return err
}
func rmAllInactive(ctx context.Context, txn *store.Txn, dockerCli command.Cli, in rmOptions) error {
builders, err := builder.GetBuilders(dockerCli, txn)
ctx, cancel := context.WithTimeout(ctx, 20*time.Second)
defer cancel()
ll, err := txn.List()
if err != nil {
return err
}
timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
defer cancel()
builders := make([]*nginfo, len(ll))
for i, ng := range ll {
builders[i] = &nginfo{ng: ng}
}
eg, _ := errgroup.WithContext(timeoutCtx)
eg, _ := errgroup.WithContext(ctx)
for _, b := range builders {
func(b *builder.Builder) {
func(b *nginfo) {
eg.Go(func() error {
nodes, err := b.LoadNodes(timeoutCtx, true)
if err != nil {
return errors.Wrapf(err, "cannot load %s", b.Name)
if err := loadNodeGroupData(ctx, dockerCli, b); err != nil {
return errors.Wrapf(err, "cannot load %s", b.ng.Name)
}
if cb := b.ContextName(); cb != "" {
return errors.Errorf("context builder cannot be removed, run `docker context rm %s` to remove this context", cb)
}
if b.Dynamic {
if b.ng.Dynamic {
return nil
}
if b.Inactive() {
rmerr := rm(ctx, nodes, in)
if err := txn.Remove(b.Name); err != nil {
if b.inactive() {
rmerr := rm(ctx, dockerCli, in, b.ng)
if err := txn.Remove(b.ng.Name); err != nil {
return err
}
_, _ = fmt.Fprintf(dockerCli.Err(), "%s removed\n", b.Name)
_, _ = fmt.Fprintf(dockerCli.Err(), "%s removed\n", b.ng.Name)
return rmerr
}
return nil

View File

@@ -3,7 +3,8 @@ package commands
import (
"context"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/moby/buildkit/util/appcontext"
@@ -17,19 +18,32 @@ type stopOptions struct {
func runStop(dockerCli command.Cli, in stopOptions) error {
ctx := appcontext.Context()
b, err := builder.New(dockerCli,
builder.WithName(in.builder),
builder.WithSkippedValidation(),
)
txn, release, err := storeutil.GetStore(dockerCli)
if err != nil {
return err
}
nodes, err := b.LoadNodes(ctx, false)
if err != nil {
return err
defer release()
if in.builder != "" {
ng, err := storeutil.GetNodeGroup(txn, dockerCli, in.builder)
if err != nil {
return err
}
if err := stop(ctx, dockerCli, ng); err != nil {
return err
}
return nil
}
return stop(ctx, nodes)
ng, err := storeutil.GetCurrentInstance(txn, dockerCli)
if err != nil {
return err
}
if ng != nil {
return stop(ctx, dockerCli, ng)
}
return stopCurrent(ctx, dockerCli)
}
func stopCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
@@ -51,15 +65,37 @@ func stopCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
return cmd
}
func stop(ctx context.Context, nodes []builder.Node) (err error) {
for _, node := range nodes {
if node.Driver != nil {
if err := node.Driver.Stop(ctx, true); err != nil {
func stop(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup) error {
dis, err := driversForNodeGroup(ctx, dockerCli, ng, "")
if err != nil {
return err
}
for _, di := range dis {
if di.Driver != nil {
if err := di.Driver.Stop(ctx, true); err != nil {
return err
}
}
if node.Err != nil {
err = node.Err
if di.Err != nil {
err = di.Err
}
}
return err
}
func stopCurrent(ctx context.Context, dockerCli command.Cli) error {
dis, err := getDefaultDrivers(ctx, dockerCli, false, "")
if err != nil {
return err
}
for _, di := range dis {
if di.Driver != nil {
if err := di.Driver.Stop(ctx, true); err != nil {
return err
}
}
if di.Err != nil {
err = di.Err
}
}
return err

View File

@@ -4,7 +4,6 @@ import (
"os"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/dockerutil"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/pkg/errors"
@@ -30,7 +29,7 @@ func runUse(dockerCli command.Cli, in useOptions) error {
return errors.Errorf("run `docker context use default` to switch to default context")
}
if in.builder == "default" || in.builder == dockerCli.CurrentContext() {
ep, err := dockerutil.GetCurrentEndpoint(dockerCli)
ep, err := storeutil.GetCurrentEndpoint(dockerCli)
if err != nil {
return err
}
@@ -53,7 +52,7 @@ func runUse(dockerCli command.Cli, in useOptions) error {
return errors.Wrapf(err, "failed to find instance %q", in.builder)
}
ep, err := dockerutil.GetCurrentEndpoint(dockerCli)
ep, err := storeutil.GetCurrentEndpoint(dockerCli)
if err != nil {
return err
}

487
commands/util.go Normal file
View File

@@ -0,0 +1,487 @@
package commands
import (
"context"
"net/url"
"os"
"strings"
"github.com/docker/buildx/build"
"github.com/docker/buildx/driver"
ctxkube "github.com/docker/buildx/driver/kubernetes/context"
remoteutil "github.com/docker/buildx/driver/remote/util"
"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/platformutil"
"github.com/docker/buildx/util/progress"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/context/docker"
ctxstore "github.com/docker/cli/cli/context/store"
dopts "github.com/docker/cli/opts"
dockerclient "github.com/docker/docker/client"
"github.com/moby/buildkit/util/grpcerrors"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc/codes"
"k8s.io/client-go/tools/clientcmd"
)
// validateEndpoint validates that endpoint is either a context or a docker host
func validateEndpoint(dockerCli command.Cli, ep string) (string, error) {
de, err := storeutil.GetDockerEndpoint(dockerCli, ep)
if err == nil && de != "" {
if ep == "default" {
return de, nil
}
return ep, nil
}
h, err := dopts.ParseHost(true, ep)
if err != nil {
return "", errors.Wrapf(err, "failed to parse endpoint %s", ep)
}
return h, nil
}
// validateBuildkitEndpoint validates that endpoint is a valid buildkit host
func validateBuildkitEndpoint(ep string) (string, error) {
if err := remoteutil.IsValidEndpoint(ep); err != nil {
return "", err
}
return ep, nil
}
// driversForNodeGroup returns drivers for a nodegroup instance
func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, contextPathHash string) ([]build.DriverInfo, error) {
eg, _ := errgroup.WithContext(ctx)
dis := make([]build.DriverInfo, len(ng.Nodes))
var f driver.Factory
if ng.Driver != "" {
var err error
f, err = driver.GetFactory(ng.Driver, true)
if err != nil {
return nil, err
}
} else {
// empty driver means nodegroup was implicitly created as a default
// driver for a docker context and allows falling back to a
// docker-container driver for an older daemon that doesn't support
// buildkit (< 18.06).
ep := ng.Nodes[0].Endpoint
dockerapi, err := clientForEndpoint(dockerCli, ep)
if err != nil {
return nil, err
}
// checking if the endpoint is healthy is needed to determine the driver type.
// if this fails then we can't continue with driver selection.
if _, err = dockerapi.Ping(ctx); err != nil {
return nil, err
}
f, err = driver.GetDefaultFactory(ctx, ep, dockerapi, false)
if err != nil {
return nil, err
}
ng.Driver = f.Name()
}
imageopt, err := storeutil.GetImageConfig(dockerCli, ng)
if err != nil {
return nil, err
}
for i, n := range ng.Nodes {
func(i int, n store.Node) {
eg.Go(func() error {
di := build.DriverInfo{
Name: n.Name,
Platform: n.Platforms,
ProxyConfig: storeutil.GetProxyConfig(dockerCli),
}
defer func() {
dis[i] = di
}()
dockerapi, err := clientForEndpoint(dockerCli, n.Endpoint)
if err != nil {
di.Err = err
return nil
}
// TODO: replace the following line with dockerclient.WithAPIVersionNegotiation option in clientForEndpoint
dockerapi.NegotiateAPIVersion(ctx)
contextStore := dockerCli.ContextStore()
var kcc driver.KubeClientConfig
kcc, err = configFromContext(n.Endpoint, contextStore)
if err != nil {
// err is returned if n.Endpoint is a non-context name like "unix:///var/run/docker.sock".
// try again with name="default".
// FIXME: n should retain real context name.
kcc, err = configFromContext("default", contextStore)
if err != nil {
logrus.Error(err)
}
}
tryToUseKubeConfigInCluster := false
if kcc == nil {
tryToUseKubeConfigInCluster = true
} else {
if _, err := kcc.ClientConfig(); err != nil {
tryToUseKubeConfigInCluster = true
}
}
if tryToUseKubeConfigInCluster {
kccInCluster := driver.KubeClientConfigInCluster{}
if _, err := kccInCluster.ClientConfig(); err == nil {
logrus.Debug("using kube config in cluster")
kcc = kccInCluster
}
}
d, err := driver.GetDriver(ctx, "buildx_buildkit_"+n.Name, f, n.Endpoint, dockerapi, imageopt.Auth, kcc, n.Flags, n.Files, n.DriverOpts, n.Platforms, contextPathHash)
if err != nil {
di.Err = err
return nil
}
di.Driver = d
di.ImageOpt = imageopt
return nil
})
}(i, n)
}
if err := eg.Wait(); err != nil {
return nil, err
}
return dis, nil
}
func configFromContext(endpointName string, s ctxstore.Reader) (clientcmd.ClientConfig, error) {
if strings.HasPrefix(endpointName, "kubernetes://") {
u, _ := url.Parse(endpointName)
if kubeconfig := u.Query().Get("kubeconfig"); kubeconfig != "" {
_ = os.Setenv(clientcmd.RecommendedConfigPathEnvVar, kubeconfig)
}
rules := clientcmd.NewDefaultClientConfigLoadingRules()
apiConfig, err := rules.Load()
if err != nil {
return nil, err
}
return clientcmd.NewDefaultClientConfig(*apiConfig, &clientcmd.ConfigOverrides{}), nil
}
return ctxkube.ConfigFromContext(endpointName, s)
}
// clientForEndpoint returns a docker client for an endpoint
func clientForEndpoint(dockerCli command.Cli, name string) (dockerclient.APIClient, error) {
list, err := dockerCli.ContextStore().List()
if err != nil {
return nil, err
}
for _, l := range list {
if l.Name == name {
dep, ok := l.Endpoints["docker"]
if !ok {
return nil, errors.Errorf("context %q does not have a Docker endpoint", name)
}
epm, ok := dep.(docker.EndpointMeta)
if !ok {
return nil, errors.Errorf("endpoint %q is not of type EndpointMeta, %T", dep, dep)
}
ep, err := docker.WithTLSData(dockerCli.ContextStore(), name, epm)
if err != nil {
return nil, err
}
clientOpts, err := ep.ClientOpts()
if err != nil {
return nil, err
}
return dockerclient.NewClientWithOpts(clientOpts...)
}
}
ep := docker.Endpoint{
EndpointMeta: docker.EndpointMeta{
Host: name,
},
}
clientOpts, err := ep.ClientOpts()
if err != nil {
return nil, err
}
return dockerclient.NewClientWithOpts(clientOpts...)
}
func getInstanceOrDefault(ctx context.Context, dockerCli command.Cli, instance, contextPathHash string) ([]build.DriverInfo, error) {
var defaultOnly bool
if instance == "default" && instance != dockerCli.CurrentContext() {
return nil, errors.Errorf("use `docker --context=default buildx` to switch to default context")
}
if instance == "default" || instance == dockerCli.CurrentContext() {
instance = ""
defaultOnly = true
}
list, err := dockerCli.ContextStore().List()
if err != nil {
return nil, err
}
for _, l := range list {
if l.Name == instance {
return nil, errors.Errorf("use `docker --context=%s buildx` to switch to context %s", instance, instance)
}
}
if instance != "" {
return getInstanceByName(ctx, dockerCli, instance, contextPathHash)
}
return getDefaultDrivers(ctx, dockerCli, defaultOnly, contextPathHash)
}
func getInstanceByName(ctx context.Context, dockerCli command.Cli, instance, contextPathHash string) ([]build.DriverInfo, error) {
txn, release, err := storeutil.GetStore(dockerCli)
if err != nil {
return nil, err
}
defer release()
ng, err := txn.NodeGroupByName(instance)
if err != nil {
return nil, err
}
return driversForNodeGroup(ctx, dockerCli, ng, contextPathHash)
}
// getDefaultDrivers returns drivers based on current cli config
func getDefaultDrivers(ctx context.Context, dockerCli command.Cli, defaultOnly bool, contextPathHash string) ([]build.DriverInfo, error) {
txn, release, err := storeutil.GetStore(dockerCli)
if err != nil {
return nil, err
}
defer release()
if !defaultOnly {
ng, err := storeutil.GetCurrentInstance(txn, dockerCli)
if err != nil {
return nil, err
}
if ng != nil {
return driversForNodeGroup(ctx, dockerCli, ng, contextPathHash)
}
}
imageopt, err := storeutil.GetImageConfig(dockerCli, nil)
if err != nil {
return nil, err
}
d, err := driver.GetDriver(ctx, "buildx_buildkit_default", nil, "", dockerCli.Client(), imageopt.Auth, nil, nil, nil, nil, nil, contextPathHash)
if err != nil {
return nil, err
}
return []build.DriverInfo{
{
Name: "default",
Driver: d,
ImageOpt: imageopt,
ProxyConfig: storeutil.GetProxyConfig(dockerCli),
},
}, nil
}
func loadInfoData(ctx context.Context, d *dinfo) error {
if d.di.Driver == nil {
return nil
}
info, err := d.di.Driver.Info(ctx)
if err != nil {
return err
}
d.info = info
if info.Status == driver.Running {
c, err := d.di.Driver.Client(ctx)
if err != nil {
return err
}
workers, err := c.ListWorkers(ctx)
if err != nil {
return errors.Wrap(err, "listing workers")
}
for _, w := range workers {
d.platforms = append(d.platforms, w.Platforms...)
}
d.platforms = platformutil.Dedupe(d.platforms)
inf, err := c.Info(ctx)
if err != nil {
if st, ok := grpcerrors.AsGRPCStatus(err); ok && st.Code() == codes.Unimplemented {
d.version, err = d.di.Driver.Version(ctx)
if err != nil {
return errors.Wrap(err, "getting version")
}
}
} else {
d.version = inf.BuildkitVersion.Version
}
}
return nil
}
func loadNodeGroupData(ctx context.Context, dockerCli command.Cli, ngi *nginfo) error {
eg, _ := errgroup.WithContext(ctx)
dis, err := driversForNodeGroup(ctx, dockerCli, ngi.ng, "")
if err != nil {
return err
}
ngi.drivers = make([]dinfo, len(dis))
for i, di := range dis {
d := di
ngi.drivers[i].di = &d
func(d *dinfo) {
eg.Go(func() error {
if err := loadInfoData(ctx, d); err != nil {
d.err = err
}
return nil
})
}(&ngi.drivers[i])
}
if err := eg.Wait(); err != nil {
return err
}
kubernetesDriverCount := 0
for _, di := range ngi.drivers {
if di.info != nil && len(di.info.DynamicNodes) > 0 {
kubernetesDriverCount++
}
}
isAllKubernetesDrivers := len(ngi.drivers) == kubernetesDriverCount
if isAllKubernetesDrivers {
var drivers []dinfo
var dynamicNodes []store.Node
for _, di := range ngi.drivers {
// dynamic nodes are used in Kubernetes driver.
// Kubernetes pods are dynamically mapped to BuildKit Nodes.
if di.info != nil && len(di.info.DynamicNodes) > 0 {
for i := 0; i < len(di.info.DynamicNodes); i++ {
// all []dinfo share *build.DriverInfo and *driver.Info
diClone := di
if pl := di.info.DynamicNodes[i].Platforms; len(pl) > 0 {
diClone.platforms = pl
}
drivers = append(drivers, diClone)
}
dynamicNodes = append(dynamicNodes, di.info.DynamicNodes...)
}
}
// replace rather than append (this removes the static nodes from the store)
ngi.ng.Nodes = dynamicNodes
ngi.drivers = drivers
ngi.ng.Dynamic = true
}
return nil
}
func hasNodeGroup(list []*nginfo, ngi *nginfo) bool {
for _, l := range list {
if ngi.ng.Name == l.ng.Name {
return true
}
}
return false
}
func dockerAPI(dockerCli command.Cli) *api {
return &api{dockerCli: dockerCli}
}
type api struct {
dockerCli command.Cli
}
func (a *api) DockerAPI(name string) (dockerclient.APIClient, error) {
if name == "" {
name = a.dockerCli.CurrentContext()
}
return clientForEndpoint(a.dockerCli, name)
}
type dinfo struct {
di *build.DriverInfo
info *driver.Info
platforms []specs.Platform
version string
err error
}
type nginfo struct {
ng *store.NodeGroup
drivers []dinfo
err error
}
// inactive checks if all nodes are inactive for this builder
func (n *nginfo) inactive() bool {
for idx := range n.ng.Nodes {
d := n.drivers[idx]
if d.info != nil && d.info.Status == driver.Running {
return false
}
}
return true
}
func boot(ctx context.Context, ngi *nginfo) (bool, error) {
toBoot := make([]int, 0, len(ngi.drivers))
for i, d := range ngi.drivers {
if d.err != nil || d.di.Err != nil || d.di.Driver == nil || d.info == nil {
continue
}
if d.info.Status != driver.Running {
toBoot = append(toBoot, i)
}
}
if len(toBoot) == 0 {
return false, nil
}
printer := progress.NewPrinter(context.TODO(), os.Stderr, os.Stderr, "auto")
baseCtx := ctx
eg, _ := errgroup.WithContext(ctx)
for _, idx := range toBoot {
func(idx int) {
eg.Go(func() error {
pw := progress.WithPrefix(printer, ngi.ng.Nodes[idx].Name, len(toBoot) > 1)
_, err := driver.Boot(ctx, baseCtx, ngi.drivers[idx].di.Driver, pw)
if err != nil {
ngi.drivers[idx].err = err
}
return nil
})
}(idx)
}
err := eg.Wait()
err1 := printer.Wait()
if err == nil {
err = err1
}
return true, err
}

View File

@@ -1,14 +1,17 @@
variable "GO_VERSION" {
default = "1.19"
default = "1.18"
}
variable "BIN_OUT" {
default = "./bin"
}
variable "RELEASE_OUT" {
default = "./release-out"
}
variable "DOCS_FORMATS" {
default = "md"
}
variable "DESTDIR" {
default = "./bin"
}
# Special target: https://github.com/docker/metadata-action#bake-definition
// Special target: https://github.com/docker/metadata-action#bake-definition
target "meta-helper" {
tags = ["docker/buildx-bin:local"]
}
@@ -94,13 +97,13 @@ target "mod-outdated" {
target "test" {
inherits = ["_common"]
target = "test-coverage"
output = ["${DESTDIR}/coverage"]
output = ["./coverage"]
}
target "binaries" {
inherits = ["_common"]
target = "binaries"
output = ["${DESTDIR}/build"]
output = [BIN_OUT]
platforms = ["local"]
}
@@ -124,7 +127,7 @@ target "binaries-cross" {
target "release" {
inherits = ["binaries-cross"]
target = "release"
output = ["${DESTDIR}/release"]
output = [RELEASE_OUT]
}
target "image" {

View File

@@ -0,0 +1,74 @@
# Defining additional build contexts and linking targets
In addition to the main `context` key that defines the build context, each target
can also define additional named contexts with a map defined with key `contexts`.
These values map to the `--build-context` flag in the [build command](https://docs.docker.com/engine/reference/commandline/buildx_build/#build-context).
Inside the Dockerfile these contexts can be used with the `FROM` instruction or `--from` flag.
The value can be a local source directory, container image (with `docker-image://` prefix),
Git URL, HTTP URL or a name of another target in the Bake file (with `target:` prefix).
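For illustration, a single target can mix these forms (the image tag, path, and
target name below are placeholders):

```hcl
# docker-bake.hcl (illustrative sketch)
target "app" {
  contexts = {
    # pin an image referenced by FROM
    alpine = "docker-image://alpine:3.16"
    # mount a local directory as a named context
    src = "../path/to/source"
    # reuse the result of another target in this file
    base = "target:base"
  }
}
```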
## Pinning alpine image
```dockerfile
# syntax=docker/dockerfile:1
FROM alpine
RUN echo "Hello world"
```
```hcl
# docker-bake.hcl
target "app" {
contexts = {
alpine = "docker-image://alpine:3.13"
}
}
```
## Using a secondary source directory
```dockerfile
# syntax=docker/dockerfile:1
FROM scratch AS src
FROM golang
COPY --from=src . .
```
```hcl
# docker-bake.hcl
target "app" {
contexts = {
src = "../path/to/source"
}
}
```
## Using a result of one target as a base image in another target
To use a result of one target as a build context of another, specify the target
name with the `target:` prefix.
```dockerfile
# syntax=docker/dockerfile:1
FROM baseapp
RUN echo "Hello world"
```
```hcl
# docker-bake.hcl
target "base" {
dockerfile = "baseapp.Dockerfile"
}
target "app" {
contexts = {
baseapp = "target:base"
}
}
```
Please note that in most cases you should just use a single multi-stage
Dockerfile with multiple targets for similar behavior. This approach is recommended
when you have multiple Dockerfiles that can't be easily merged into one.
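A single invocation is enough to build the chain above; `base` does not need to
be listed explicitly because bake resolves the `target:base` context into a
dependency:

```console
$ docker buildx bake app
```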

View File

@@ -0,0 +1,270 @@
# Building from Compose file
## Specification
Bake uses the [compose-spec](https://docs.docker.com/compose/compose-file/) to
parse a compose file and translate each service to a [target](file-definition.md#target).
```yaml
# docker-compose.yml
services:
webapp-dev:
build: &build-dev
dockerfile: Dockerfile.webapp
tags:
- docker.io/username/webapp:latest
cache_from:
- docker.io/username/webapp:cache
cache_to:
- docker.io/username/webapp:cache
webapp-release:
build:
<<: *build-dev
x-bake:
platforms:
- linux/amd64
- linux/arm64
db:
image: docker.io/username/db
build:
dockerfile: Dockerfile.db
```
```console
$ docker buildx bake --print
```
```json
{
"group": {
"default": {
"targets": [
"db",
"webapp-dev",
"webapp-release"
]
}
},
"target": {
"db": {
"context": ".",
"dockerfile": "Dockerfile.db",
"tags": [
"docker.io/username/db"
]
},
"webapp-dev": {
"context": ".",
"dockerfile": "Dockerfile.webapp",
"tags": [
"docker.io/username/webapp:latest"
],
"cache-from": [
"docker.io/username/webapp:cache"
],
"cache-to": [
"docker.io/username/webapp:cache"
]
},
"webapp-release": {
"context": ".",
"dockerfile": "Dockerfile.webapp",
"tags": [
"docker.io/username/webapp:latest"
],
"cache-from": [
"docker.io/username/webapp:cache"
],
"cache-to": [
"docker.io/username/webapp:cache"
],
"platforms": [
"linux/amd64",
"linux/arm64"
]
}
}
}
```
Unlike the [HCL format](file-definition.md#hcl-definition), there are some
limitations with the compose format:
* Specifying variables or global scope attributes is not yet supported
* The `inherits` service field is not supported, but you can use [YAML anchors](https://docs.docker.com/compose/compose-file/#fragments) to reference other services, as shown in the example above
## `.env` file
You can declare default environment variables in an environment file named
`.env`. This file is loaded from the current working directory, where the
command is executed, and applied to compose definitions passed with `-f`.
```yaml
# docker-compose.yml
services:
webapp:
image: docker.io/username/webapp:${TAG:-v1.0.0}
build:
dockerfile: Dockerfile
```
```
# .env
TAG=v1.1.0
```
```console
$ docker buildx bake --print
```
```json
{
"group": {
"default": {
"targets": [
"webapp"
]
}
},
"target": {
"webapp": {
"context": ".",
"dockerfile": "Dockerfile",
"tags": [
"docker.io/username/webapp:v1.1.0"
]
}
}
}
```
> **Note**
>
> System environment variables take precedence over environment variables
> in the `.env` file.
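For example, a value exported in the shell overrides the one declared in `.env`
(a minimal sketch; the tag value is only illustrative):

```console
$ TAG=v1.2.0 docker buildx bake --print
```

The printed definition would then tag the image as `docker.io/username/webapp:v1.2.0`.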
## Extension field with `x-bake`
Even if some fields are not (yet) available in the compose specification, you
can use the [special extension](https://docs.docker.com/compose/compose-file/#extension)
field `x-bake` in your compose file to evaluate extra fields:
```yaml
# docker-compose.yml
services:
addon:
image: ct-addon:bar
build:
context: .
dockerfile: ./Dockerfile
args:
CT_ECR: foo
CT_TAG: bar
x-bake:
tags:
- ct-addon:foo
- ct-addon:alp
platforms:
- linux/amd64
- linux/arm64
cache-from:
- user/app:cache
- type=local,src=path/to/cache
cache-to:
- type=local,dest=path/to/cache
pull: true
aws:
image: ct-fake-aws:bar
build:
dockerfile: ./aws.Dockerfile
args:
CT_ECR: foo
CT_TAG: bar
x-bake:
secret:
- id=mysecret,src=./secret
- id=mysecret2,src=./secret2
platforms: linux/arm64
output: type=docker
no-cache: true
```
```console
$ docker buildx bake --print
```
```json
{
"group": {
"default": {
"targets": [
"aws",
"addon"
]
}
},
"target": {
"addon": {
"context": ".",
"dockerfile": "./Dockerfile",
"args": {
"CT_ECR": "foo",
"CT_TAG": "bar"
},
"tags": [
"ct-addon:foo",
"ct-addon:alp"
],
"cache-from": [
"user/app:cache",
"type=local,src=path/to/cache"
],
"cache-to": [
"type=local,dest=path/to/cache"
],
"platforms": [
"linux/amd64",
"linux/arm64"
],
"pull": true
},
"aws": {
"context": ".",
"dockerfile": "./aws.Dockerfile",
"args": {
"CT_ECR": "foo",
"CT_TAG": "bar"
},
"tags": [
"ct-fake-aws:bar"
],
"secret": [
"id=mysecret,src=./secret",
"id=mysecret2,src=./secret2"
],
"platforms": [
"linux/arm64"
],
"output": [
"type=docker"
],
"no-cache": true
}
}
}
```
Complete list of valid fields for `x-bake`:
* `cache-from`
* `cache-to`
* `contexts`
* `no-cache`
* `no-cache-filter`
* `output`
* `platforms`
* `pull`
* `secret`
* `ssh`
* `tags`

View File

@@ -0,0 +1,216 @@
# Configuring builds
Bake supports loading build definition from files, but sometimes you need even
more flexibility to configure this definition.
For this use case, you can define variables inside the bake files that can be
set by the user with environment variables or by [attribute definitions](#global-scope-attributes)
in other bake files. If you wish to change a specific value for a single
invocation you can use the `--set` flag [from the command line](#from-command-line).
## Global scope attributes
You can define global scope attributes in HCL/JSON and use them for code reuse
and setting values for variables. This means you can do a "data-only" HCL file
with the values you want to set/override and use it in the list of regular
output files.
```hcl
# docker-bake.hcl
variable "FOO" {
default = "abc"
}
target "app" {
args = {
v1 = "pre-${FOO}"
}
}
```
You can use this file directly:
```console
$ docker buildx bake --print app
```
```json
{
"group": {
"default": {
"targets": [
"app"
]
}
},
"target": {
"app": {
"context": ".",
"dockerfile": "Dockerfile",
"args": {
"v1": "pre-abc"
}
}
}
}
```
Or create an override configuration file:
```hcl
# env.hcl
WHOAMI="myuser"
FOO="def-${WHOAMI}"
```
And invoke bake together with both of the files:
```console
$ docker buildx bake -f docker-bake.hcl -f env.hcl --print app
```
```json
{
"group": {
"default": {
"targets": [
"app"
]
}
},
"target": {
"app": {
"context": ".",
"dockerfile": "Dockerfile",
"args": {
"v1": "pre-def-myuser"
}
}
}
}
```
## From command line
You can also override target configurations from the command line with the
[`--set` flag](https://docs.docker.com/engine/reference/commandline/buildx_bake/#set):
```hcl
# docker-bake.hcl
target "app" {
args = {
mybuildarg = "foo"
}
}
```
```console
$ docker buildx bake --set app.args.mybuildarg=bar --set app.platform=linux/arm64 app --print
```
```json
{
"group": {
"default": {
"targets": [
"app"
]
}
},
"target": {
"app": {
"context": ".",
"dockerfile": "Dockerfile",
"args": {
"mybuildarg": "bar"
},
"platforms": [
"linux/arm64"
]
}
}
}
```
Pattern matching syntax defined in [https://golang.org/pkg/path/#Match](https://golang.org/pkg/path/#Match)
is also supported:
```console
$ docker buildx bake --set foo*.args.mybuildarg=value # overrides build arg for all targets starting with "foo"
$ docker buildx bake --set *.platform=linux/arm64 # overrides platform for all targets
$ docker buildx bake --set foo*.no-cache # bypass caching only for targets starting with "foo"
```
Complete list of overridable fields:
* `args`
* `cache-from`
* `cache-to`
* `context`
* `dockerfile`
* `labels`
* `no-cache`
* `output`
* `platform`
* `pull`
* `secrets`
* `ssh`
* `tags`
* `target`
## Using variables in variables across files
When multiple files are specified, one file can use variables defined in
another file.
```hcl
# docker-bake1.hcl
variable "FOO" {
default = upper("${BASE}def")
}
variable "BAR" {
default = "-${FOO}-"
}
target "app" {
args = {
v1 = "pre-${BAR}"
}
}
```
```hcl
# docker-bake2.hcl
variable "BASE" {
default = "abc"
}
target "app" {
args = {
v2 = "${FOO}-post"
}
}
```
```console
$ docker buildx bake -f docker-bake1.hcl -f docker-bake2.hcl --print app
```
```json
{
"group": {
"default": {
"targets": [
"app"
]
}
},
"target": {
"app": {
"context": ".",
"dockerfile": "Dockerfile",
"args": {
"v1": "pre--ABCDEF-",
"v2": "ABCDEF-post"
}
}
}
}
```

View File

@@ -0,0 +1,440 @@
# Bake file definition
`buildx bake` supports HCL, JSON and Compose file format for defining build
[groups](#group), [targets](#target) as well as [variables](#variable) and
[functions](#functions). It looks for build definition files in the current
directory in the following order:
* `docker-compose.yml`
* `docker-compose.yaml`
* `docker-bake.json`
* `docker-bake.override.json`
* `docker-bake.hcl`
* `docker-bake.override.hcl`
## Specification
Inside a bake file you can declare group, target and variable blocks to define
project specific reusable build flows.
### Target
A target reflects a single docker build invocation with the same options that
you would specify for `docker build`:
```hcl
# docker-bake.hcl
target "webapp-dev" {
dockerfile = "Dockerfile.webapp"
tags = ["docker.io/username/webapp:latest"]
}
```
```console
$ docker buildx bake webapp-dev
```
> **Note**
>
> In the case of compose files, each service corresponds to a target.
> If the compose service name contains a dot, it will be replaced with an underscore.
Complete list of valid target fields available for [HCL](#hcl-definition) and
[JSON](#json-definition) definitions:
| Name | Type | Description |
|---------------------|--------|-------------------------------------------------------------------------------------------------------------------------------------------------|
| `inherits` | List | [Inherit build options](#merging-and-inheritance) from other targets |
| `args` | Map | Set build-time variables (same as [`--build-arg` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
| `cache-from` | List | External cache sources (same as [`--cache-from` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
| `cache-to` | List | Cache export destinations (same as [`--cache-to` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
| `context` | String | Set of files located in the specified path or URL |
| `contexts` | Map | Additional build contexts (same as [`--build-context` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
| `dockerfile` | String | Name of the Dockerfile (same as [`--file` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
| `dockerfile-inline` | String | Inline Dockerfile content |
| `labels` | Map | Set metadata for an image (same as [`--label` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
| `no-cache` | Bool | Do not use cache when building the image (same as [`--no-cache` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
| `no-cache-filter` | List | Do not cache specified stages (same as [`--no-cache-filter` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
| `output` | List | Output destination (same as [`--output` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
| `platforms` | List | Set target platforms for build (same as [`--platform` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
| `pull` | Bool | Always attempt to pull all referenced images (same as [`--pull` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
| `secret` | List | Secret to expose to the build (same as [`--secret` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
| `ssh` | List | SSH agent socket or keys to expose to the build (same as [`--ssh` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
| `tags` | List | Name and optionally a tag in the format `name:tag` (same as [`--tag` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
| `target` | String | Set the target build stage to build (same as [`--target` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
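To make the table concrete, here is an illustrative sketch (not a prescriptive
setup) that combines several of these fields in a single target; all names and
references are examples only:

```hcl
# docker-bake.hcl
target "webapp-release" {
  context    = "."
  dockerfile = "Dockerfile.webapp"
  args = {
    GO_VERSION = "1.19"
  }
  platforms  = ["linux/amd64", "linux/arm64"]
  tags       = ["docker.io/username/webapp:latest"]
  cache-from = ["docker.io/username/webapp:cache"]
  cache-to   = ["docker.io/username/webapp:cache"]
  pull       = true
}
```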
### Group
A group is a grouping of targets:
```hcl
# docker-bake.hcl
group "build" {
targets = ["db", "webapp-dev"]
}
target "webapp-dev" {
dockerfile = "Dockerfile.webapp"
tags = ["docker.io/username/webapp:latest"]
}
target "db" {
dockerfile = "Dockerfile.db"
tags = ["docker.io/username/db"]
}
```
```console
$ docker buildx bake build
```
### Variable
Similar to how Terraform provides a way to [define variables](https://www.terraform.io/docs/configuration/variables.html#declaring-an-input-variable),
the HCL file format also supports variable block definitions. These can be used
to define variables with values provided by the current environment, or a
default value when unset:
```hcl
# docker-bake.hcl
variable "TAG" {
default = "latest"
}
target "webapp-dev" {
dockerfile = "Dockerfile.webapp"
tags = ["docker.io/username/webapp:${TAG}"]
}
```
```console
$ docker buildx bake webapp-dev # will use the default value "latest"
$ TAG=dev docker buildx bake webapp-dev # will use the TAG environment variable value
```
> **Tip**
>
> See also the [Configuring builds](configuring-build.md) page for advanced usage.
### Functions
A [set of generally useful functions](https://github.com/docker/buildx/blob/master/bake/hclparser/stdlib.go)
provided by [go-cty](https://github.com/zclconf/go-cty/tree/main/cty/function/stdlib)
are available for use in HCL files:
```hcl
# docker-bake.hcl
target "webapp-dev" {
dockerfile = "Dockerfile.webapp"
tags = ["docker.io/username/webapp:latest"]
args = {
buildno = "${add(123, 1)}"
}
}
```
In addition, [user defined functions](https://github.com/hashicorp/hcl/tree/main/ext/userfunc)
are also supported:
```hcl
# docker-bake.hcl
function "increment" {
params = [number]
result = number + 1
}
target "webapp-dev" {
dockerfile = "Dockerfile.webapp"
tags = ["docker.io/username/webapp:latest"]
args = {
buildno = "${increment(123)}"
}
}
```
> **Note**
>
> See [User defined HCL functions](hcl-funcs.md) page for more details.
## Built-in variables
* `BAKE_CMD_CONTEXT` can be used to access the main `context` for the bake command
from a bake file that has been [imported remotely](file-definition.md#remote-definition).
* `BAKE_LOCAL_PLATFORM` returns the current platform's default platform
specification (e.g. `linux/amd64`).
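For illustration, a minimal sketch (the target name is arbitrary) of using
`BAKE_LOCAL_PLATFORM` to restrict a target to the platform of the machine
invoking bake:

```hcl
# docker-bake.hcl
target "app" {
  # resolves to e.g. ["linux/amd64"] on an amd64 host
  platforms = [BAKE_LOCAL_PLATFORM]
}
```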
## Merging and inheritance
Multiple files can include the same target and final build options will be
determined by merging them together:
```hcl
# docker-bake.hcl
target "webapp-dev" {
dockerfile = "Dockerfile.webapp"
tags = ["docker.io/username/webapp:latest"]
}
```
```hcl
# docker-bake2.hcl
target "webapp-dev" {
tags = ["docker.io/username/webapp:dev"]
}
```
```console
$ docker buildx bake -f docker-bake.hcl -f docker-bake2.hcl webapp-dev
```
A group can specify its list of targets with the `targets` option. A target can
inherit build options by setting the `inherits` option to the list of targets or
groups to inherit from:
```hcl
# docker-bake.hcl
target "webapp-dev" {
dockerfile = "Dockerfile.webapp"
tags = ["docker.io/username/webapp:${TAG}"]
}
target "webapp-release" {
inherits = ["webapp-dev"]
platforms = ["linux/amd64", "linux/arm64"]
}
```
## `default` target/group
When you invoke `bake` you specify what targets/groups you want to build. If no
arguments are specified, the group/target named `default` will be built:
```hcl
# docker-bake.hcl
target "default" {
dockerfile = "Dockerfile.webapp"
tags = ["docker.io/username/webapp:latest"]
}
```
```console
$ docker buildx bake
```
## Definitions
### HCL definition
The HCL definition file is recommended as it is more aligned with the buildx UX
and also allows better code reuse, different target groups and extended features.
```hcl
# docker-bake.hcl
variable "TAG" {
default = "latest"
}
group "default" {
targets = ["db", "webapp-dev"]
}
target "webapp-dev" {
dockerfile = "Dockerfile.webapp"
tags = ["docker.io/username/webapp:${TAG}"]
}
target "webapp-release" {
inherits = ["webapp-dev"]
platforms = ["linux/amd64", "linux/arm64"]
}
target "db" {
dockerfile = "Dockerfile.db"
tags = ["docker.io/username/db"]
}
```
### JSON definition
```json
{
"variable": {
"TAG": {
"default": "latest"
}
},
"group": {
"default": {
"targets": [
"db",
"webapp-dev"
]
}
},
"target": {
"webapp-dev": {
"dockerfile": "Dockerfile.webapp",
"tags": [
"docker.io/username/webapp:${TAG}"
]
},
"webapp-release": {
"inherits": [
"webapp-dev"
],
"platforms": [
"linux/amd64",
"linux/arm64"
]
},
"db": {
"dockerfile": "Dockerfile.db",
"tags": [
"docker.io/username/db"
]
}
}
}
```
### Compose file
```yaml
# docker-compose.yml
services:
webapp:
image: docker.io/username/webapp:latest
build:
dockerfile: Dockerfile.webapp
db:
image: docker.io/username/db
build:
dockerfile: Dockerfile.db
```
> **Note**
>
> See [Building from Compose file](compose-file.md) page for more details.
## Remote definition
You can also build bake files directly from a remote Git repository or HTTPS URL:
```console
$ docker buildx bake "https://github.com/docker/cli.git#v20.10.11" --print
#1 [internal] load git source https://github.com/docker/cli.git#v20.10.11
#1 0.745 e8f1871b077b64bcb4a13334b7146492773769f7 refs/tags/v20.10.11
#1 2.022 From https://github.com/docker/cli
#1 2.022 * [new tag] v20.10.11 -> v20.10.11
#1 DONE 2.9s
```
```json
{
"group": {
"default": {
"targets": [
"binary"
]
}
},
"target": {
"binary": {
"context": "https://github.com/docker/cli.git#v20.10.11",
"dockerfile": "Dockerfile",
"args": {
"BASE_VARIANT": "alpine",
"GO_STRIP": "",
"VERSION": ""
},
"target": "binary",
"platforms": [
"local"
],
"output": [
"build"
]
}
}
}
```
As you can see, the context is fixed to `https://github.com/docker/cli.git` even if
[no context is actually defined](https://github.com/docker/cli/blob/2776a6d694f988c0c1df61cad4bfac0f54e481c8/docker-bake.hcl#L17-L26)
in the definition.
If you want to access the main context for the bake command from a bake file
that has been imported remotely, you can use the [`BAKE_CMD_CONTEXT` built-in var](#built-in-variables).
```console
$ cat https://raw.githubusercontent.com/tonistiigi/buildx/remote-test/docker-bake.hcl
```
```hcl
target "default" {
context = BAKE_CMD_CONTEXT
dockerfile-inline = <<EOT
FROM alpine
WORKDIR /src
COPY . .
RUN ls -l && stop
EOT
}
```
```console
$ docker buildx bake "https://github.com/tonistiigi/buildx.git#remote-test" --print
```
```json
{
"target": {
"default": {
"context": ".",
"dockerfile": "Dockerfile",
"dockerfile-inline": "FROM alpine\nWORKDIR /src\nCOPY . .\nRUN ls -l \u0026\u0026 stop\n"
}
}
}
```
```console
$ touch foo bar
$ docker buildx bake "https://github.com/tonistiigi/buildx.git#remote-test"
```
```text
...
> [4/4] RUN ls -l && stop:
#8 0.101 total 0
#8 0.102 -rw-r--r-- 1 root root 0 Jul 27 18:47 bar
#8 0.102 -rw-r--r-- 1 root root 0 Jul 27 18:47 foo
#8 0.102 /bin/sh: stop: not found
```
```console
$ docker buildx bake "https://github.com/tonistiigi/buildx.git#remote-test" "https://github.com/docker/cli.git#v20.10.11" --print
#1 [internal] load git source https://github.com/tonistiigi/buildx.git#remote-test
#1 0.429 577303add004dd7efeb13434d69ea030d35f7888 refs/heads/remote-test
#1 CACHED
```
```json
{
"target": {
"default": {
"context": "https://github.com/docker/cli.git#v20.10.11",
"dockerfile": "Dockerfile",
"dockerfile-inline": "FROM alpine\nWORKDIR /src\nCOPY . .\nRUN ls -l \u0026\u0026 stop\n"
}
}
}
```
```console
$ docker buildx bake "https://github.com/tonistiigi/buildx.git#remote-test" "https://github.com/docker/cli.git#v20.10.11"
```
```text
...
> [4/4] RUN ls -l && stop:
#8 0.136 drwxrwxrwx 5 root root 4096 Jul 27 18:31 kubernetes
#8 0.136 drwxrwxrwx 3 root root 4096 Jul 27 18:31 man
#8 0.136 drwxrwxrwx 2 root root 4096 Jul 27 18:31 opts
#8 0.136 -rw-rw-rw- 1 root root 1893 Jul 27 18:31 poule.yml
#8 0.136 drwxrwxrwx 7 root root 4096 Jul 27 18:31 scripts
#8 0.136 drwxrwxrwx 3 root root 4096 Jul 27 18:31 service
#8 0.136 drwxrwxrwx 2 root root 4096 Jul 27 18:31 templates
#8 0.136 drwxrwxrwx 10 root root 4096 Jul 27 18:31 vendor
#8 0.136 -rwxrwxrwx 1 root root 9620 Jul 27 18:31 vendor.conf
#8 0.136 /bin/sh: stop: not found
```

View File

@@ -0,0 +1,327 @@
# User defined HCL functions
## Using interpolation to tag an image with the git sha
As shown in the [File definition](file-definition.md#variable) page, `bake`
supports variable blocks which are assigned to matching environment variables
or default values:
```hcl
# docker-bake.hcl
variable "TAG" {
default = "latest"
}
group "default" {
targets = ["webapp"]
}
target "webapp" {
tags = ["docker.io/username/webapp:${TAG}"]
}
```
Alternatively, in JSON format:
```json
{
"variable": {
"TAG": {
"default": "latest"
}
},
"group": {
"default": {
"targets": ["webapp"]
}
},
"target": {
"webapp": {
"tags": ["docker.io/username/webapp:${TAG}"]
}
}
}
```
```console
$ docker buildx bake --print webapp
```
```json
{
"group": {
"default": {
"targets": [
"webapp"
]
}
},
"target": {
"webapp": {
"context": ".",
"dockerfile": "Dockerfile",
"tags": [
"docker.io/username/webapp:latest"
]
}
}
}
```
```console
$ TAG=$(git rev-parse --short HEAD) docker buildx bake --print webapp
```
```json
{
"group": {
"default": {
"targets": [
"webapp"
]
}
},
"target": {
"webapp": {
"context": ".",
"dockerfile": "Dockerfile",
"tags": [
"docker.io/username/webapp:985e9e9"
]
}
}
}
```
## Using the `add` function
You can use [`go-cty` stdlib functions](https://github.com/zclconf/go-cty/tree/main/cty/function/stdlib).
Here we are using the `add` function.
```hcl
# docker-bake.hcl
variable "TAG" {
default = "latest"
}
group "default" {
targets = ["webapp"]
}
target "webapp" {
args = {
buildno = "${add(123, 1)}"
}
}
```
```console
$ docker buildx bake --print webapp
```
```json
{
"group": {
"default": {
"targets": [
"webapp"
]
}
},
"target": {
"webapp": {
"context": ".",
"dockerfile": "Dockerfile",
"args": {
"buildno": "124"
}
}
}
}
```
## Defining an `increment` function
It also supports [user defined functions](https://github.com/hashicorp/hcl/tree/main/ext/userfunc).
The following example defines a simple `increment` function.
```hcl
# docker-bake.hcl
function "increment" {
params = [number]
result = number + 1
}
group "default" {
targets = ["webapp"]
}
target "webapp" {
args = {
buildno = "${increment(123)}"
}
}
```
```console
$ docker buildx bake --print webapp
```
```json
{
"group": {
"default": {
"targets": [
"webapp"
]
}
},
"target": {
"webapp": {
"context": ".",
"dockerfile": "Dockerfile",
"args": {
"buildno": "124"
}
}
}
}
```
## Only adding tags if a variable is not empty using `notequal`
Here we are using the conditional `notequal` function, which is just for
symmetry with the `equal` one.
```hcl
# docker-bake.hcl
variable "TAG" {default="" }
group "default" {
targets = [
"webapp",
]
}
target "webapp" {
context="."
dockerfile="Dockerfile"
tags = [
"my-image:latest",
notequal("",TAG) ? "my-image:${TAG}": "",
]
}
```
```console
$ docker buildx bake --print webapp
```
```json
{
"group": {
"default": {
"targets": [
"webapp"
]
}
},
"target": {
"webapp": {
"context": ".",
"dockerfile": "Dockerfile",
"tags": [
"my-image:latest"
]
}
}
}
```
## Using variables in functions
Variables can refer to other variables, just like target blocks can. Stdlib
functions can also be called, but user-defined functions can't at the moment.
```hcl
# docker-bake.hcl
variable "REPO" {
default = "user/repo"
}
function "tag" {
params = [tag]
result = ["${REPO}:${tag}"]
}
target "webapp" {
tags = tag("v1")
}
```
```console
$ docker buildx bake --print webapp
```
```json
{
"group": {
"default": {
"targets": [
"webapp"
]
}
},
"target": {
"webapp": {
"context": ".",
"dockerfile": "Dockerfile",
"tags": [
"user/repo:v1"
]
}
}
}
```
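To illustrate the stdlib point above, here is a sketch (names are purely
illustrative) where a user-defined function both interpolates a variable and
calls the stdlib `lower` function:

```hcl
# docker-bake.hcl
variable "REPO" {
  default = "user/repo"
}
function "tag" {
  params = [tag]
  # stdlib call inside a user-defined function
  result = ["${REPO}:${lower(tag)}"]
}
target "webapp" {
  tags = tag("V1")
}
```

Printing this target should yield the tag `user/repo:v1`.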
## Using typed variables
Non-string variables are also accepted. The value passed with the environment
variable is first parsed into a suitable type.
```hcl
# docker-bake.hcl
variable "FOO" {
default = 3
}
variable "IS_FOO" {
default = true
}
target "app" {
args = {
v1 = FOO > 5 ? "higher" : "lower"
v2 = IS_FOO ? "yes" : "no"
}
}
```
```console
$ docker buildx bake --print app
```
```json
{
"group": {
"default": {
"targets": [
"app"
]
}
},
"target": {
"app": {
"context": ".",
"dockerfile": "Dockerfile",
"args": {
"v1": "lower",
"v2": "yes"
}
}
}
}
```

36
docs/guides/bake/index.md Normal file
View File

@@ -0,0 +1,36 @@
# High-level build options with Bake
> This command is experimental.
>
> The design of bake is in early stages, and we are looking for [feedback from users](https://github.com/docker/buildx/issues).
{: .experimental }
Buildx also aims to provide support for high-level build concepts that go beyond
invoking a single build command. We want to support building all the images in
your application together and let the users define project specific reusable
build flows that can then be easily invoked by anyone.
[BuildKit](https://github.com/moby/buildkit) efficiently handles multiple
concurrent build requests and de-duplicates work. The build commands can be
combined with general-purpose command runners (for example, `make`). However,
these tools generally invoke builds in sequence and therefore cannot leverage
the full potential of BuildKit parallelization, or combine BuildKit's output
for the user. For this use case, we have added a command called
[`docker buildx bake`](https://docs.docker.com/engine/reference/commandline/buildx_bake/).
The `bake` command supports building images from HCL, JSON and Compose files.
This is similar to [`docker compose build`](https://docs.docker.com/compose/reference/build/),
but allows all the services to be built concurrently as part of a single
request. If multiple files are specified, they are all read and their
configurations are combined.
We recommend using HCL files as they are more aligned with the buildx UX
and also allow better code reuse, different target groups and extended features.
## Next steps
* [File definition](file-definition.md)
* [Configuring builds](configuring-build.md)
* [User defined HCL functions](hcl-funcs.md)
* [Defining additional build contexts and linking targets](build-contexts.md)
* [Building from Compose file](compose-file.md)

View File

@@ -40,7 +40,7 @@ jobs:
In this example we are also using 3 other actions:
* [`setup-buildx`](https://github.com/docker/setup-buildx-action) action will create and boot a builder using by
default the `docker-container` [builder driver](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver).
default the `docker-container` [builder driver](../reference/buildx_create.md#driver).
This is **not required but recommended** using it to be able to build multi-platform images, export cache, etc.
* [`setup-qemu`](https://github.com/docker/setup-qemu-action) action can be useful if you want
to add emulation support with QEMU to be able to build against more platforms.

View File

@@ -0,0 +1,75 @@
# Docker container driver
The buildx docker-container driver allows creation of a managed and
customizable BuildKit environment inside a dedicated Docker container.
Using the docker-container driver has a couple of advantages over the basic
docker driver. Firstly, we can manually override the version of buildkit to
use, meaning that we can access the latest and greatest features as soon as
they're released, instead of waiting to upgrade to a newer version of Docker.
Additionally, we can access more complex features like multi-architecture
builds and the more advanced cache exporters, which are currently unsupported
in the default docker driver.
We can easily create a new builder that uses the docker-container driver:
```console
$ docker buildx create --name container --driver docker-container
container
```
We should then be able to see it on our list of available builders:
```console
$ docker buildx ls
NAME/NODE DRIVER/ENDPOINT STATUS BUILDKIT PLATFORMS
container docker-container
container0 desktop-linux inactive
default docker
default default running 20.10.17 linux/amd64, linux/386
```
If we trigger a build, the appropriate `moby/buildkit` image will be pulled
from [Docker Hub](https://hub.docker.com/u/moby/buildkit), the image started,
and our build submitted to our containerized build server.
```console
$ docker buildx build -t <image> --builder=container .
WARNING: No output specified with docker-container driver. Build result will only remain in the build cache. To push result image into registry use --push or to load image into docker use --load
#1 [internal] booting buildkit
#1 pulling image moby/buildkit:buildx-stable-1
#1 pulling image moby/buildkit:buildx-stable-1 1.9s done
#1 creating container buildx_buildkit_container0
#1 creating container buildx_buildkit_container0 0.5s done
#1 DONE 2.4s
...
```
Note the warning "Build result will only remain in the build cache" - unlike
the `docker` driver, the built image must be explicitly loaded into the local
image store. We can use the `--load` flag for this:
```console
$ docker buildx build --load -t <image> --builder=container .
...
=> exporting to oci image format 7.7s
=> => exporting layers 4.9s
=> => exporting manifest sha256:4e4ca161fa338be2c303445411900ebbc5fc086153a0b846ac12996960b479d3 0.0s
=> => exporting config sha256:adf3eec768a14b6e183a1010cb96d91155a82fd722a1091440c88f3747f1f53f 0.0s
=> => sending tarball 2.8s
=> importing to docker
```
The image should then be available in the image store:
```console
$ docker image ls
REPOSITORY TAG IMAGE ID CREATED SIZE
<image> latest adf3eec768a1 2 minutes ago 197MB
```
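As mentioned above, one advantage of this driver is that the BuildKit version
can be pinned. A sketch of doing so with the `image` driver option (the builder
name and tag are only examples):

```console
$ docker buildx create \
    --name container-pinned \
    --driver docker-container \
    --driver-opt image=moby/buildkit:v0.10.5
```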
## Further reading
For more information on the docker-container driver, see the [buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver).
<!--- FIXME: for 0.9, make reference link relative --->

View File

@@ -0,0 +1,50 @@
# Docker driver
The buildx docker driver is the default built-in driver, which uses the BuildKit
server components built directly into the docker engine.
No setup should be required for the docker driver - it should already be
configured for you:
```console
$ docker buildx ls
NAME/NODE DRIVER/ENDPOINT STATUS BUILDKIT PLATFORMS
default docker
default default running 20.10.17 linux/amd64, linux/386
```
This builder is ready to build out-of-the-box, requiring no extra setup,
so you can get going with `docker buildx build` right away.
Depending on your personal setup, you may find multiple builders in your list
that use the docker driver. For example, on a system that runs both a
package-managed version of dockerd and Docker Desktop, you might have the
following:
```console
NAME/NODE DRIVER/ENDPOINT STATUS BUILDKIT PLATFORMS
default docker
default default running 20.10.17 linux/amd64, linux/386
desktop-linux * docker
desktop-linux desktop-linux running 20.10.17 linux/amd64, linux/arm64, linux/riscv64, linux/ppc64le, linux/s390x, linux/386, linux/arm/v7, linux/arm/v6
```
This is because the docker driver builders are automatically created from
the available [Docker Contexts](https://docs.docker.com/engine/context/working-with-contexts/).
When you add new contexts using `docker context create`, these will appear in
your list of buildx builders.
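For instance, a sketch of adding a remote engine over SSH (the context name and
host are illustrative); a matching builder using the docker driver will then
show up in `docker buildx ls`:

```console
$ docker context create remote --docker "host=ssh://user@example.com"
$ docker buildx ls
```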
Unlike the [other drivers](../index.md), builders using the docker driver
cannot be manually created, and can only be automatically created from the
docker context. Additionally, they cannot be configured to use a specific
BuildKit version, and cannot take any extra parameters, as these are both
preset by the Docker engine internally.
If you want the extra configuration and flexibility without too much more
overhead, then see the help page for the [docker-container driver](./docker-container.md).
## Further reading
For more information on the docker driver, see the [buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver).
<!--- FIXME: for 0.9, make reference link relative --->

View File

@@ -0,0 +1,41 @@
# Buildx drivers overview
The buildx client connects out to the BuildKit backend to execute builds -
Buildx drivers allow fine-grained control over management of the backend, and
support several different options for where and how BuildKit should run.
Currently, we support the following drivers:
- The `docker` driver, that uses the BuildKit library bundled into the Docker
daemon.
([guide](./docker.md), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
- The `docker-container` driver, that launches a dedicated BuildKit container
using Docker, for access to advanced features.
([guide](./docker-container.md), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
- The `kubernetes` driver, that launches dedicated BuildKit pods in a
remote Kubernetes cluster, for scalable builds.
([guide](./kubernetes.md), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
- The `remote` driver, that allows directly connecting to a manually managed
BuildKit daemon, for more custom setups.
([guide](./remote.md))
<!--- FIXME: for 0.9, make links relative, and add reference link for remote --->
To create a new builder that uses one of the above drivers, you can use the
[`docker buildx create`](https://docs.docker.com/engine/reference/commandline/buildx_create/) command:
```console
$ docker buildx create --name=<builder-name> --driver=<driver> --driver-opt=<driver-options>
```
The build experience is very similar across drivers; however, some features
are not evenly supported across the board. Notably, the `docker` driver does
not include support for certain output/caching types.
| Feature | `docker` | `docker-container` | `kubernetes` | `remote` |
| :---------------------------- | :-------------: | :----------------: | :----------: | :--------------------: |
| **Automatic `--load`** | ✅ | ❌ | ❌ | ❌ |
| **Cache export** | ❔ (inline only) | ✅ | ✅ | ✅ |
| **Docker/OCI tarball output** | ❌ | ✅ | ✅ | ✅ |
| **Multi-arch images** | ❌ | ✅ | ✅ | ✅ |
| **BuildKit configuration** | ❌ | ✅ | ✅ | ❔ (managed externally) |

View File

@@ -0,0 +1,238 @@
# Kubernetes driver
The buildx kubernetes driver allows connecting your local development or CI
environments to your kubernetes cluster, giving you access to more powerful
and varied compute resources.
This guide assumes you already have a kubernetes cluster - if you don't have
one yet, you can easily follow along by installing
[minikube](https://minikube.sigs.k8s.io/docs/).
Before connecting buildx to your cluster, you may want to create a dedicated
namespace using `kubectl` to keep your buildx-managed resources separate. You
can call your namespace anything you want, or use the existing `default`
namespace, but we'll create a `buildkit` namespace for now:
```console
$ kubectl create namespace buildkit
```
Then create a new buildx builder:
```console
$ docker buildx create \
--bootstrap \
--name=kube \
--driver=kubernetes \
--driver-opt=namespace=buildkit
```
This assumes that the kubernetes cluster you want to connect to is currently
accessible via the kubectl command, with the `KUBECONFIG` environment variable
[set appropriately](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/#set-the-kubeconfig-environment-variable)
if necessary.
You should now be able to see the builder in the list of buildx builders:
```console
$ docker buildx ls
NAME/NODE DRIVER/ENDPOINT STATUS PLATFORMS
kube kubernetes
kube0-6977cdcb75-k9h9m running linux/amd64, linux/amd64/v2, linux/amd64/v3, linux/386
default * docker
default default running linux/amd64, linux/386
```
The buildx driver creates the necessary resources on your cluster in the
specified namespace (in this case, `buildkit`), while keeping your
driver configuration locally. You can see the running pods with:
```console
$ kubectl -n buildkit get deployments
NAME READY UP-TO-DATE AVAILABLE AGE
kube0 1/1 1 1 32s
$ kubectl -n buildkit get pods
NAME READY STATUS RESTARTS AGE
kube0-6977cdcb75-k9h9m 1/1 Running 0 32s
```
You can use your new builder by including the `--builder` flag when running
buildx commands. For example (replacing `<user>` and `<image>` with your Docker
Hub username and desired image output respectively):
```console
$ docker buildx build . \
--builder=kube \
-t <user>/<image> \
--push
```
## Scaling Buildkit
One of the main advantages of the kubernetes builder is that you can easily
scale your builder up and down to handle increased build load. These controls
are exposed via the following options:
- `replicas=N`
- This scales the number of buildkit pods to the desired size. By default,
only a single pod will be created, but increasing this allows taking
advantage of multiple nodes in your cluster.
- `requests.cpu`, `requests.memory`, `limits.cpu`, `limits.memory`
- These options allow requesting and limiting the resources available to each
buildkit pod according to the official kubernetes documentation
[here](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
For example, to create 4 replica buildkit pods:
```console
$ docker buildx create \
--bootstrap \
--name=kube \
--driver=kubernetes \
--driver-opt=namespace=buildkit,replicas=4
```
Listing the pods, we get:
```console
$ kubectl -n buildkit get deployments
NAME READY UP-TO-DATE AVAILABLE AGE
kube0 4/4 4 4 8s
$ kubectl -n buildkit get pods
NAME READY STATUS RESTARTS AGE
kube0-6977cdcb75-48ld2 1/1 Running 0 8s
kube0-6977cdcb75-rkc6b 1/1 Running 0 8s
kube0-6977cdcb75-vb4ks 1/1 Running 0 8s
kube0-6977cdcb75-z4fzs 1/1 Running 0 8s
```
Additionally, you can use the `loadbalance=(sticky|random)` option to control
the load-balancing behavior when there are multiple replicas. `random` selects
nodes from the available pool at random, which should provide better balancing
across all replicas, while `sticky` (the default) attempts to connect the same
build performed multiple times to the same node each time, ensuring better
local cache utilization.
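For example, a sketch of creating the same builder with random load-balancing
across the 4 replicas:

```console
$ docker buildx create \
  --bootstrap \
  --name=kube \
  --driver=kubernetes \
  --driver-opt=namespace=buildkit,replicas=4,loadbalance=random
```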
For more information on scalability, see the options for [buildx create](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver-opt).
## Multi-platform builds
The kubernetes buildx driver has support for creating [multi-platform images](https://docs.docker.com/build/buildx/multiplatform-images/),
for easily building for multiple platforms at once.
### QEMU
Like the other containerized driver, `docker-container`, the kubernetes driver
also supports using [QEMU](https://www.qemu.org/) (user mode) to build
non-native platforms. If you are using a default setup like the one above, no
extra configuration should be needed; you can start building for other
architectures simply by including the `--platform` flag.
For example, to build a Linux image for `amd64` and `arm64`:
```console
$ docker buildx build . \
--builder=kube \
--platform=linux/amd64,linux/arm64 \
-t <user>/<image> \
--push
```
> **Warning**
> QEMU performs full-system emulation of non-native platforms, which is *much*
> slower than native builds. Compute-heavy tasks like compilation and
> compression/decompression will likely take a large performance hit.
Note that if you're using a custom buildkit image with the `image=<image>` driver
option, or invoking non-native binaries from within your build, you may need to
explicitly enable QEMU using the `qemu.install` option during driver creation:
```console
$ docker buildx create \
--bootstrap \
--name=kube \
--driver=kubernetes \
--driver-opt=namespace=buildkit,qemu.install=true
```
### Native
If you have access to cluster nodes of different architectures, you can
configure the kubernetes driver to take advantage of these for native builds.
To do this, use the `--append` feature of `docker buildx create`.
To start, we can create our builder with explicit support for a single
architecture, `amd64`:
```console
$ docker buildx create \
--bootstrap \
--name=kube \
--driver=kubernetes \
--platform=linux/amd64 \
--node=builder-amd64 \
--driver-opt=namespace=buildkit,nodeselector="kubernetes.io/arch=amd64"
```
This creates a buildx builder `kube` containing a single builder node `builder-amd64`.
Note that the buildx concept of a node is not the same as the kubernetes
concept of a node - the buildx node in this case could connect multiple
kubernetes nodes of the same architecture together.
With our `kube` driver created, we can now introduce another architecture into
the mix, for example, like before we can use `arm64`:
```console
$ docker buildx create \
--append \
--bootstrap \
--name=kube \
--driver=kubernetes \
--platform=linux/arm64 \
--node=builder-arm64 \
--driver-opt=namespace=buildkit,nodeselector="kubernetes.io/arch=arm64"
```
If you list builders now, you should be able to see both nodes present:
```console
$ docker buildx ls
NAME/NODE DRIVER/ENDPOINT STATUS PLATFORMS
kube kubernetes
builder-amd64 kubernetes:///kube?deployment=builder-amd64&kubeconfig= running linux/amd64*, linux/amd64/v2, linux/amd64/v3, linux/386
builder-arm64 kubernetes:///kube?deployment=builder-arm64&kubeconfig= running linux/arm64*
```
You should now be able to build multi-arch images with `amd64` and `arm64`
combined, by specifying those platforms together in your buildx command:
```console
$ docker buildx build --builder=kube --platform=linux/amd64,linux/arm64 -t <user>/<image> --push .
```
You can repeat the `buildx create --append` command for as many different
architectures as you want to support.
## Rootless mode
The kubernetes driver supports rootless mode. For more information on how
rootless mode works, and its requirements, see [here](https://github.com/moby/buildkit/blob/master/docs/rootless.md).
To enable it in your cluster, you can use the `rootless=true` driver option:
```console
$ docker buildx create \
--name=kube \
--driver=kubernetes \
--driver-opt=namespace=buildkit,rootless=true
```
This will create your pods without `securityContext.privileged`.
## Further reading
For more information on the kubernetes driver, see the [buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver).
<!--- FIXME: for 0.9, make reference link relative --->

View File

@@ -0,0 +1,178 @@
# Remote driver
The buildx remote driver allows for more complex custom build workloads,
letting users connect to external buildkit instances. This is useful for
scenarios that require manual management of the buildkit daemon, or where a
buildkit daemon is exposed by another source.
To connect to a running buildkitd instance:
```console
$ docker buildx create \
--name remote \
--driver remote \
tcp://localhost:1234
```
## Remote Buildkit over Unix sockets
In this scenario, we'll create a setup with buildkitd listening on a unix
socket, and have buildx connect through it.
Firstly, ensure that [buildkit](https://github.com/moby/buildkit) is installed.
For example, you can launch an instance of buildkitd with:
```console
$ sudo ./buildkitd --group $(id -gn) --addr unix://$HOME/buildkitd.sock
```
Alternatively, [see here](https://github.com/moby/buildkit/blob/master/docs/rootless.md)
for running buildkitd in rootless mode or [here](https://github.com/moby/buildkit/tree/master/examples/systemd)
for examples of running it as a systemd service.
You should now have a unix socket, accessible to your user, that you can
connect to:
```console
$ ls -lh /home/user/buildkitd.sock
srw-rw---- 1 root user 0 May 5 11:04 /home/user/buildkitd.sock
```
You can then connect buildx to it with the remote driver:
```console
$ docker buildx create \
--name remote-unix \
--driver remote \
unix://$HOME/buildkitd.sock
```
If you list available builders, you should then see `remote-unix` among them:
```console
$ docker buildx ls
NAME/NODE DRIVER/ENDPOINT STATUS PLATFORMS
remote-unix remote
remote-unix0 unix:///home/.../buildkitd.sock running linux/amd64, linux/amd64/v2, linux/amd64/v3, linux/386
default * docker
default default running linux/amd64, linux/386
```
We can switch to this new builder as the default using `docker buildx use remote-unix`,
or specify it per build:
```console
$ docker buildx build --builder=remote-unix -t test --load .
```
(remember that `--load` is necessary when not using the default `docker`
driver, to load the build result into the docker daemon)
## Remote Buildkit in Docker container
In this scenario, we'll create a similar setup to the `docker-container`
driver, by manually booting a buildkit docker container and connecting to it
using the buildx remote driver. In most cases you'd probably just use the
`docker-container` driver that connects to buildkit through the Docker daemon,
but in this case we manually create a container and access it via its exposed
port.
First, we need to generate certificates for buildkit - you can use the
[create-certs.sh](https://github.com/moby/buildkit/blob/v0.10.3/examples/kubernetes/create-certs.sh)
script as a starting point. Note that while it is *possible* to expose
buildkit over TCP without using TLS, it is **not recommended**, since this will
allow arbitrary access to buildkit without credentials.
With our certificates generated in `.certs/`, we start up the container:
```console
$ docker run -d --rm \
--name=remote-buildkitd \
--privileged \
-p 1234:1234 \
-v $PWD/.certs:/etc/buildkit/certs \
moby/buildkit:latest \
--addr tcp://0.0.0.0:1234 \
--tlscacert /etc/buildkit/certs/ca.pem \
--tlscert /etc/buildkit/certs/daemon-cert.pem \
--tlskey /etc/buildkit/certs/daemon-key.pem
```
The above command starts a buildkit container and exposes the daemon's port
1234 to localhost.
We can now connect to this running container using buildx:
```console
$ docker buildx create \
--name remote-container \
--driver remote \
--driver-opt cacert=.certs/ca.pem,cert=.certs/client-cert.pem,key=.certs/client-key.pem,servername=... \
tcp://localhost:1234
```
Alternatively, we could use the `docker-container://` URL scheme to connect
to the buildkit container without specifying a port:
```console
$ docker buildx create \
--name remote-container \
--driver remote \
docker-container://remote-container
```
## Remote Buildkit in Kubernetes
In this scenario, we'll create a similar setup to the `kubernetes` driver by
manually creating a buildkit `Deployment`. While the `kubernetes` driver will
do this under-the-hood, it might sometimes be desirable to scale buildkit
manually. Additionally, when executing builds from inside Kubernetes pods,
the buildx builder will need to be recreated from within each pod or copied
between them.
Firstly, we can create a kubernetes deployment of buildkitd, as per the
instructions [here](https://github.com/moby/buildkit/tree/master/examples/kubernetes).
Following the guide, we set up certificates for the buildkit daemon and client
(as above using [create-certs.sh](https://github.com/moby/buildkit/blob/v0.10.3/examples/kubernetes/create-certs.sh))
and create a `Deployment` of buildkit pods with a service that connects to
them.
Assuming that the service is called `buildkitd`, we can create a remote builder
in buildx, ensuring that the listed certificate files are present:
```console
$ docker buildx create \
--name remote-kubernetes \
--driver remote \
--driver-opt cacert=.certs/ca.pem,cert=.certs/client-cert.pem,key=.certs/client-key.pem \
tcp://buildkitd.default.svc:1234
```
Note that the above will only work in-cluster (since the buildkit setup guide
only creates a ClusterIP service). To configure the builder to be accessible
remotely, you can use an appropriately configured Ingress, which is outside the
scope of this guide.
To access the service remotely, we can use the port forwarding mechanism in
kubectl:
```console
$ kubectl port-forward svc/buildkitd 1234:1234
```
Then you can simply point the remote driver at `tcp://localhost:1234`.
Alternatively, we could use the `kube-pod://` URL scheme to connect
directly to a buildkit pod through the kubernetes api (note that this method
will only connect to a single pod in the deployment):
```console
$ kubectl get pods --selector=app=buildkitd -o json | jq -r '.items[].metadata.name'
buildkitd-XXXXXXXXXX-xxxxx
$ docker buildx create \
--name remote-container \
--driver remote \
kube-pod://buildkitd-XXXXXXXXXX-xxxxx
```
<!--- FIXME: for 0.9, add further reading section with link to reference --->

View File

@@ -1,14 +0,0 @@
# Buildx manuals 📚
This directory contains a bunch of useful docs for how to use Buildx features.
> **Note**
>
> The markdown files in this directory (excluding this README) are reused
> downstream by the
> [Docker documentation repository](https://github.com/docker/docs).
>
> If you wish to contribute to these docs, be sure to first review the
> [documentation contribution guidelines](https://docs.docker.com/contribute/overview/).
>
> Thank you!

View File

@@ -1,3 +0,0 @@
# Defining additional build contexts and linking targets
Moved to [docs.docker.com](https://docs.docker.com/build/customize/bake/build-contexts)

View File

@@ -1,3 +0,0 @@
# Building from Compose file
Moved to [docs.docker.com](https://docs.docker.com/build/customize/bake/compose-file)

View File

@@ -1,3 +0,0 @@
# Configuring builds
Moved to [docs.docker.com](https://docs.docker.com/build/customize/bake/configuring-build)

View File

@@ -1,3 +0,0 @@
# Bake file definition
Moved to [docs.docker.com](https://docs.docker.com/build/customize/bake/file-definition)

View File

@@ -1,3 +0,0 @@
# User defined HCL functions
Moved to [docs.docker.com](https://docs.docker.com/build/customize/bake/hcl-funcs)

View File

@@ -1,3 +0,0 @@
# High-level build options with Bake
Moved to [docs.docker.com](https://docs.docker.com/build/customize/bake)

View File

@@ -1,3 +0,0 @@
# Azure Blob Storage cache storage
Moved to [docs.docker.com](https://docs.docker.com/build/building/cache/backends/azblob)

View File

@@ -1,3 +0,0 @@
# GitHub Actions cache storage
Moved to [docs.docker.com](https://docs.docker.com/build/building/cache/backends/gha)

View File

@@ -1,3 +0,0 @@
# Cache storage backends
Moved to [docs.docker.com](https://docs.docker.com/build/building/cache/backends)

View File

@@ -1,3 +0,0 @@
# Inline cache storage
Moved to [docs.docker.com](https://docs.docker.com/build/building/cache/backends/inline)

View File

@@ -1,3 +0,0 @@
# Local cache storage
Moved to [docs.docker.com](https://docs.docker.com/build/building/cache/backends/local)

View File

@@ -1,3 +0,0 @@
# Registry cache storage
Moved to [docs.docker.com](https://docs.docker.com/build/building/cache/backends/registry)

View File

@@ -1,3 +0,0 @@
# Amazon S3 cache storage
Moved to [docs.docker.com](https://docs.docker.com/build/building/cache/backends/s3)

View File

@@ -1,3 +0,0 @@
# Docker container driver
Moved to [docs.docker.com](https://docs.docker.com/build/building/drivers/docker-container)

View File

@@ -1,3 +0,0 @@
# Docker driver
Moved to [docs.docker.com](https://docs.docker.com/build/building/drivers/docker)

View File

@@ -1,3 +0,0 @@
# Buildx drivers overview
Moved to [docs.docker.com](https://docs.docker.com/build/building/drivers)

View File

@@ -1,3 +0,0 @@
# Kubernetes driver
Moved to [docs.docker.com](https://docs.docker.com/build/building/drivers/kubernetes)

View File

@@ -1,3 +0,0 @@
# Remote driver
Moved to [docs.docker.com](https://docs.docker.com/build/building/drivers/remote)

View File

@@ -1,3 +0,0 @@
# Image and registry exporters
Moved to [docs.docker.com](https://docs.docker.com/build/building/exporters/image-registry)

View File

@@ -1,3 +0,0 @@
# Exporters overview
Moved to [docs.docker.com](https://docs.docker.com/build/building/exporters)

View File

@@ -1,3 +0,0 @@
# Local and tar exporters
Moved to [docs.docker.com](https://docs.docker.com/build/building/exporters/local-tar)

View File

@@ -1,3 +0,0 @@
# OCI and Docker exporters
Moved to [docs.docker.com](https://docs.docker.com/build/building/exporters/oci-docker)

View File

@@ -22,10 +22,8 @@ Build from a file
| [`--no-cache`](#no-cache) | | | Do not use cache when building the image |
| [`--print`](#print) | | | Print the options without building |
| [`--progress`](#progress) | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
| `--provenance` | `string` | | Shorthand for `--set=*.attest=type=provenance` |
| [`--pull`](#pull) | | | Always attempt to pull all referenced images |
| `--push` | | | Shorthand for `--set=*.output=type=registry` |
| `--sbom` | `string` | | Shorthand for `--set=*.attest=type=sbom` |
| [`--set`](#set) | `stringArray` | | Override target value (e.g., `targetpattern.key=value`) |
@@ -36,7 +34,7 @@ Build from a file
Bake is a high-level build command. Each specified target will run in parallel
as part of the build.
Read [High-level build options with Bake](https://docs.docker.com/build/customize/bake/)
Read [High-level build options with Bake](https://docs.docker.com/build/bake/)
guide for introduction to writing bake files.
> **Note**
@@ -87,7 +85,7 @@ target "db" {
$ docker buildx bake -f docker-bake.dev.hcl db webapp-release
```
See our [file definition](https://docs.docker.com/build/customize/bake/file-definition/)
See our [file definition](https://docs.docker.com/build/bake/file-definition/)
guide for more details.
### <a name="no-cache"></a> Do not use cache when building the image (--no-cache)

View File

@@ -17,7 +17,6 @@ Start a build
| --- | --- | --- | --- |
| [`--add-host`](https://docs.docker.com/engine/reference/commandline/build/#add-entries-to-container-hosts-file---add-host) | `stringSlice` | | Add a custom host-to-IP mapping (format: `host:ip`) |
| [`--allow`](#allow) | `stringSlice` | | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
| `--attest` | `stringArray` | | Attestation parameters (format: `type=sbom,generator=image`) |
| [`--build-arg`](#build-arg) | `stringArray` | | Set build-time variables |
| [`--build-context`](#build-context) | `stringArray` | | Additional build contexts (e.g., name=path) |
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
@@ -37,11 +36,9 @@ Start a build
| [`--platform`](#platform) | `stringArray` | | Set target platform for build |
| `--print` | `string` | | Print result of information request (e.g., outline, targets) [experimental] |
| [`--progress`](#progress) | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
| `--provenance` | `string` | | Shorthand for `--attest=type=provenance` |
| `--pull` | | | Always attempt to pull all referenced images |
| [`--push`](#push) | | | Shorthand for `--output=type=registry` |
| `-q`, `--quiet` | | | Suppress the build output and print image ID on success |
| `--sbom` | `string` | | Shorthand for `--attest=type=sbom` |
| [`--secret`](#secret) | `stringArray` | | Secret to expose to the build (format: `id=mysecret[,src=/local/secret]`) |
| [`--shm-size`](#shm-size) | `bytes` | `0` | Size of `/dev/shm` |
| [`--ssh`](#ssh) | `stringArray` | | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
@@ -131,8 +128,7 @@ $ docker buildx build --build-context project=path/to/project/source .
# docker buildx build --build-context project=https://github.com/myuser/project.git .
```
```dockerfile
# syntax=docker/dockerfile:1
```Dockerfile
FROM alpine
COPY --from=project myfile /
```
@@ -145,10 +141,10 @@ Source an image from a local [OCI layout compliant directory](https://github.com
$ docker buildx build --build-context foo=oci-layout:///path/to/local/layout@sha256:abcd12345 .
```
```dockerfile
# syntax=docker/dockerfile:1
```Dockerfile
FROM alpine
RUN apk add git
COPY --from=foo myfile /
FROM foo
@@ -174,7 +170,7 @@ Same as [`buildx --builder`](buildx.md#builder).
```
Use an external cache source for a build. Supported types are `registry`,
`local`, `gha` and `s3`.
`local` and `gha`.
- [`registry` source](https://github.com/moby/buildkit#registry-push-image-and-cache-separately)
can import cache from a cache manifest or (special) image configuration on the
@@ -184,9 +180,6 @@ Use an external cache source for a build. Supported types are `registry`,
- [`gha` source](https://github.com/moby/buildkit#github-actions-cache-experimental)
can import cache from a previously exported cache with `--cache-to` in your
GitHub repository
- [`s3` source](https://github.com/moby/buildkit#s3-cache-experimental)
can import cache from a previously exported cache with `--cache-to` in your
S3 bucket
If no type is specified, `registry` exporter is used with a specified reference.
@@ -198,7 +191,6 @@ $ docker buildx build --cache-from=user/app .
$ docker buildx build --cache-from=type=registry,ref=user/app .
$ docker buildx build --cache-from=type=local,src=path/to/cache .
$ docker buildx build --cache-from=type=gha .
$ docker buildx build --cache-from=type=s3,region=eu-west-1,bucket=mybucket .
```
More info about cache exporters and available attributes: https://github.com/moby/buildkit#export-cache
@@ -210,17 +202,15 @@ More info about cache exporters and available attributes: https://github.com/mob
```
Export build cache to an external cache destination. Supported types are
`registry`, `local`, `inline`, `gha` and `s3`.
`registry`, `local`, `inline` and `gha`.
- [`registry` type](https://github.com/moby/buildkit#registry-push-image-and-cache-separately) exports build cache to a cache manifest in the registry.
- [`local` type](https://github.com/moby/buildkit#local-directory-1) exports
cache to a local directory on the client.
- [`local` type](https://github.com/moby/buildkit#local-directory-1) type
exports cache to a local directory on the client.
- [`inline` type](https://github.com/moby/buildkit#inline-push-image-and-cache-together)
writes the cache metadata into the image configuration.
type writes the cache metadata into the image configuration.
- [`gha` type](https://github.com/moby/buildkit#github-actions-cache-experimental)
exports cache through the [GitHub Actions Cache service API](https://github.com/tonistiigi/go-actions-cache/blob/master/api.md#authentication).
- [`s3` type](https://github.com/moby/buildkit#s3-cache-experimental) exports
cache to a S3 bucket.
type exports cache through the [Github Actions Cache service API](https://github.com/tonistiigi/go-actions-cache/blob/master/api.md#authentication).
`docker` driver currently only supports exporting inline cache metadata to image
configuration. Alternatively, `--build-arg BUILDKIT_INLINE_CACHE=1` can be used
@@ -238,7 +228,6 @@ $ docker buildx build --cache-to=type=inline .
$ docker buildx build --cache-to=type=registry,ref=user/app .
$ docker buildx build --cache-to=type=local,dest=path/to/cache .
$ docker buildx build --cache-to=type=gha .
$ docker buildx build --cache-to=type=s3,region=eu-west-1,bucket=mybucket .
```
More info about cache exporters and available attributes: https://github.com/moby/buildkit#export-cache
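A common pattern is to export a full cache to a dedicated tag and import it on the next build (`mode=max` is a BuildKit cache-export attribute; `min` is the default). A sketch with placeholder names:

```console
# Export cache for all layers to a cache tag and reuse it on the next build
$ docker buildx build \
  --cache-to=type=registry,ref=user/app:buildcache,mode=max \
  --cache-from=type=registry,ref=user/app:buildcache .
```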
@@ -479,7 +468,7 @@ Attribute keys:
- `src`, `source` - Secret filename. `id` used if unset.
```dockerfile
# syntax=docker/dockerfile:1
# syntax=docker/dockerfile:1.4
FROM python:3
RUN pip install awscli
RUN --mount=type=secret,id=aws,target=/root/.aws/credentials \
@@ -498,7 +487,7 @@ Attribute keys:
- `env` - Secret environment variable. `id` used if unset, otherwise will look for `src`, `source` if `id` unset.
```dockerfile
# syntax=docker/dockerfile:1
# syntax=docker/dockerfile:1.4
FROM node:alpine
RUN --mount=type=bind,target=. \
--mount=type=secret,id=SECRET_TOKEN \
@@ -530,7 +519,7 @@ authentication (e.g., cloning a private repository).
Example to access Gitlab using an SSH agent socket:
```dockerfile
# syntax=docker/dockerfile:1
# syntax=docker/dockerfile:1.4
FROM alpine
RUN apk add --no-cache openssh-client
RUN mkdir -p -m 0700 ~/.ssh && ssh-keyscan gitlab.com >> ~/.ssh/known_hosts
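Using the agent socket at build time requires forwarding it explicitly. A minimal sketch, assuming an `ssh-agent` is running and `~/.ssh/id_rsa` is the key authorized for gitlab.com:

```console
$ eval $(ssh-agent)
$ ssh-add ~/.ssh/id_rsa   # placeholder key path
$ docker buildx build --ssh default=$SSH_AUTH_SOCK .
```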

View File

@@ -16,6 +16,7 @@ import (
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/imagetools"
"github.com/docker/buildx/util/progress"
"github.com/docker/docker/api/types"
dockertypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
@@ -36,6 +37,7 @@ const (
type Driver struct {
driver.InitConfig
factory driver.Factory
userNSRemap bool // true if dockerd is running with userns-remap mode
netMode string
image string
cgroupParent string
@@ -82,7 +84,7 @@ func (d *Driver) create(ctx context.Context, l progress.SubLogger) error {
if err != nil {
return err
}
rc, err := d.DockerAPI.ImageCreate(ctx, imageName, dockertypes.ImageCreateOptions{
rc, err := d.DockerAPI.ImageCreate(ctx, imageName, types.ImageCreateOptions{
RegistryAuth: ra,
})
if err != nil {
@@ -119,30 +121,19 @@ func (d *Driver) create(ctx context.Context, l progress.SubLogger) error {
},
},
}
if d.userNSRemap {
hc.UsernsMode = "host"
}
if d.netMode != "" {
hc.NetworkMode = container.NetworkMode(d.netMode)
}
if info, err := d.DockerAPI.Info(ctx); err == nil {
if info.CgroupDriver == "cgroupfs" {
// Place all buildkit containers inside this cgroup by default so limits can be attached
// to all build activity on the host.
hc.CgroupParent = "/docker/buildx"
if d.cgroupParent != "" {
hc.CgroupParent = d.cgroupParent
}
if info, err := d.DockerAPI.Info(ctx); err == nil && info.CgroupDriver == "cgroupfs" {
// Place all buildkit containers inside this cgroup by default so limits can be attached
// to all build activity on the host.
hc.CgroupParent = "/docker/buildx"
if d.cgroupParent != "" {
hc.CgroupParent = d.cgroupParent
}
secOpts, err := dockertypes.DecodeSecurityOptions(info.SecurityOptions)
if err != nil {
return err
}
for _, f := range secOpts {
if f.Name == "userns" {
hc.UsernsMode = "host"
break
}
}
}
_, err := d.DockerAPI.ContainerCreate(ctx, cfg, hc, &network.NetworkingConfig{}, nil, d.Name)
if err != nil {
@@ -195,7 +186,7 @@ func (d *Driver) wait(ctx context.Context, l progress.SubLogger) error {
}
func (d *Driver) copyLogs(ctx context.Context, l progress.SubLogger) error {
rc, err := d.DockerAPI.ContainerLogs(ctx, d.Name, dockertypes.ContainerLogsOptions{
rc, err := d.DockerAPI.ContainerLogs(ctx, d.Name, types.ContainerLogsOptions{
ShowStdout: true, ShowStderr: true,
})
if err != nil {
@@ -228,7 +219,7 @@ func (d *Driver) copyToContainer(ctx context.Context, files map[string][]byte) e
}
func (d *Driver) exec(ctx context.Context, cmd []string) (string, net.Conn, error) {
execConfig := dockertypes.ExecConfig{
execConfig := types.ExecConfig{
Cmd: cmd,
AttachStdin: true,
AttachStdout: true,
@@ -244,7 +235,7 @@ func (d *Driver) exec(ctx context.Context, cmd []string) (string, net.Conn, erro
return "", nil, errors.New("exec ID empty")
}
resp, err := d.DockerAPI.ContainerExecAttach(ctx, execID, dockertypes.ExecStartCheck{})
resp, err := d.DockerAPI.ContainerExecAttach(ctx, execID, types.ExecStartCheck{})
if err != nil {
return "", nil, err
}
@@ -271,7 +262,7 @@ func (d *Driver) run(ctx context.Context, cmd []string, stdout, stderr io.Writer
}
func (d *Driver) start(ctx context.Context, l progress.SubLogger) error {
return d.DockerAPI.ContainerStart(ctx, d.Name, dockertypes.ContainerStartOptions{})
return d.DockerAPI.ContainerStart(ctx, d.Name, types.ContainerStartOptions{})
}
func (d *Driver) Info(ctx context.Context) (*driver.Info, error) {

View File

@@ -6,6 +6,7 @@ import (
"strings"
"github.com/docker/buildx/driver"
dockertypes "github.com/docker/docker/api/types"
dockerclient "github.com/docker/docker/client"
"github.com/pkg/errors"
)
@@ -40,6 +41,20 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver
return nil, errors.Errorf("%s driver requires docker API access", f.Name())
}
d := &Driver{factory: f, InitConfig: cfg}
dockerInfo, err := cfg.DockerAPI.Info(ctx)
if err != nil {
return nil, err
}
secOpts, err := dockertypes.DecodeSecurityOptions(dockerInfo.SecurityOptions)
if err != nil {
return nil, err
}
for _, f := range secOpts {
if f.Name == "userns" {
d.userNSRemap = true
break
}
}
for k, v := range cfg.DriverOpts {
switch {
case k == "network":

View File

@@ -1,10 +1,8 @@
package context
import (
"net/url"
"os"
"path/filepath"
"strings"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/context"
@@ -155,20 +153,3 @@ func NewKubernetesConfig(configPath string) clientcmd.ClientConfig {
&clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeConfig},
&clientcmd.ConfigOverrides{})
}
// ConfigFromEndpoint loads kubernetes config from endpoint
func ConfigFromEndpoint(endpointName string, s store.Reader) (clientcmd.ClientConfig, error) {
if strings.HasPrefix(endpointName, "kubernetes://") {
u, _ := url.Parse(endpointName)
if kubeconfig := u.Query().Get("kubeconfig"); kubeconfig != "" {
_ = os.Setenv(clientcmd.RecommendedConfigPathEnvVar, kubeconfig)
}
rules := clientcmd.NewDefaultClientConfigLoadingRules()
apiConfig, err := rules.Load()
if err != nil {
return nil, err
}
return clientcmd.NewDefaultClientConfig(*apiConfig, &clientcmd.ConfigOverrides{}), nil
}
return ConfigFromContext(endpointName, s)
}

View File

@@ -13,7 +13,7 @@ import (
func TestDefaultContextInitializer(t *testing.T) {
os.Setenv("KUBECONFIG", "./fixtures/test-kubeconfig")
defer os.Unsetenv("KUBECONFIG")
ctx, err := command.ResolveDefaultContext(&cliflags.ClientOptions{}, command.DefaultContextStoreConfig())
ctx, err := command.ResolveDefaultContext(&cliflags.CommonOptions{}, command.DefaultContextStoreConfig())
require.NoError(t, err)
assert.Equal(t, "default", ctx.Meta.Name)
assert.Equal(t, "zoinx", ctx.Meta.Endpoints[KubernetesEndpoint].(EndpointMeta).DefaultNamespace)

go.mod (92 changed lines)
View File

@@ -3,37 +3,35 @@ module github.com/docker/buildx
go 1.17
require (
github.com/aws/aws-sdk-go-v2/config v1.15.5
github.com/compose-spec/compose-go v1.6.0
github.com/compose-spec/compose-go v1.4.0
github.com/containerd/console v1.0.3
github.com/containerd/containerd v1.6.11
github.com/docker/cli v23.0.0-beta.1+incompatible
github.com/containerd/containerd v1.6.6
github.com/docker/cli v20.10.17+incompatible // v22.06.x - see "replace" for the actual version
github.com/docker/cli-docs-tool v0.5.0
github.com/docker/distribution v2.8.1+incompatible
github.com/docker/docker v23.0.0-beta.1+incompatible
github.com/docker/go-units v0.5.0
github.com/gofrs/flock v0.8.1
github.com/docker/docker v20.10.17+incompatible // v22.06.x - see "replace" for the actual version
github.com/docker/go-units v0.4.0
github.com/gofrs/flock v0.7.3
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
github.com/hashicorp/go-cty-funcs v0.0.0-20200930094925-2721b1e36840
github.com/hashicorp/hcl/v2 v2.8.2
github.com/moby/buildkit v0.11.0-rc1.0.20221213193744-862b22d7e7cf
github.com/moby/sys/mountinfo v0.6.2
github.com/moby/buildkit v0.10.1-0.20220816171719-55ba9d14360a
github.com/morikuni/aec v1.0.0
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1
github.com/pelletier/go-toml v1.9.5
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799
github.com/pelletier/go-toml v1.9.4
github.com/pkg/errors v0.9.1
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002
github.com/sirupsen/logrus v1.9.0
github.com/spf13/cobra v1.6.1
github.com/spf13/cobra v1.5.0
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.8.1
github.com/stretchr/testify v1.8.0
github.com/zclconf/go-cty v1.10.0
go.opentelemetry.io/otel v1.4.1
go.opentelemetry.io/otel/trace v1.4.1
golang.org/x/sync v0.1.0
golang.org/x/term v0.3.0
google.golang.org/grpc v1.50.1
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
google.golang.org/grpc v1.47.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.23.5
k8s.io/apimachinery v0.23.5
@@ -41,8 +39,7 @@ require (
)
require (
cloud.google.com/go/compute v1.12.1 // indirect
cloud.google.com/go/compute/metadata v0.2.1 // indirect
cloud.google.com/go v0.81.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.24 // indirect
@@ -56,16 +53,6 @@ require (
github.com/apparentlymart/go-cidr v1.0.1 // indirect
github.com/apparentlymart/go-textseg/v12 v12.0.0 // indirect
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
github.com/aws/aws-sdk-go-v2 v1.16.3 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.12.0 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.4 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.10 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.4 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.11 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.4 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.11.4 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.16.4 // indirect
github.com/aws/smithy-go v1.11.2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bugsnag/bugsnag-go v1.4.1 // indirect
github.com/bugsnag/panicwrap v1.2.0 // indirect
@@ -78,7 +65,7 @@ require (
github.com/containerd/typeurl v1.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/distribution/distribution/v3 v3.0.0-20220725133111-4bf3547399eb // indirect
github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/docker/docker-credential-helpers v0.6.4 // indirect
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
@@ -86,15 +73,15 @@ require (
github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484 // indirect
github.com/felixge/httpsnoop v1.0.2 // indirect
github.com/fvbommel/sortorder v1.0.1 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-logr/logr v1.2.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-sql-driver/mysql v1.6.0 // indirect
github.com/gogo/googleapis v1.4.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/certificate-transparency-go v1.0.21 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/go-cmp v0.5.8 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
@@ -103,36 +90,33 @@ require (
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jinzhu/gorm v1.9.2 // indirect
github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
github.com/klauspost/compress v1.15.12 // indirect
github.com/klauspost/compress v1.15.7 // indirect
github.com/kr/pretty v0.3.0 // indirect
github.com/mattn/go-shellwords v1.0.12 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/locker v1.0.1 // indirect
github.com/moby/patternmatcher v0.5.0 // indirect
github.com/moby/spdystream v0.2.0 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
github.com/moby/sys/signal v0.7.0 // indirect
github.com/moby/term v0.0.0-20221120202655-abb19827d345 // indirect
github.com/moby/sys/signal v0.6.0 // indirect
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/opencontainers/runc v1.1.3 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.14.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/prometheus/client_golang v1.12.2 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/rogpeppe/go-internal v1.8.1 // indirect
github.com/spf13/viper v1.14.0 // indirect
github.com/theupdateframework/notary v0.6.1 // indirect
github.com/tonistiigi/fsutil v0.0.0-20221114235510-0127568185cf // indirect
github.com/tonistiigi/fsutil v0.0.0-20220510150904-0dbf3a8a7d58 // indirect
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect
github.com/tonistiigi/vt100 v0.0.0-20210615222946-8066bb97264f // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
@@ -149,15 +133,15 @@ require (
go.opentelemetry.io/otel/metric v0.27.0 // indirect
go.opentelemetry.io/otel/sdk v1.4.1 // indirect
go.opentelemetry.io/proto/otlp v0.12.0 // indirect
golang.org/x/crypto v0.2.0 // indirect
golang.org/x/net v0.4.0 // indirect
golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 // indirect
golang.org/x/sys v0.3.0 // indirect
golang.org/x/text v0.5.0 // indirect
golang.org/x/time v0.1.0 // indirect
golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd // indirect
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e // indirect
google.golang.org/protobuf v1.28.1 // indirect
google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6 // indirect
google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/dancannon/gorethink.v3 v3.0.5 // indirect
gopkg.in/fatih/pool.v2 v2.0.0 // indirect
@@ -171,6 +155,8 @@ require (
)
replace (
github.com/docker/cli => github.com/docker/cli v20.10.3-0.20220803220330-418ca3b4d46f+incompatible // master (v22.06-dev)
github.com/docker/docker => github.com/docker/docker v20.10.3-0.20220720171342-a60b458179aa+incompatible // 22.06 branch (v22.06-dev)
k8s.io/api => k8s.io/api v0.22.4
k8s.io/apimachinery => k8s.io/apimachinery v0.22.4
k8s.io/client-go => k8s.io/client-go v0.22.4

go.sum (337 changed lines)
View File

@@ -1,3 +1,4 @@
bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
@@ -13,16 +14,18 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8=
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0=
cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48=
cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@@ -57,8 +60,9 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Microsoft/hcsshim v0.9.5 h1:AbV+VPfTrIVffukazHcpxmz/sRiE6YaMDzHWR9BXZHo=
github.com/Microsoft/hcsshim v0.9.3 h1:k371PzBuRrz2b+ebGuI2nVgVhgsVX60jMfSw80NECxo=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=
@@ -81,29 +85,8 @@ github.com/apparentlymart/go-textseg/v12 v12.0.0 h1:bNEQyAGak9tojivJNkoqWErVCQbj
github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec=
github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw=
github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go-v2 v1.16.3 h1:0W1TSJ7O6OzwuEvIXAtJGvOeQ0SGAhcpxPN2/NK5EhM=
github.com/aws/aws-sdk-go-v2 v1.16.3/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU=
github.com/aws/aws-sdk-go-v2/config v1.15.5 h1:P+xwhr6kabhxDTXTVH9YoHkqjLJ0wVVpIUHtFNr2hjU=
github.com/aws/aws-sdk-go-v2/config v1.15.5/go.mod h1:ZijHHh0xd/A+ZY53az0qzC5tT46kt4JVCePf2NX9Lk4=
github.com/aws/aws-sdk-go-v2/credentials v1.12.0 h1:4R/NqlcRFSkR0wxOhgHi+agGpbEr5qMCjn7VqUIJY+E=
github.com/aws/aws-sdk-go-v2/credentials v1.12.0/go.mod h1:9YWk7VW+eyKsoIL6/CljkTrNVWBSK9pkqOPUuijid4A=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.4 h1:FP8gquGeGHHdfY6G5llaMQDF+HAf20VKc8opRwmjf04=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.4/go.mod h1:u/s5/Z+ohUQOPXl00m2yJVyioWDECsbpXTQlaqSlufc=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.10 h1:uFWgo6mGJI1n17nbcvSc6fxVuR3xLNqvXt12JCnEcT8=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.10/go.mod h1:F+EZtuIwjlv35kRJPyBGcsA4f7bnSoz15zOQ2lJq1Z4=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.4 h1:cnsvEKSoHN4oAN7spMMr0zhEW2MHnhAVpmqQg8E6UcM=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.4/go.mod h1:8glyUqVIM4AmeenIsPo0oVh3+NUwnsQml2OFupfQW+0=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.11 h1:6cZRymlLEIlDTEB0+5+An6Zj1CKt6rSE69tOmFeu1nk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.11/go.mod h1:0MR+sS1b/yxsfAPvAESrw8NfwUoxMinDyw6EYR9BS2U=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.4 h1:b16QW0XWl0jWjLABFc1A+uh145Oqv+xDcObNk0iQgUk=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.4/go.mod h1:uKkN7qmSIsNJVyMtxNQoCEYMvFEXbOg9fwCJPdfp2u8=
github.com/aws/aws-sdk-go-v2/service/sso v1.11.4 h1:Uw5wBybFQ1UeA9ts0Y07gbv0ncZnIAyw858tDW0NP2o=
github.com/aws/aws-sdk-go-v2/service/sso v1.11.4/go.mod h1:cPDwJwsP4Kff9mldCXAmddjJL6JGQqtA3Mzer2zyr88=
github.com/aws/aws-sdk-go-v2/service/sts v1.16.4 h1:+xtV90n3abQmgzk1pS++FdxZTrPEDgQng6e4/56WR2A=
github.com/aws/aws-sdk-go-v2/service/sts v1.16.4/go.mod h1:lfSYenAXtavyX2A1LsViglqlG9eEFYxNryTZS5rn3QE=
github.com/aws/smithy-go v1.11.2 h1:eG/N+CcUMAvsdffgMvjMKwfyDzIkjM6pfxMJ8Mzc6mE=
github.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -118,6 +101,7 @@ github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QH
github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo=
github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -130,49 +114,59 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
github.com/cloudflare/cfssl v0.0.0-20181213083726-b94e044bb51e h1:Qux+lbuMaRzkQyTdzgtz8MgzPtzmaPQy6DXmxpdxT3U=
github.com/cloudflare/cfssl v0.0.0-20181213083726-b94e044bb51e/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/compose-spec/compose-go v1.6.0 h1:7Ol/UULMUtbPmB0EYrETASRoum821JpOh/XaEf+hN+Q=
github.com/compose-spec/compose-go v1.6.0/go.mod h1:os+Ulh2jlZxY1XT1hbciERadjSUU/BtZ6+gcN7vD7J0=
github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA=
github.com/compose-spec/compose-go v1.4.0 h1:zaYVAZ6lIByr7Jffi20AabfeUwcTrdXfH3X1R5HEm+g=
github.com/compose-spec/compose-go v1.4.0/go.mod h1:l7RUULbFFLzlQHuxtJr7SVLyWdqEpbJEGTWCgcu6Eqw=
github.com/containerd/cgroups v1.0.3 h1:ADZftAkglvCiD44c77s5YmMqaP2pzVCFZvBmAlBdAP4=
github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw=
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
github.com/containerd/containerd v1.6.11 h1:uIn0uKrRhETIPyAb0lz6WY2xhYBObUOF1bBi5rqZ5x4=
github.com/containerd/containerd v1.6.11/go.mod h1:K4Bw7gjgh4TnkmQY+py/PYQGp4e7xgnHAeg87VeWb3A=
github.com/containerd/containerd v1.6.6 h1:xJNPhbrmz8xAMDNoVjHy9YHtWwEQNS+CDkcIRh7t8Y0=
github.com/containerd/containerd v1.6.6/go.mod h1:ZoP1geJldzCVY3Tonoz7b1IXk8rIX0Nltt5QE4OMNk0=
github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg=
github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM=
github.com/containerd/fifo v1.0.0 h1:6PirWBr9/L7GDamKr+XM0IeUFXu5mf3M/BPpH9gaLBU=
github.com/containerd/nydus-snapshotter v0.3.1 h1:b8WahTrPkt3XsabjG2o/leN4fw3HWZYr+qxo/Z8Mfzk=
github.com/containerd/stargz-snapshotter v0.13.0 h1:3zr1/IkW1aEo6cMYTQeZ4L2jSuCN+F4kgGfjnuowe4U=
github.com/containerd/stargz-snapshotter/estargz v0.13.0 h1:fD7AwuVV+B40p0d9qVkH/Au1qhp8hn/HWJHIYjpEcfw=
github.com/containerd/ttrpc v1.1.0 h1:GbtyLRxb0gOLR0TYQWt3O6B0NvT8tMdorEHqIQo/lWI=
github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY=
github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/distribution/distribution/v3 v3.0.0-20220725133111-4bf3547399eb h1:oCCuuU3kMO3sjZH/p7LamvQNW9SWoT4yQuMGcdSxGAE=
github.com/distribution/distribution/v3 v3.0.0-20220725133111-4bf3547399eb/go.mod h1:28YO/VJk9/64+sTGNuYaBjWxrXTPrj0C0XmgTIOjxX4=
github.com/docker/cli v23.0.0-beta.1+incompatible h1:K9CMaN5nHB1eu2f02PURnJhlPhWuFl0s9mL3kildAtE=
github.com/docker/cli v23.0.0-beta.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v20.10.3-0.20220803220330-418ca3b4d46f+incompatible h1:iKanFYBu6Cum7d9j8JGTw2s/d7hUAcXRkEcp2m8b6Qc=
github.com/docker/cli v20.10.3-0.20220803220330-418ca3b4d46f+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli-docs-tool v0.5.0 h1:EjGwI6EyB7YemHCC7R8mwXszJTbuq0T0pFuDC5bMhcE=
github.com/docker/cli-docs-tool v0.5.0/go.mod h1:zMjqTFCU361PRh8apiXzeAZ1Q/xupbIwTusYpzCXS/o=
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v23.0.0-beta.1+incompatible h1:0Xv+AFPWxTbmohdLK57pYRPmefCKthtfRF/qQwXHolg=
github.com/docker/docker v23.0.0-beta.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
github.com/docker/docker v20.10.3-0.20220720171342-a60b458179aa+incompatible h1:gaceOC7utpmWqA7OF95kxXO2lGNTkbHIvgxpE+0hPi8=
github.com/docker/docker v20.10.3-0.20220720171342-a60b458179aa+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o=
github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0=
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c/go.mod h1:CADgU4DSXK5QUlFslkQu2yW2TKzFZcXq/leZfM0UH5Q=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
@@ -180,12 +174,12 @@ github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5Xh
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=
github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4 h1:k8TfKGeAcDQFFQOGCQMRN04N4a9YrPlRMMKnzAuvM9Q=
github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484 h1:pEtiCjIXx3RvGjlUJuCNxNOw0MNblyR9Wi+vJGBFh+8=
github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
@@ -194,8 +188,10 @@ github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW4o=
@@ -204,8 +200,8 @@ github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoD
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fvbommel/sortorder v1.0.1 h1:dSnXLt4mJYH25uDDGa3biZNQsozaUWDSWeKJ0qqFfzE=
github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -215,17 +211,14 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
@@ -238,19 +231,20 @@ github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gofrs/flock v0.7.3 h1:I0EKY9l8HZCXTMYC4F80vwT6KNypV9uYKP3Alm/hjmQ=
github.com/gofrs/flock v0.7.3/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0=
github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU=
github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs=
github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -263,6 +257,7 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -279,6 +274,7 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -293,17 +289,21 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
@@ -311,6 +311,10 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
@@ -325,10 +329,14 @@ github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
@@ -338,22 +346,22 @@ github.com/hashicorp/go-cty-funcs v0.0.0-20200930094925-2721b1e36840/go.mod h1:A
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/hcl/v2 v2.8.2 h1:wmFle3D1vu0okesm8BTLVDyJ6/OL9DCLUwn0b2OptiY=
github.com/hashicorp/hcl/v2 v2.8.2/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/in-toto/in-toto-golang v0.3.4-0.20220709202702-fa494aaa0add h1:DAh7mHiRT7wc6kKepYdCpH16ElPciMPQWJaJ7H3l/ng=
github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jinzhu/gorm v1.9.2 h1:lCvgEaqe/HVE+tjAR2mt4HbbHAZsQOv3XAZiEZV37iw=
github.com/jinzhu/gorm v1.9.2/go.mod h1:Vla75njaFJ8clLU1W44h34PjIkijhjHIYnZxMqCdxqo=
github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a h1:eeaG9XMUvRBYXJi4pg1ZKM7nxc5AfXfojeLLW7O5J3k=
github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -367,10 +375,11 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM=
github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
github.com/klauspost/compress v1.15.7 h1:7cgTQxJCU/vy+oP/E3B9RGbQTgbiVzIJWIKOLoAsPok=
github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@@ -383,16 +392,17 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4=
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo=
github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@@ -401,23 +411,18 @@ github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZX
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/buildkit v0.11.0-rc1.0.20221213193744-862b22d7e7cf h1:X4p2o1QeaKAJ8prPctFx98UrokFHsUgbiC0lDUObpOk=
github.com/moby/buildkit v0.11.0-rc1.0.20221213193744-862b22d7e7cf/go.mod h1:f3jvilDvcG14z+gzPpA2lcWRwIRyFiNTo5bMwHiYDk0=
github.com/moby/buildkit v0.10.1-0.20220816171719-55ba9d14360a h1:NI01Z14Hbwo1MHq8ylu4HNkmKGnhk8UZsD6c6FVMcA8=
github.com/moby/buildkit v0.10.1-0.20220816171719-55ba9d14360a/go.mod h1:Wa+LkeUQ9NJTVXTAY38rhkfKVQcuCIo2fbavRSuGsbI=
github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo=
github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
github.com/moby/sys/signal v0.7.0 h1:25RW3d5TnQEoKvRbEKUGay6DCQ46IxAVTT9CUMgmsSI=
github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
github.com/moby/term v0.0.0-20221120202655-abb19827d345 h1:J9c53/kxIH+2nTKBEfZYFMlhghtHpIHSXpm5VRGHSnU=
github.com/moby/term v0.0.0-20221120202655-abb19827d345/go.mod h1:15ce4BGCFxt7I5NQKT+HV0yEDxmf6fSysfEDiVo3zFM=
github.com/moby/sys/signal v0.6.0 h1:aDpY94H8VlhTGa9sNYUFCFsMZIUh5wm0B6XkIoJj/iY=
github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -434,6 +439,7 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
@@ -443,19 +449,18 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1 h1:9iFHD5Kt9hkOfeawBNiEeEaV7bmC4/Z5wJp8E9BptMs=
github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1/go.mod h1:K/JAU0m27RFhDRX4PcFdIKntROP6y5Ed6O91aZYDQfs=
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec=
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w=
github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc=
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
github.com/opencontainers/selinux v1.10.2 h1:NFy2xCsjn7+WspbfZkUd5zyVeisV7VFbPSP96+8/ha4=
github.com/opencontainers/selinux v1.10.1 h1:09LIPVRP3uuZGQvgR+SgMSNBd1Eb3vlRbGqQpoHsF8w=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170 h1:DiLBVp4DAcZlBVBEtJpNWZpZVq0AEeCY7Hqk8URVs4o=
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -465,50 +470,49 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34=
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 h1:ka9QPuQg2u4LGipiZGsgkg3rJCo4iIUCy75FddM0GRQ=
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002/go.mod h1:/yeG0My1xr/u+HZrFQ1tOQQQQrOawfyMUH13ai5brBc=
github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
@@ -517,44 +521,50 @@ github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw=
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU=
github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.14.0 h1:Rg7d3Lo706X9tHsJMUjdiwMpHB7W8WnSVOssIY+JElU=
github.com/spf13/viper v1.14.0/go.mod h1:WT//axPky3FdvXHzGw33dNdXXXfFQqmEalje+egj8As=
github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/theupdateframework/notary v0.6.1 h1:7wshjstgS9x9F5LuB1L5mBI2xNMObWqjz+cjWoom6l0=
github.com/theupdateframework/notary v0.6.1/go.mod h1:MOfgIfmox8s7/7fduvB2xyPPMJCrjRLRizA8OFwpnKY=
github.com/tonistiigi/fsutil v0.0.0-20221114235510-0127568185cf h1:2n2v98sRhXEG0Kh7+EvctaNIyOim36Ekp4pGDzbuvO8=
github.com/tonistiigi/fsutil v0.0.0-20221114235510-0127568185cf/go.mod h1:AvLEd1LEIl64G2Jpgwo7aVV5lGH0ePcKl0ygGIHNYl8=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tonistiigi/fsutil v0.0.0-20220510150904-0dbf3a8a7d58 h1:rNya5ozLqz0lK46XhmsmqJuxmQLM8dAWabiT+5gZqLY=
github.com/tonistiigi/fsutil v0.0.0-20220510150904-0dbf3a8a7d58/go.mod h1:oPAfvw32vlUJSjyDcQ3Bu0nb2ON2B+G0dtVN/SZNJiA=
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea h1:SXhTLE6pb6eld/v/cCndK0AMpt1wiVFb/YYmqB3/QG0=
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea/go.mod h1:WPnis/6cRcDZSUvVmezrxJPkiO87ThFYsoUiMwWNDJk=
github.com/tonistiigi/vt100 v0.0.0-20210615222946-8066bb97264f h1:DLpt6B5oaaS8jyXHa9VA4rrZloBVPVXeCtrOsrFauxc=
github.com/tonistiigi/vt100 v0.0.0-20210615222946-8066bb97264f/go.mod h1:ulncasL3N9uLrVann0m+CDlJKWsIAP34MPcOJF6VRvc=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
@@ -566,6 +576,8 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHo
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -575,12 +587,15 @@ github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q
github.com/zclconf/go-cty v1.4.0/go.mod h1:nHzOclRkoj++EU9ZjSrZvRG0BXIWt8c7loYc0qXAFGQ=
github.com/zclconf/go-cty v1.10.0 h1:mp9ZXQeIcN8kAwuqorjH+Q+njbJKjLrvB2yIh4q7U+0=
github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0 h1:n9b7AAdbQtQ0k9dm0Dm2/KUcUqtG8i2O15KzNaDze8c=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0/go.mod h1:LsankqVDx4W+RhZNA5uWarULII/MBhF5qwCYxTuyXjs=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.29.0 h1:Wjp9vsVSIEyvdiaECfqxY9xBqQ7JaSCGtvHgR4doXZk=
@@ -627,8 +642,8 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.2.0 h1:BRXPfhNivWL5Yq0BGQ39a2sW6t44aODpfxkWjYdzewE=
golang.org/x/crypto v0.2.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd h1:XcWmESyNjXJMLahc3mqVQJcgSTDxFxhETVlfk9uGc38=
golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -651,6 +666,7 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
@@ -659,18 +675,22 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -693,25 +713,33 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU=
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk=
golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -723,11 +751,12 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -766,9 +795,18 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210313202042-bd2e13477e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -779,34 +817,31 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA=
golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44=
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -818,6 +853,7 @@ golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBn
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -848,7 +884,13 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -870,6 +912,11 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -909,11 +956,22 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e h1:S9GbmC1iCgvbLyAokVCwiO6tVIrU9Y7c5oMx1V/ki/Y=
google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6 h1:FglFEfyj61zP3c6LgjmVHxYxZWXYul9oiS1EZqD5gLc=
google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
@@ -924,12 +982,17 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY=
google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8=
google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -942,9 +1005,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -962,8 +1024,9 @@ gopkg.in/gorethink/gorethink.v3 v3.0.5 h1:e2Uc/Xe+hpcVQFsj6MuHlYog3r0JYpnTzwDj/y
gopkg.in/gorethink/gorethink.v3 v3.0.5/go.mod h1:+3yIIHJUGMBK+wyPH+iN5TP+88ikFDfZdqTlK3Y9q8I=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -979,6 +1042,8 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
gotest.tools/v3 v3.3.0 h1:MfDY1b1/0xN1CyMlQDac0ziEy9zJQd9CXBRRDHw2jJo=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=


@@ -1,16 +0,0 @@
#!/usr/bin/env sh
set -e
: "${DESTDIR=./bin/build}"
: "${PACKAGE=github.com/docker/buildx}"
: "${VERSION=$(./hack/git-meta version)}"
: "${REVISION=$(./hack/git-meta revision)}"
: "${CGO_ENABLED=0}"
: "${GO_PKG=github.com/docker/buildx}"
: "${GO_LDFLAGS=-X ${GO_PKG}/version.Version=${VERSION} -X ${GO_PKG}/version.Revision=${REVISION} -X ${GO_PKG}/version.Package=${PACKAGE}}"
: "${GO_EXTRA_LDFLAGS=}"
set -x
CGO_ENABLED=$CGO_ENABLED go build -mod vendor -trimpath -ldflags "${GO_LDFLAGS} ${GO_EXTRA_LDFLAGS}" -o "${DESTDIR}/docker-buildx" ./cmd/buildx


@@ -1,6 +1,6 @@
# syntax=docker/dockerfile:1.4
ARG GO_VERSION=1.19
ARG GO_VERSION=1.18
ARG FORMATS=md,yaml
FROM golang:${GO_VERSION}-alpine AS docsgen


@@ -1,10 +1,10 @@
# syntax=docker/dockerfile:1.4
ARG GO_VERSION=1.19
ARG GO_VERSION=1.18
FROM golang:${GO_VERSION}-alpine
RUN apk add --no-cache git gcc musl-dev
RUN wget -O- -nv https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.48.0
RUN wget -O- -nv https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.45.2
WORKDIR /go/src/github.com/docker/buildx
RUN --mount=target=/go/src/github.com/docker/buildx --mount=target=/root/.cache,type=cache \
golangci-lint run


@@ -1,6 +1,6 @@
# syntax=docker/dockerfile:1.4
ARG GO_VERSION=1.19
ARG GO_VERSION=1.18
ARG MODOUTDATED_VERSION=v0.8.0
FROM golang:${GO_VERSION}-alpine AS base


@@ -1,16 +0,0 @@
#!/usr/bin/env sh
set -e
case $1 in
"version")
git describe --match 'v[0-9]*' --dirty='.m' --always --tags
;;
"revision")
echo "$(git rev-parse HEAD)$(if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi)"
;;
*)
echo "usage: ./hack/git-meta <version|revision>"
exit 1
;;
esac


@@ -2,16 +2,17 @@
set -eu -o pipefail
: "${DESTDIR=./bin/release}"
if [ ! -d "$DESTDIR" ]; then
exit 0
fi
: ${RELEASE_OUT=./release-out}
# checksums
if ! type shasum > /dev/null 2>&1; then
echo >&2 "ERROR: shasum is required"
exit 1
fi
find ./${RELEASE_OUT}/ -type f \( -iname "buildx-*" ! -iname "*darwin*" \) -print0 | sort -z | xargs -r0 shasum -a 256 -b | sed 's# .*/# #' > ./${RELEASE_OUT}/checksums.txt
# verify
(
cd ${DESTDIR}
sha256sum -b buildx-* > ./checksums.txt
sed -i '/darwin/d' ./checksums.txt
sha256sum -c --strict checksums.txt
cd ./${RELEASE_OUT}
shasum -a 256 -U -c checksums.txt
)


@@ -2,56 +2,27 @@
set -eu -o pipefail
: "${GITHUB_ACTIONS=}"
: "${GITHUB_REPOSITORY=}"
: "${GITHUB_RUN_ID=}"
: "${BUILDX_CMD=docker buildx}"
: "${DESTDIR=./bin/release}"
: "${CACHE_FROM=}"
: "${CACHE_TO=}"
: "${PLATFORMS=}"
: ${BUILDX_CMD=docker buildx}
: ${RELEASE_OUT=./release-out}
: ${CACHE_FROM=}
: ${CACHE_TO=}
if [ -n "$CACHE_FROM" ]; then
for cfrom in $CACHE_FROM; do
setFlags+=(--set "*.cache-from=$cfrom")
cacheFlags+=(--set "*.cache-from=$cfrom")
done
fi
if [ -n "$CACHE_TO" ]; then
for cto in $CACHE_TO; do
setFlags+=(--set "*.cache-to=$cto")
cacheFlags+=(--set "*.cache-to=$cto")
done
fi
if [ -n "$PLATFORMS" ]; then
setFlags+=(--set "*.platform=$PLATFORMS")
fi
if ${BUILDX_CMD} build --help 2>&1 | grep -- '--attest' >/dev/null; then
prvattrs="mode=max"
if [ "$GITHUB_ACTIONS" = "true" ]; then
prvattrs="$prvattrs,builder-id=https://github.com/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}"
fi
setFlags+=(--set "*.attest=type=sbom")
setFlags+=(--set "*.attest=type=provenance,$prvattrs")
fi
output=$(mktemp -d -t buildx-output.XXXXXXXXXX)
# release
(set -x ; ${BUILDX_CMD} bake "${cacheFlags[@]}" --set "*.output=$RELEASE_OUT" release)
(
set -x
${BUILDX_CMD} bake "${setFlags[@]}" --set "*.args.BUILDKIT_MULTI_PLATFORM=true" --set "*.output=$output" release
)
# wrap binaries
mv -f ./${RELEASE_OUT}/**/* ./${RELEASE_OUT}/
find ./${RELEASE_OUT} -type d -empty -delete
for pdir in "${output}"/*/; do
(
cd "$pdir"
binname=$(find . -name 'buildx-*')
filename=$(basename "${binname%.exe}")
mv "provenance.json" "${filename}.provenance.json"
mv "sbom-binaries.spdx.json" "${filename}.sbom.json"
find . -name 'sbom*.json' -exec rm {} \;
)
done
mkdir -p "$DESTDIR"
mv "$output"/**/* "$DESTDIR/"
rm -rf "$output"
source ./hack/hash-files


@@ -2,8 +2,8 @@
set -e
: "${BUILDX_CMD=docker buildx}"
: "${TMUX=}"
: ${BUILDX_CMD=docker buildx}
: ${TMUX=}
function clean {
docker rmi $iid


@@ -2,14 +2,14 @@
set -eu -o pipefail
: "${BUILDX_CMD=docker buildx}"
: "${BUILDKIT_IMAGE=moby/buildkit:buildx-stable-1}"
: "${BUILDKIT_CFG=}"
: "${DRIVER=docker-container}"
: "${DRIVER_OPT=}"
: "${ENDPOINT=}"
: "${MULTI_NODE=0}"
: "${PLATFORMS=linux/amd64,linux/arm64}"
: ${BUILDX_CMD=docker buildx}
: ${BUILDKIT_IMAGE=moby/buildkit:buildx-stable-1}
: ${BUILDKIT_CFG=}
: ${DRIVER=docker-container}
: ${DRIVER_OPT=}
: ${ENDPOINT=}
: ${MULTI_NODE=0}
: ${PLATFORMS=linux/amd64,linux/arm64}
function buildxCmd {
(set -x ; $BUILDX_CMD "$@")
@@ -131,14 +131,6 @@ buildxCmd build ${buildPlatformFlag} \
"${context}"
cat "${context}/metadata-build.json"
# load to docker store
if [ "$DRIVER" != "docker" ]; then
buildxCmd build \
--output="type=docker,name=buildx-test-load" \
--builder="${builderName}" \
"${context}"
fi
# create bake def
cat > "${bakedef}" <<EOL
group "default" {


@@ -2,11 +2,11 @@
set -eu -o pipefail
: "${BUILDX_CMD=docker buildx}"
: "${FORMATS=md}"
: ${BUILDX_CMD=docker buildx}
: ${FORMATS=md}
output=$(mktemp -d -t buildx-output.XXXXXXXXXX)
(set -x ; DOCS_FORMATS=$FORMATS ${BUILDX_CMD} bake --set "*.output=$output" update-docs)
rm -rf ./docs/reference/*
cp -R "$output"/out/* ./docs/
rm -rf "$output"
rm -rf $output


@@ -2,10 +2,10 @@
set -eu -o pipefail
: "${BUILDX_CMD=docker buildx}"
: ${BUILDX_CMD=docker buildx}
output=$(mktemp -d -t buildx-output.XXXXXXXXXX)
(set -x ; ${BUILDX_CMD} bake --set "*.output=$output" update-vendor)
rm -rf ./vendor
cp -R "$output"/out/* .
rm -rf "$output"
rm -rf $output


@@ -2,7 +2,6 @@ package store
import (
"fmt"
"time"
"github.com/containerd/containerd/platforms"
"github.com/docker/buildx/util/confutil"
@@ -17,10 +16,6 @@ type NodeGroup struct {
Driver string
Nodes []Node
Dynamic bool
// skip the following fields from being saved in the store
DockerContext bool `json:"-"`
LastActivity time.Time `json:"-"`
}
type Node struct {


@@ -4,8 +4,8 @@ import (
"encoding/json"
"os"
"path/filepath"
"regexp"
"sort"
"time"
"github.com/docker/docker/pkg/ioutils"
"github.com/gofrs/flock"
@@ -13,20 +13,11 @@ import (
"github.com/pkg/errors"
)
const (
instanceDir = "instances"
defaultsDir = "defaults"
activityDir = "activity"
)
func New(root string) (*Store, error) {
if err := os.MkdirAll(filepath.Join(root, instanceDir), 0700); err != nil {
if err := os.MkdirAll(filepath.Join(root, "instances"), 0700); err != nil {
return nil, err
}
if err := os.MkdirAll(filepath.Join(root, defaultsDir), 0700); err != nil {
return nil, err
}
if err := os.MkdirAll(filepath.Join(root, activityDir), 0700); err != nil {
if err := os.MkdirAll(filepath.Join(root, "defaults"), 0700); err != nil {
return nil, err
}
return &Store{root: root}, nil
@@ -53,7 +44,7 @@ type Txn struct {
}
func (t *Txn) List() ([]*NodeGroup, error) {
pp := filepath.Join(t.s.root, instanceDir)
pp := filepath.Join(t.s.root, "instances")
fis, err := os.ReadDir(pp)
if err != nil {
return nil, err
@@ -83,7 +74,7 @@ func (t *Txn) NodeGroupByName(name string) (*NodeGroup, error) {
if err != nil {
return nil, err
}
dt, err := os.ReadFile(filepath.Join(t.s.root, instanceDir, name))
dt, err := os.ReadFile(filepath.Join(t.s.root, "instances", name))
if err != nil {
return nil, err
}
@@ -91,9 +82,6 @@ func (t *Txn) NodeGroupByName(name string) (*NodeGroup, error) {
if err := json.Unmarshal(dt, &ng); err != nil {
return nil, err
}
if ng.LastActivity, err = t.GetLastActivity(&ng); err != nil {
return nil, err
}
return &ng, nil
}
@@ -102,14 +90,11 @@ func (t *Txn) Save(ng *NodeGroup) error {
if err != nil {
return err
}
if err := t.UpdateLastActivity(ng); err != nil {
return err
}
dt, err := json.Marshal(ng)
if err != nil {
return err
}
return ioutils.AtomicWriteFile(filepath.Join(t.s.root, instanceDir, name), dt, 0600)
return ioutils.AtomicWriteFile(filepath.Join(t.s.root, "instances", name), dt, 0600)
}
func (t *Txn) Remove(name string) error {
@@ -117,10 +102,7 @@ func (t *Txn) Remove(name string) error {
if err != nil {
return err
}
if err := t.RemoveLastActivity(name); err != nil {
return err
}
return os.RemoveAll(filepath.Join(t.s.root, instanceDir, name))
return os.RemoveAll(filepath.Join(t.s.root, "instances", name))
}
func (t *Txn) SetCurrent(key, name string, global, def bool) error {
@@ -140,38 +122,15 @@ func (t *Txn) SetCurrent(key, name string, global, def bool) error {
h := toHash(key)
if def {
if err := ioutils.AtomicWriteFile(filepath.Join(t.s.root, defaultsDir, h), []byte(name), 0600); err != nil {
if err := ioutils.AtomicWriteFile(filepath.Join(t.s.root, "defaults", h), []byte(name), 0600); err != nil {
return err
}
} else {
os.RemoveAll(filepath.Join(t.s.root, defaultsDir, h)) // ignore error
os.RemoveAll(filepath.Join(t.s.root, "defaults", h)) // ignore error
}
return nil
}
func (t *Txn) UpdateLastActivity(ng *NodeGroup) error {
return ioutils.AtomicWriteFile(filepath.Join(t.s.root, activityDir, ng.Name), []byte(time.Now().UTC().Format(time.RFC3339)), 0600)
}
func (t *Txn) GetLastActivity(ng *NodeGroup) (la time.Time, _ error) {
dt, err := os.ReadFile(filepath.Join(t.s.root, activityDir, ng.Name))
if err != nil {
if os.IsNotExist(errors.Cause(err)) {
return la, nil
}
return la, err
}
return time.Parse(time.RFC3339, string(dt))
}
func (t *Txn) RemoveLastActivity(name string) error {
name, err := ValidateName(name)
if err != nil {
return err
}
return os.RemoveAll(filepath.Join(t.s.root, activityDir, name))
}
func (t *Txn) reset(key string) error {
dt, err := json.Marshal(current{Key: key})
if err != nil {
@@ -215,7 +174,7 @@ func (t *Txn) Current(key string) (*NodeGroup, error) {
h := toHash(key)
dt, err = os.ReadFile(filepath.Join(t.s.root, defaultsDir, h))
dt, err = os.ReadFile(filepath.Join(t.s.root, "defaults", h))
if err != nil {
if os.IsNotExist(err) {
t.reset(key)
@@ -240,6 +199,8 @@ type current struct {
Global bool
}
var namePattern = regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9\.\-_]*$`)
func toHash(in string) string {
return digest.FromBytes([]byte(in)).Hex()[:20]
}


@@ -92,7 +92,6 @@ func TestNodeManagement(t *testing.T) {
require.NoError(t, err)
require.Equal(t, "mybuild", ng.Name)
require.Equal(t, "mydriver", ng.Driver)
require.True(t, !ng.LastActivity.IsZero())
_, err = txn.NodeGroupByName("mybuild2")
require.Error(t, err)


@@ -7,10 +7,10 @@ import (
"github.com/docker/buildx/store"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/dockerutil"
"github.com/docker/buildx/util/imagetools"
"github.com/docker/buildx/util/resolver"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/context/docker"
buildkitdconfig "github.com/moby/buildkit/cmd/buildkitd/config"
"github.com/pkg/errors"
)
@@ -24,6 +24,19 @@ func GetStore(dockerCli command.Cli) (*store.Txn, func(), error) {
return s.Txn()
}
// GetCurrentEndpoint returns the current default endpoint value
func GetCurrentEndpoint(dockerCli command.Cli) (string, error) {
name := dockerCli.CurrentContext()
if name != "default" {
return name, nil
}
de, err := GetDockerEndpoint(dockerCli, name)
if err != nil {
return "", errors.Errorf("docker endpoint for %q not found", name)
}
return de, nil
}
func GetProxyConfig(dockerCli command.Cli) map[string]string {
cfg := dockerCli.ConfigFile()
host := dockerCli.Client().DaemonHost()
@@ -50,9 +63,31 @@ func GetProxyConfig(dockerCli command.Cli) map[string]string {
return m
}
// GetDockerEndpoint returns docker endpoint string for given context
func GetDockerEndpoint(dockerCli command.Cli, name string) (string, error) {
list, err := dockerCli.ContextStore().List()
if err != nil {
return "", err
}
for _, l := range list {
if l.Name == name {
ep, ok := l.Endpoints["docker"]
if !ok {
return "", errors.Errorf("context %q does not have a Docker endpoint", name)
}
typed, ok := ep.(docker.EndpointMeta)
if !ok {
return "", errors.Errorf("endpoint %q is not of type EndpointMeta, %T", ep, ep)
}
return typed.Host, nil
}
}
return "", nil
}
// GetCurrentInstance finds the current builder instance
func GetCurrentInstance(txn *store.Txn, dockerCli command.Cli) (*store.NodeGroup, error) {
ep, err := dockerutil.GetCurrentEndpoint(dockerCli)
ep, err := GetCurrentEndpoint(dockerCli)
if err != nil {
return nil, err
}
@@ -79,25 +114,25 @@ func GetNodeGroup(txn *store.Txn, dockerCli command.Cli, name string) (*store.No
return ng, nil
}
if name == "default" {
name = dockerCli.CurrentContext()
}
list, err := dockerCli.ContextStore().List()
if err != nil {
return nil, err
}
for _, l := range list {
if l.Name == name {
ng = &store.NodeGroup{
Name: name,
return &store.NodeGroup{
Name: "default",
Nodes: []store.Node{
{
Name: name,
Name: "default",
Endpoint: name,
},
},
}
if ng.LastActivity, err = txn.GetLastActivity(ng); err != nil {
return nil, err
}
return ng, nil
}, nil
}
}


@@ -2,15 +2,12 @@ package store
import (
"os"
"regexp"
"strings"
"github.com/docker/docker/pkg/namesgenerator"
"github.com/pkg/errors"
)
var namePattern = regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9\.\-_]*$`)
func ValidateName(s string) (string, error) {
if !namePattern.MatchString(s) {
return "", errors.Errorf("invalid name %s, name needs to start with a letter and may not contain symbols, except ._-", s)


@@ -1,79 +0,0 @@
package buildflags
import (
"encoding/csv"
"fmt"
"strconv"
"strings"
"github.com/pkg/errors"
)
func CanonicalizeAttest(attestType string, in string) string {
if in == "" {
return ""
}
if b, err := strconv.ParseBool(in); err == nil {
return fmt.Sprintf("type=%s,enabled=%t", attestType, b)
}
return fmt.Sprintf("type=%s,%s", attestType, in)
}
func ParseAttests(in []string) (map[string]*string, error) {
out := map[string]*string{}
for _, in := range in {
in := in
attestType, enabled, err := parseAttest(in)
if err != nil {
return nil, err
}
k := "attest:" + attestType
if _, ok := out[k]; ok {
return nil, errors.Errorf("duplicate attestation field %s", attestType)
}
if enabled {
out[k] = &in
} else {
out[k] = nil
}
}
return out, nil
}
func parseAttest(in string) (string, bool, error) {
if in == "" {
return "", false, nil
}
csvReader := csv.NewReader(strings.NewReader(in))
fields, err := csvReader.Read()
if err != nil {
return "", false, err
}
attestType := ""
enabled := true
for _, field := range fields {
key, value, ok := strings.Cut(field, "=")
if !ok {
return "", false, errors.Errorf("invalid value %s", field)
}
key = strings.TrimSpace(strings.ToLower(key))
switch key {
case "type":
attestType = value
case "enabled":
enabled, err = strconv.ParseBool(value)
if err != nil {
return "", false, err
}
}
}
if attestType == "" {
return "", false, errors.Errorf("attestation type not specified")
}
return attestType, enabled, nil
}

Some files were not shown because too many files have changed in this diff.