Compare commits


5 Commits

Author SHA1 Message Date
Tõnis Tiigi
266c0eac61 Merge pull request #752 from tonistiigi/v0.6-mount-path-fix
[v0.6] container-driver: fix volume destination for cache
2021-08-28 10:07:48 -07:00
Tonis Tiigi
0b320faf34 github: fix running ci in version branches
Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
(cherry picked from commit 9833420a03)
2021-08-27 21:20:17 -07:00
Sebastiaan van Stijn
944fd444f6 container-driver: fix volume destination for cache
The container-driver creates a Linux container (as there currently isn't a
Windows version of buildkitd). However, the defaults are platform-specific.

Buildx was using the defaults from the buildkit `util/appdefaults` package,
which resulted in Buildx running on a Windows client creating a Linux
container that used the Windows location, causing it to fail:

    invalid mount config for type "volume": invalid mount path: 'C:/ProgramData/buildkitd/.buildstate' mount path must be absolute

This patch hard-codes the destination to the default Linux path.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 93867d02f0)
2021-08-27 21:03:27 -07:00
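
For illustration, here is a minimal Go sketch of the idea behind this fix; the helper name and the Linux path below are assumptions for the example, not the actual buildx source. The point is that the mount destination inside the (always Linux) buildkitd container must not depend on the client platform's defaults.

```go
package main

import (
	"fmt"
	"runtime"
)

// clientDefaultStateDir mimics an appdefaults-style helper that returns a
// platform-specific path on the *client*. On a Windows client this yields a
// Windows path, which is invalid as a mount destination in a Linux container.
func clientDefaultStateDir() string {
	if runtime.GOOS == "windows" {
		return "C:/ProgramData/buildkitd/.buildstate" // path from the error above
	}
	return "/var/lib/buildkit/.buildstate" // assumed Linux default, illustrative only
}

func main() {
	// Before the fix (conceptually): the destination follows the client OS.
	fmt.Println("client-derived destination:", clientDefaultStateDir())

	// After the fix (conceptually): the destination is hard-coded to the
	// default Linux path, because the buildkitd container is always Linux.
	const containerStateDir = "/var/lib/buildkit/.buildstate" // illustrative
	fmt.Println("hard-coded destination:    ", containerStateDir)
}
```
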
CrazyMax
de7dfb925d Merge pull request #743 from tonistiigi/v0.6-client-ctx
[v0.6] use long-running context for client initialization
2021-08-20 18:34:15 +02:00
Tonis Tiigi
43e51fd089 use long-running context for client initialization
Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
(cherry picked from commit 422ba60b04)
2021-08-20 09:30:23 -07:00
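
As a rough illustration of what this commit title describes (assuming "long-running context" means one that outlives the individual CLI command, such as `context.Background()`; the names below are made up for the example, not buildx code):

```go
package main

import (
	"context"
	"fmt"
)

// fakeClient stands in for a driver/BuildKit client; it remembers the context
// it was initialized with. Purely illustrative.
type fakeClient struct{ ctx context.Context }

func newClient(ctx context.Context) *fakeClient { return &fakeClient{ctx: ctx} }

func main() {
	// A per-command context is cancelled as soon as the command finishes...
	cmdCtx, cancel := context.WithCancel(context.Background())
	cancel() // command done

	// ...so a client initialized from it is already unusable.
	c1 := newClient(cmdCtx)
	fmt.Println("per-command ctx err: ", c1.ctx.Err()) // context.Canceled

	// Initializing with a long-running context keeps the client usable
	// beyond the lifetime of a single command.
	c2 := newClient(context.Background())
	fmt.Println("long-running ctx err:", c2.ctx.Err()) // <nil>
}
```
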
1639 changed files with 32825 additions and 147823 deletions


@@ -16,15 +16,61 @@ on:
env: env:
REPO_SLUG: "docker/buildx-bin" REPO_SLUG: "docker/buildx-bin"
REPO_SLUG_ORIGIN: "moby/buildkit:master" REPO_SLUG_ORIGIN: "moby/buildkit:master"
RELEASE_OUT: "./release-out" CACHEKEY_BINARIES: "binaries"
PLATFORMS: "linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64,linux/s390x,linux/ppc64le,linux/riscv64"
jobs: jobs:
build: base:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- -
name: Checkout name: Checkout
uses: actions/checkout@v2 uses: actions/checkout@v2
-
name: Cache ${{ env.CACHEKEY_BINARIES }}
uses: actions/cache@v2
with:
path: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
key: ${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-
-
name: Set up QEMU
uses: docker/setup-qemu-action@v1
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
with:
driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
-
name: Build ${{ env.CACHEKEY_BINARIES }}
run: |
./hack/build_ci_first_pass binaries
env:
CACHEDIR_FROM: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
CACHEDIR_TO: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}-new
-
# FIXME: Temp fix for https://github.com/moby/buildkit/issues/1850
name: Move cache
run: |
rm -rf /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
mv /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}-new /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
test:
runs-on: ubuntu-latest
needs: [base]
steps:
-
name: Checkout
uses: actions/checkout@v2
-
name: Cache ${{ env.CACHEKEY_BINARIES }}
uses: actions/cache@v2
with:
path: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
key: ${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-
- -
name: Set up QEMU name: Set up QEMU
uses: docker/setup-qemu-action@v1 uses: docker/setup-qemu-action@v1
@@ -37,15 +83,90 @@ jobs:
name: Test name: Test
run: | run: |
make test make test
env:
TEST_COVERAGE: 1
TESTFLAGS: -v --parallel=6 --timeout=20m
CACHEDIR_FROM: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
- -
name: Send to Codecov name: Send to Codecov
uses: codecov/codecov-action@v2 uses: codecov/codecov-action@v2
with: with:
file: ./coverage/coverage.txt file: ./coverage/coverage.txt
cross:
runs-on: ubuntu-latest
needs: [base]
steps:
- -
name: Build binaries name: Checkout
uses: actions/checkout@v2
-
name: Cache ${{ env.CACHEKEY_BINARIES }}
uses: actions/cache@v2
with:
path: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
key: ${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-
-
name: Set up QEMU
uses: docker/setup-qemu-action@v1
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
with:
driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
-
name: Cross
run: | run: |
make release make cross
env:
TARGETPLATFORM: ${{ env.PLATFORMS }},darwin/amd64,darwin/arm64,windows/amd64,windows/arm64
CACHEDIR_FROM: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
binaries:
runs-on: ubuntu-latest
needs: [test, cross]
env:
RELEASE_OUT: ./release-out
steps:
-
name: Checkout
uses: actions/checkout@v2
-
name: Prepare
id: prep
run: |
TAG=pr
if [[ $GITHUB_REF == refs/tags/v* ]]; then
TAG=${GITHUB_REF#refs/tags/}
elif [[ $GITHUB_REF == refs/heads/* ]]; then
TAG=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g')
fi
echo ::set-output name=tag::${TAG}
-
name: Cache ${{ env.CACHEKEY_BINARIES }}
uses: actions/cache@v2
with:
path: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
key: ${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-
-
name: Set up QEMU
uses: docker/setup-qemu-action@v1
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
with:
driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
-
name: Build ${{ steps.prep.outputs.tag }}
run: |
./hack/release ${{ env.RELEASE_OUT }}
env:
PLATFORMS: ${{ env.PLATFORMS }},darwin/amd64,darwin/arm64,windows/amd64,windows/arm64
CACHEDIR_FROM: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
- -
name: Upload artifacts name: Upload artifacts
uses: actions/upload-artifact@v2 uses: actions/upload-artifact@v2
@@ -64,7 +185,6 @@ jobs:
type=ref,event=branch type=ref,event=branch
type=ref,event=pr type=ref,event=pr
type=semver,pattern={{version}} type=semver,pattern={{version}}
bake-target: meta-helper
- -
name: Login to DockerHub name: Login to DockerHub
if: github.event_name != 'pull_request' if: github.event_name != 'pull_request'
@@ -74,13 +194,15 @@ jobs:
password: ${{ secrets.DOCKERHUB_TOKEN }} password: ${{ secrets.DOCKERHUB_TOKEN }}
- -
name: Build and push image name: Build and push image
uses: docker/bake-action@v1 uses: docker/build-push-action@v2
with: with:
files: | context: .
./docker-bake.hcl target: binaries
${{ steps.meta.outputs.bake-file }}
targets: image-cross
push: ${{ github.event_name != 'pull_request' }} push: ${{ github.event_name != 'pull_request' }}
cache-from: type=local,src=/tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
platforms: ${{ env.PLATFORMS }},darwin/amd64,darwin/arm64,windows/amd64,windows/arm64
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
- -
name: GitHub Release name: GitHub Release
if: startsWith(github.ref, 'refs/tags/v') if: startsWith(github.ref, 'refs/tags/v')
@@ -90,3 +212,4 @@ jobs:
with: with:
draft: true draft: true
files: ${{ env.RELEASE_OUT }}/* files: ${{ env.RELEASE_OUT }}/*
name: ${{ steps.prep.outputs.tag }}


@@ -1,100 +0,0 @@
name: e2e
on:
workflow_dispatch:
push:
branches:
- 'master'
- 'v[0-9]*'
pull_request:
branches:
- 'master'
- 'v[0-9]*'
jobs:
driver:
runs-on: ubuntu-20.04
strategy:
fail-fast: false
matrix:
driver:
- docker
- docker-container
- kubernetes
buildkit:
- moby/buildkit:buildx-stable-1
- moby/buildkit:master
buildkit-cfg:
- bkcfg-false
- bkcfg-true
multi-node:
- mnode-false
- mnode-true
platforms:
- linux/amd64,linux/arm64
include:
- driver: kubernetes
driver-opt: qemu.install=true
exclude:
- driver: docker
multi-node: mnode-true
- driver: docker
buildkit-cfg: bkcfg-true
- driver: docker-container
multi-node: mnode-true
steps:
-
name: Checkout
uses: actions/checkout@v2
-
name: Set up QEMU
uses: docker/setup-qemu-action@v1
if: matrix.driver == 'docker' || matrix.driver == 'docker-container'
-
name: Install buildx
run: |
make install
docker buildx version
-
name: Init env vars
run: |
# BuildKit cfg
if [ "${{ matrix.buildkit-cfg }}" = "bkcfg-true" ]; then
cat > "/tmp/buildkitd.toml" <<EOL
[worker.oci]
max-parallelism = 2
EOL
echo "BUILDKIT_CFG=/tmp/buildkitd.toml" >> $GITHUB_ENV
fi
# Multi node
if [ "${{ matrix.multi-node }}" = "mnode-true" ]; then
echo "MULTI_NODE=1" >> $GITHUB_ENV
else
echo "MULTI_NODE=0" >> $GITHUB_ENV
fi
-
name: Install k3s
if: matrix.driver == 'kubernetes'
uses: debianmaster/actions-k3s@v1.0.3
id: k3s
with:
version: v1.21.2-k3s1
-
name: Config k3s
if: matrix.driver == 'kubernetes'
run: |
(set -x ; cat ${{ steps.k3s.outputs.kubeconfig }})
-
name: Check k3s nodes
if: matrix.driver == 'kubernetes'
run: |
kubectl get nodes
-
name: Test
run: |
make test-driver
env:
BUILDKIT_IMAGE: ${{ matrix.buildkit }}
DRIVER: ${{ matrix.driver }}
DRIVER_OPT: ${{ matrix.driver-opt }}
PLATFORMS: ${{ matrix.platforms }}


@@ -13,6 +13,9 @@ on:
- 'master' - 'master'
- 'v[0-9]*' - 'v[0-9]*'
env:
REPO_SLUG_ORIGIN: "moby/buildkit:master"
jobs: jobs:
validate: validate:
runs-on: ubuntu-latest runs-on: ubuntu-latest
@@ -27,22 +30,12 @@ jobs:
- -
name: Checkout name: Checkout
uses: actions/checkout@v2 uses: actions/checkout@v2
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
with:
driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
- -
name: Run name: Run
run: | run: |
make ${{ matrix.target }} make ${{ matrix.target }}
validate-docs-yaml:
runs-on: ubuntu-latest
needs:
- validate
steps:
-
name: Checkout
uses: actions/checkout@v2
-
name: Run
run: |
make docs
env:
FORMATS: yaml


@@ -1,13 +1,6 @@
# This file lists all individuals having contributed content to the repository. # This file lists all individuals having contributed content to the repository.
# For how it is generated, see hack/dockerfiles/authors.Dockerfile. # For how it is generated, see `hack/generate-authors`.
CrazyMax <github@crazymax.dev>
CrazyMax <github@crazymax.dev> <1951866+crazy-max@users.noreply.github.com>
CrazyMax <github@crazymax.dev> <crazy-max@users.noreply.github.com>
Sebastiaan van Stijn <github@gone.nl>
Sebastiaan van Stijn <github@gone.nl> <thaJeztah@users.noreply.github.com>
Tibor Vass <tibor@docker.com> Tibor Vass <tibor@docker.com>
Tibor Vass <tibor@docker.com> <tiborvass@users.noreply.github.com> Tibor Vass <tibor@docker.com> <tiborvass@users.noreply.github.com>
Tõnis Tiigi <tonistiigi@gmail.com> Tõnis Tiigi <tonistiigi@gmail.com>
Ulysses Souza <ulyssessouza@gmail.com>
Wang Jinglei <morlay.null@gmail.com>

AUTHORS

@@ -1,45 +1,7 @@
# This file lists all individuals having contributed content to the repository. # This file lists all individuals having contributed content to the repository.
# For how it is generated, see hack/dockerfiles/authors.Dockerfile. # For how it is generated, see `scripts/generate-authors.sh`.
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
Alex Couture-Beil <alex@earthly.dev>
Andrew Haines <andrew.haines@zencargo.com>
Andy MacKinlay <admackin@users.noreply.github.com>
Anthony Poschen <zanven42@gmail.com>
Artur Klauser <Artur.Klauser@computer.org>
Batuhan Apaydın <developerguy2@gmail.com>
Bin Du <bindu@microsoft.com> Bin Du <bindu@microsoft.com>
Brandon Philips <brandon@ifup.org>
Brian Goff <cpuguy83@gmail.com> Brian Goff <cpuguy83@gmail.com>
CrazyMax <github@crazymax.dev>
dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Devin Bayer <dev@doubly.so>
Djordje Lukic <djordje.lukic@docker.com>
Dmytro Makovey <dmytro.makovey@docker.com>
Donghui Wang <977675308@qq.com>
faust <faustin@fala.red>
Felipe Santos <felipecassiors@gmail.com>
Fernando Miguel <github@FernandoMiguel.net>
gfrancesco <gfrancesco@users.noreply.github.com>
gracenoah <gracenoahgh@gmail.com>
Hollow Man <hollowman@hollowman.ml>
Ilya Dmitrichenko <errordeveloper@gmail.com>
Jack Laxson <jackjrabbit@gmail.com>
Jean-Yves Gastaud <jygastaud@gmail.com>
khs1994 <khs1994@khs1994.com>
Kotaro Adachi <k33asby@gmail.com>
l00397676 <lujingxiao@huawei.com>
Michal Augustyn <michal.augustyn@mail.com>
Patrick Van Stee <patrick@vanstee.me>
Saul Shanabrook <s.shanabrook@gmail.com>
Sebastiaan van Stijn <github@gone.nl>
SHIMA Tatsuya <ts1s1andn@gmail.com>
Silvin Lubecki <silvin.lubecki@docker.com>
Solomon Hykes <sh.github.6811@hykes.org>
Sune Keller <absukl@almbrand.dk>
Tibor Vass <tibor@docker.com> Tibor Vass <tibor@docker.com>
Tõnis Tiigi <tonistiigi@gmail.com> Tõnis Tiigi <tonistiigi@gmail.com>
Ulysses Souza <ulyssessouza@gmail.com>
Wang Jinglei <morlay.null@gmail.com>
Xiang Dai <764524258@qq.com>
zelahi <elahi.zuhayr@gmail.com>


@@ -1,16 +1,24 @@
# syntax=docker/dockerfile:1.3 # syntax=docker/dockerfile:1.2
ARG GO_VERSION=1.17 ARG DOCKERD_VERSION=19.03
ARG DOCKERD_VERSION=20.10.8 ARG CLI_VERSION=19.03
FROM docker:$DOCKERD_VERSION AS dockerd-release FROM docker:$DOCKERD_VERSION AS dockerd-release
# xx is a helper for cross-compilation # xx is a helper for cross-compilation
FROM --platform=$BUILDPLATFORM tonistiigi/xx@sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04 AS xx FROM --platform=$BUILDPLATFORM tonistiigi/xx@sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04 AS xx
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS golatest FROM --platform=$BUILDPLATFORM golang:1.16-alpine AS golatest
FROM golatest AS gobase FROM golatest AS go-linux
FROM golatest AS go-darwin
FROM golatest AS go-windows-amd64
FROM golatest AS go-windows-386
FROM golatest AS go-windows-arm
FROM --platform=$BUILDPLATFORM golang:1.17beta1-alpine AS go-windows-arm64
FROM go-windows-${TARGETARCH} AS go-windows
FROM go-${TARGETOS} AS gobase
COPY --from=xx / / COPY --from=xx / /
RUN apk add --no-cache file git RUN apk add --no-cache file git
ENV GOFLAGS=-mod=vendor ENV GOFLAGS=-mod=vendor
@@ -25,22 +33,24 @@ RUN --mount=target=. \
FROM gobase AS buildx-build FROM gobase AS buildx-build
ENV CGO_ENABLED=0 ENV CGO_ENABLED=0
ARG TARGETPLATFORM ARG TARGETPLATFORM
RUN --mount=type=bind,target=. \ RUN --mount=target=. --mount=target=/root/.cache,type=cache \
--mount=type=cache,target=/root/.cache \ --mount=target=/go/pkg/mod,type=cache \
--mount=type=cache,target=/go/pkg/mod \ --mount=source=/tmp/.ldflags,target=/tmp/.ldflags,from=buildx-version \
--mount=type=bind,source=/tmp/.ldflags,target=/tmp/.ldflags,from=buildx-version \
set -x; xx-go build -ldflags "$(cat /tmp/.ldflags)" -o /usr/bin/buildx ./cmd/buildx && \ set -x; xx-go build -ldflags "$(cat /tmp/.ldflags)" -o /usr/bin/buildx ./cmd/buildx && \
xx-verify --static /usr/bin/buildx xx-verify --static /usr/bin/buildx
FROM buildx-build AS test FROM buildx-build AS integration-tests
RUN --mount=type=bind,target=. \ COPY . .
--mount=type=cache,target=/root/.cache \
--mount=type=cache,target=/go/pkg/mod \
go test -v -coverprofile=/tmp/coverage.txt -covermode=atomic ./... && \
go tool cover -func=/tmp/coverage.txt
FROM scratch AS test-coverage # FROM golang:1.12-alpine AS docker-cli-build
COPY --from=test /tmp/coverage.txt /coverage.txt # RUN apk add -U git bash coreutils gcc musl-dev
# ENV CGO_ENABLED=0
# ARG REPO=github.com/tiborvass/cli
# ARG BRANCH=cli-plugin-aliases
# ARG CLI_VERSION
# WORKDIR /go/src/github.com/docker/cli
# RUN git clone git://$REPO . && git checkout $BRANCH
# RUN ./scripts/build/binary
FROM scratch AS binaries-unix FROM scratch AS binaries-unix
COPY --from=buildx-build /usr/bin/buildx / COPY --from=buildx-build /usr/bin/buildx /
@@ -53,25 +63,24 @@ COPY --from=buildx-build /usr/bin/buildx /buildx.exe
FROM binaries-$TARGETOS AS binaries FROM binaries-$TARGETOS AS binaries
# Release
FROM --platform=$BUILDPLATFORM alpine AS releaser FROM --platform=$BUILDPLATFORM alpine AS releaser
WORKDIR /work WORKDIR /work
ARG TARGETPLATFORM ARG TARGETPLATFORM
RUN --mount=from=binaries \ RUN --mount=from=binaries \
--mount=type=bind,source=/tmp/.version,target=/tmp/.version,from=buildx-version \ --mount=source=/tmp/.version,target=/tmp/.version,from=buildx-version \
mkdir -p /out && cp buildx* "/out/buildx-$(cat /tmp/.version).$(echo $TARGETPLATFORM | sed 's/\//-/g')$(ls buildx* | sed -e 's/^buildx//')" mkdir -p /out && cp buildx* "/out/buildx-$(cat /tmp/.version).$(echo $TARGETPLATFORM | sed 's/\//-/g')$(ls buildx* | sed -e 's/^buildx//')"
FROM scratch AS release FROM scratch AS release
COPY --from=releaser /out/ / COPY --from=releaser /out/ /
# Shell FROM alpine AS demo-env
FROM docker:$DOCKERD_VERSION AS dockerd-release
FROM alpine AS shell
RUN apk add --no-cache iptables tmux git vim less openssh RUN apk add --no-cache iptables tmux git vim less openssh
RUN mkdir -p /usr/local/lib/docker/cli-plugins && ln -s /usr/local/bin/buildx /usr/local/lib/docker/cli-plugins/docker-buildx RUN mkdir -p /usr/local/lib/docker/cli-plugins && ln -s /usr/local/bin/buildx /usr/local/lib/docker/cli-plugins/docker-buildx
COPY ./hack/demo-env/entrypoint.sh /usr/local/bin COPY ./hack/demo-env/entrypoint.sh /usr/local/bin
COPY ./hack/demo-env/tmux.conf /root/.tmux.conf COPY ./hack/demo-env/tmux.conf /root/.tmux.conf
COPY --from=dockerd-release /usr/local/bin /usr/local/bin COPY --from=dockerd-release /usr/local/bin /usr/local/bin
#COPY --from=docker-cli-build /go/src/github.com/docker/cli/build/docker /usr/local/bin
WORKDIR /work WORKDIR /work
COPY ./hack/demo-env/examples . COPY ./hack/demo-env/examples .
COPY --from=binaries / /usr/local/bin/ COPY --from=binaries / /usr/local/bin/


@@ -1,51 +1,32 @@
ifneq (, $(BUILDX_BIN))
export BUILDX_CMD = $(BUILDX_BIN)
else ifneq (, $(shell docker buildx version))
export BUILDX_CMD = docker buildx
else ifneq (, $(shell which buildx))
export BUILDX_CMD = $(which buildx)
else
$(error "Buildx is required: https://github.com/docker/buildx#installing")
endif
export BIN_OUT = ./bin
export RELEASE_OUT = ./release-out
shell: shell:
./hack/shell ./hack/shell
binaries: binaries:
$(BUILDX_CMD) bake binaries ./hack/binaries
binaries-cross: binaries-cross:
$(BUILDX_CMD) bake binaries-cross EXPORT_LOCAL=cross-out ./hack/cross
cross:
./hack/cross
install: binaries install: binaries
mkdir -p ~/.docker/cli-plugins mkdir -p ~/.docker/cli-plugins
install bin/buildx ~/.docker/cli-plugins/docker-buildx install bin/buildx ~/.docker/cli-plugins/docker-buildx
release:
./hack/release
validate-all: lint test validate-vendor validate-docs
lint: lint:
$(BUILDX_CMD) bake lint ./hack/lint
test: test:
$(BUILDX_CMD) bake test ./hack/test
validate-vendor: validate-vendor:
$(BUILDX_CMD) bake validate-vendor ./hack/validate-vendor
validate-docs: validate-docs:
$(BUILDX_CMD) bake validate-docs ./hack/validate-docs
validate-authors: validate-all: lint test validate-vendor validate-docs
$(BUILDX_CMD) bake validate-authors
test-driver:
./hack/test-driver
vendor: vendor:
./hack/update-vendor ./hack/update-vendor
@@ -53,10 +34,7 @@ vendor:
docs: docs:
./hack/update-docs ./hack/update-docs
authors: generate-authors:
$(BUILDX_CMD) bake update-authors ./hack/generate-authors
mod-outdated: .PHONY: vendor lint shell binaries install binaries-cross validate-all generate-authors validate-docs docs
$(BUILDX_CMD) bake mod-outdated
.PHONY: shell binaries binaries-cross install release validate-all lint validate-vendor validate-docs validate-authors vendor docs authors

README.md

@@ -5,8 +5,7 @@
[![Go Report Card](https://goreportcard.com/badge/github.com/docker/buildx)](https://goreportcard.com/report/github.com/docker/buildx) [![Go Report Card](https://goreportcard.com/badge/github.com/docker/buildx)](https://goreportcard.com/report/github.com/docker/buildx)
[![codecov](https://codecov.io/gh/docker/buildx/branch/master/graph/badge.svg)](https://codecov.io/gh/docker/buildx) [![codecov](https://codecov.io/gh/docker/buildx/branch/master/graph/badge.svg)](https://codecov.io/gh/docker/buildx)
`buildx` is a Docker CLI plugin for extended build capabilities with `buildx` is a Docker CLI plugin for extended build capabilities with [BuildKit](https://github.com/moby/buildkit).
[BuildKit](https://github.com/moby/buildkit).
Key features: Key features:
@@ -24,89 +23,70 @@ Key features:
- [Docker](#docker) - [Docker](#docker)
- [Binary release](#binary-release) - [Binary release](#binary-release)
- [From `Dockerfile`](#from-dockerfile) - [From `Dockerfile`](#from-dockerfile)
- [Set buildx as the default builder](#set-buildx-as-the-default-builder)
- [Building](#building) - [Building](#building)
- [with Docker 18.09+](#with-docker-1809)
- [with buildx or Docker 19.03](#with-buildx-or-docker-1903)
- [Getting started](#getting-started) - [Getting started](#getting-started)
- [Building with buildx](#building-with-buildx) - [Building with buildx](#building-with-buildx)
- [Working with builder instances](#working-with-builder-instances) - [Working with builder instances](#working-with-builder-instances)
- [Building multi-platform images](#building-multi-platform-images) - [Building multi-platform images](#building-multi-platform-images)
- [High-level build options](#high-level-build-options) - [High-level build options](#high-level-build-options)
- [Documentation](docs/reference/buildx.md) - [Documentation](docs/reference)
- [`buildx bake`](docs/reference/buildx_bake.md) - [`buildx build [OPTIONS] PATH | URL | -`](docs/reference/buildx_build.md)
- [`buildx build`](docs/reference/buildx_build.md) - [`buildx create [OPTIONS] [CONTEXT|ENDPOINT]`](docs/reference/buildx_create.md)
- [`buildx create`](docs/reference/buildx_create.md) - [`buildx use NAME`](docs/reference/buildx_use.md)
- [`buildx du`](docs/reference/buildx_du.md) - [`buildx inspect [NAME]`](docs/reference/buildx_inspect.md)
- [`buildx imagetools`](docs/reference/buildx_imagetools.md)
- [`buildx imagetools create`](docs/reference/buildx_imagetools_create.md)
- [`buildx imagetools inspect`](docs/reference/buildx_imagetools_inspect.md)
- [`buildx inspect`](docs/reference/buildx_inspect.md)
- [`buildx install`](docs/reference/buildx_install.md)
- [`buildx ls`](docs/reference/buildx_ls.md) - [`buildx ls`](docs/reference/buildx_ls.md)
- [`buildx prune`](docs/reference/buildx_prune.md) - [`buildx stop [NAME]`](docs/reference/buildx_stop.md)
- [`buildx rm`](docs/reference/buildx_rm.md) - [`buildx rm [NAME]`](docs/reference/buildx_rm.md)
- [`buildx stop`](docs/reference/buildx_stop.md) - [`buildx bake [OPTIONS] [TARGET...]`](docs/reference/buildx_bake.md)
- [`buildx uninstall`](docs/reference/buildx_uninstall.md) - [`buildx imagetools create [OPTIONS] [SOURCE] [SOURCE...]`](docs/reference/buildx_imagetools_create.md)
- [`buildx use`](docs/reference/buildx_use.md) - [`buildx imagetools inspect NAME`](docs/reference/buildx_imagetools_inspect.md)
- [`buildx version`](docs/reference/buildx_version.md) - [Setting buildx as default builder in Docker 19.03+](#setting-buildx-as-default-builder-in-docker-1903)
- [Contributing](#contributing) - [Contributing](#contributing)
# Installing # Installing
Using `buildx` as a docker CLI plugin requires using Docker 19.03 or newer. Using `buildx` as a docker CLI plugin requires using Docker 19.03 or newer. A limited set of functionality works with older versions of Docker when invoking the binary directly.
A limited set of functionality works with older versions of Docker when
invoking the binary directly.
## Docker ### Docker
`buildx` comes bundled with Docker Desktop and in latest `buildx` comes bundled with Docker Desktop and in latest Docker CE packages, but may not be included in all Linux distros (in which case follow the binary release instructions).
[Docker CE packages](https://docs.docker.com/engine/install/), but may not be
included in third-party software components (in which case follow the
[binary release](#binary-release) instructions).
## Binary release ### Binary release
You can also download the latest `buildx` binary from the Download the latest binary release from https://github.com/docker/buildx/releases/latest and copy it to `~/.docker/cli-plugins` folder with name `docker-buildx`.
[GitHub releases](https://github.com/docker/buildx/releases/latest) page, copy
it to `~/.docker/cli-plugins` folder with name `docker-buildx` and change the
permission to execute:
```console Change the permission to execute:
$ chmod a+x ~/.docker/cli-plugins/docker-buildx ```sh
chmod a+x ~/.docker/cli-plugins/docker-buildx
``` ```
## From `Dockerfile` ### From `Dockerfile`
Here is how to use buildx inside a Dockerfile through the Here is how to use buildx inside a Dockerfile through the [`docker/buildx-bin`](https://hub.docker.com/r/docker/buildx-bin) image:
[`docker/buildx-bin`](https://hub.docker.com/r/docker/buildx-bin) image:
```Dockerfile ```Dockerfile
FROM docker FROM docker
COPY --from=docker/buildx-bin /buildx /usr/libexec/docker/cli-plugins/docker-buildx COPY --from=docker/buildx-bin:latest /buildx /usr/libexec/docker/cli-plugins/docker-buildx
RUN docker buildx version RUN docker buildx version
``` ```
# Set buildx as the default builder
Running the command [`docker buildx install`](docs/reference/buildx_install.md)
sets up docker builder command as an alias to `docker buildx build`. This
results in the ability to have `docker build` use the current buildx builder.
To remove this alias, run [`docker buildx uninstall`](docs/reference/buildx_uninstall.md).
# Building # Building
```console
# Buildx 0.6+
$ docker buildx bake "git://github.com/docker/buildx"
$ mkdir -p ~/.docker/cli-plugins
$ mv ./bin/buildx ~/.docker/cli-plugins/docker-buildx
# Docker 19.03+ ### with buildx or Docker 19.03+
$ DOCKER_BUILDKIT=1 docker build --platform=local -o . "git://github.com/docker/buildx" ```
$ export DOCKER_BUILDKIT=1
$ docker build --platform=local -o . git://github.com/docker/buildx
$ mkdir -p ~/.docker/cli-plugins $ mkdir -p ~/.docker/cli-plugins
$ mv buildx ~/.docker/cli-plugins/docker-buildx $ mv buildx ~/.docker/cli-plugins/docker-buildx
```
# Local ### with Docker 18.09+
```
$ git clone git://github.com/docker/buildx && cd buildx $ git clone git://github.com/docker/buildx && cd buildx
$ make install $ make install
``` ```
@@ -115,151 +95,65 @@ $ make install
## Building with buildx ## Building with buildx
Buildx is a Docker CLI plugin that extends the `docker build` command with the Buildx is a Docker CLI plugin that extends the `docker build` command with the full support of the features provided by [Moby BuildKit](https://github.com/moby/buildkit) builder toolkit. It provides the same user experience as `docker build` with many new features like creating scoped builder instances and building against multiple nodes concurrently.
full support of the features provided by [Moby BuildKit](https://github.com/moby/buildkit)
builder toolkit. It provides the same user experience as `docker build` with
many new features like creating scoped builder instances and building against
multiple nodes concurrently.
After installation, buildx can be accessed through the `docker buildx` command After installation, buildx can be accessed through the `docker buildx` command with Docker 19.03. `docker buildx build` is the command for starting a new build. With Docker versions older than 19.03 buildx binary can be called directly to access the `docker buildx` subcommands.
with Docker 19.03. `docker buildx build` is the command for starting a new
build. With Docker versions older than 19.03 buildx binary can be called
directly to access the `docker buildx` subcommands.
```console ```
$ docker buildx build . $ docker buildx build .
[+] Building 8.4s (23/32) [+] Building 8.4s (23/32)
=> ... => ...
``` ```
Buildx will always build using the BuildKit engine and does not require
`DOCKER_BUILDKIT=1` environment variable for starting builds.
The `docker buildx build` command supports features available for `docker build`, Buildx will always build using the BuildKit engine and does not require `DOCKER_BUILDKIT=1` environment variable for starting builds.
including features such as outputs configuration, inline build caching, and
specifying target platform. In addition, Buildx also supports new features that
are not yet available for regular `docker build` like building manifest lists,
distributed caching, and exporting build results to OCI image tarballs.
Buildx is supposed to be flexible and can be run in different configurations Buildx build command supports the features available for `docker build` including the new features in Docker 19.03 such as outputs configuration, inline build caching or specifying target platform. In addition, buildx supports new features not yet available for regular `docker build` like building manifest lists, distributed caching, exporting build results to OCI image tarballs etc.
that are exposed through a driver concept. Currently, we support a
[`docker` driver](docs/reference/buildx_create.md#docker-driver) that uses Buildx is supposed to be flexible and can be run in different configurations that are exposed through a driver concept. Currently, we support a "docker" driver that uses the BuildKit library bundled into the Docker daemon binary, and a "docker-container" driver that automatically launches BuildKit inside a Docker container. We plan to add more drivers in the future, for example, one that would allow running buildx inside an (unprivileged) container.
the BuildKit library bundled into the Docker daemon binary, a
[`docker-container` driver](docs/reference/buildx_create.md#docker-container-driver) The user experience of using buildx is very similar across drivers, but there are some features that are not currently supported by the "docker" driver, because the BuildKit library bundled into docker daemon currently uses a different storage component. In contrast, all images built with "docker" driver are automatically added to the "docker images" view by default, whereas when using other drivers the method for outputting an image needs to be selected with `--output`.
that automatically launches BuildKit inside a Docker container and a
[`kubernetes` driver](docs/reference/buildx_create.md#kubernetes-driver) to
spin up pods with defined BuildKit container image to build your images. We
plan to add more drivers in the future.
The user experience of using buildx is very similar across drivers, but there
are some features that are not currently supported by the `docker` driver,
because the BuildKit library bundled into docker daemon currently uses a
different storage component. In contrast, all images built with `docker` driver
are automatically added to the `docker images` view by default, whereas when
using other drivers the method for outputting an image needs to be selected
with `--output`.
## Working with builder instances ## Working with builder instances
By default, buildx will initially use the `docker` driver if it is supported, By default, buildx will initially use the "docker" driver if it is supported, providing a very similar user experience to the native `docker build`. But using a local shared daemon is only one way to build your applications.
providing a very similar user experience to the native `docker build`. Note that
you must use a local shared daemon to build your applications.
Buildx allows you to create new instances of isolated builders. This can be Buildx allows you to create new instances of isolated builders. This can be used for getting a scoped environment for your CI builds that does not change the state of the shared daemon or for isolating the builds for different projects. You can create a new instance for a set of remote nodes, forming a build farm, and quickly switch between them.
used for getting a scoped environment for your CI builds that does not change
the state of the shared daemon or for isolating the builds for different
projects. You can create a new instance for a set of remote nodes, forming a
build farm, and quickly switch between them.
You can create new instances using the [`docker buildx create`](docs/reference/buildx_create.md) New instances can be created with `docker buildx create` command. This will create a new builder instance with a single node based on your current configuration. To use a remote node you can specify the `DOCKER_HOST` or remote context name while creating the new builder. After creating a new instance you can manage its lifecycle with the `inspect`, `stop` and `rm` commands and list all available builders with `ls`. After creating a new builder you can also append new nodes to it.
command. This creates a new builder instance with a single node based on your
current configuration.
To use a remote node you can specify the `DOCKER_HOST` or the remote context name To switch between different builders, use `docker buildx use <name>`. After running this command the build commands would automatically keep using this builder.
while creating the new builder. After creating a new instance, you can manage its
lifecycle using the [`docker buildx inspect`](docs/reference/buildx_inspect.md),
[`docker buildx stop`](docs/reference/buildx_stop.md), and
[`docker buildx rm`](docs/reference/buildx_rm.md) commands. To list all
available builders, use [`buildx ls`](docs/reference/buildx_ls.md). After
creating a new builder you can also append new nodes to it.
To switch between different builders, use [`docker buildx use <name>`](docs/reference/buildx_use.md). Docker 19.03 also features a new `docker context` command that can be used for giving names for remote Docker API endpoints. Buildx integrates with `docker context` so that all of your contexts automatically get a default builder instance. While creating a new builder instance or when adding a node to it you can also set the context name as the target.
After running this command, the build commands will automatically use this
builder.
Docker also features a [`docker context`](https://docs.docker.com/engine/reference/commandline/context/)
command that can be used for giving names for remote Docker API endpoints.
Buildx integrates with `docker context` so that all of your contexts
automatically get a default builder instance. While creating a new builder
instance or when adding a node to it you can also set the context name as the
target.
## Building multi-platform images ## Building multi-platform images
BuildKit is designed to work well for building for multiple platforms and not BuildKit is designed to work well for building for multiple platforms and not only for the architecture and operating system that the user invoking the build happens to run.
only for the architecture and operating system that the user invoking the build
happens to run.
When you invoke a build, you can set the `--platform` flag to specify the target When invoking a build, the `--platform` flag can be used to specify the target platform for the build output, (e.g. linux/amd64, linux/arm64, darwin/amd64). When the current builder instance is backed by the "docker-container" driver, multiple platforms can be specified together. In this case, a manifest list will be built, containing images for all of the specified architectures. When this image is used in `docker run` or `docker service`, Docker will pick the correct image based on the nodes platform.
platform for the build output, (for example, `linux/amd64`, `linux/arm64`, or
`darwin/amd64`).
When the current builder instance is backed by the `docker-container` or Multi-platform images can be built by mainly three different strategies that are all supported by buildx and Dockerfiles. You can use the QEMU emulation support in the kernel, build on multiple native nodes using the same builder instance or use a stage in Dockerfile to cross-compile to different architectures.
`kubernetes` driver, you can specify multiple platforms together. In this case,
it builds a manifest list which contains images for all specified architectures.
When you use this image in [`docker run`](https://docs.docker.com/engine/reference/commandline/run/)
or [`docker service`](https://docs.docker.com/engine/reference/commandline/service/),
Docker picks the correct image based on the node's platform.
You can build multi-platform images using three different strategies that are QEMU is the easiest way to get started if your node already supports it (e.g. if you are using Docker Desktop). It requires no changes to your Dockerfile and BuildKit will automatically detect the secondary architectures that are available. When BuildKit needs to run a binary for a different architecture it will automatically load it through a binary registered in the binfmt_misc handler. For QEMU binaries registered with binfmt_misc on the host OS to work transparently inside containers they must be registered with the fix_binary flag. This requires a kernel >= 4.8 and binfmt-support >= 2.1.7. You can check for proper registration by checking if `F` is among the flags in `/proc/sys/fs/binfmt_misc/qemu-*`. While Docker Desktop comes preconfigured with binfmt_misc support for additional platforms, for other installations it likely needs to be installed using [`tonistiigi/binfmt`](https://github.com/tonistiigi/binfmt) image.
supported by Buildx and Dockerfiles:
1. Using the QEMU emulation support in the kernel ```
2. Building on multiple native nodes using the same builder instance
3. Using a stage in Dockerfile to cross-compile to different architectures
QEMU is the easiest way to get started if your node already supports it (for
example. if you are using Docker Desktop). It requires no changes to your
Dockerfile and BuildKit automatically detects the secondary architectures that
are available. When BuildKit needs to run a binary for a different architecture,
it automatically loads it through a binary registered in the `binfmt_misc`
handler.
For QEMU binaries registered with `binfmt_misc` on the host OS to work
transparently inside containers they must be registered with the `fix_binary`
flag. This requires a kernel >= 4.8 and binfmt-support >= 2.1.7. You can check
for proper registration by checking if `F` is among the flags in
`/proc/sys/fs/binfmt_misc/qemu-*`. While Docker Desktop comes preconfigured
with `binfmt_misc` support for additional platforms, for other installations
it likely needs to be installed using [`tonistiigi/binfmt`](https://github.com/tonistiigi/binfmt)
image.
```console
$ docker run --privileged --rm tonistiigi/binfmt --install all $ docker run --privileged --rm tonistiigi/binfmt --install all
``` ```
Using multiple native nodes provide better support for more complicated cases Using multiple native nodes provides better support for more complicated cases not handled by QEMU and generally have better performance. Additional nodes can be added to the builder instance with `--append` flag.
that are not handled by QEMU and generally have better performance. You can
add additional nodes to the builder instance using the `--append` flag.
Assuming contexts `node-amd64` and `node-arm64` exist in `docker context ls`; ```
# assuming contexts node-amd64 and node-arm64 exist in "docker context ls"
```console
$ docker buildx create --use --name mybuild node-amd64 $ docker buildx create --use --name mybuild node-amd64
mybuild mybuild
$ docker buildx create --append --name mybuild node-arm64 $ docker buildx create --append --name mybuild node-arm64
$ docker buildx build --platform linux/amd64,linux/arm64 . $ docker buildx build --platform linux/amd64,linux/arm64 .
``` ```
Finally, depending on your project, the language that you use may have good Finally, depending on your project, the language that you use may have good support for cross-compilation. In that case, multi-stage builds in Dockerfiles can be effectively used to build binaries for the platform specified with `--platform` using the native architecture of the build node. List of build arguments like `BUILDPLATFORM` and `TARGETPLATFORM` are available automatically inside your Dockerfile and can be leveraged by the processes running as part of your build.
support for cross-compilation. In that case, multi-stage builds in Dockerfiles
can be effectively used to build binaries for the platform specified with
`--platform` using the native architecture of the build node. A list of build
arguments like `BUILDPLATFORM` and `TARGETPLATFORM` is available automatically
inside your Dockerfile and can be leveraged by the processes running as part
of your build.
```dockerfile ```
FROM --platform=$BUILDPLATFORM golang:alpine AS build FROM --platform=$BUILDPLATFORM golang:alpine AS build
ARG TARGETPLATFORM ARG TARGETPLATFORM
ARG BUILDPLATFORM ARG BUILDPLATFORM
@@ -268,31 +162,25 @@ FROM alpine
COPY --from=build /log /log COPY --from=build /log /log
``` ```
You can also use [`tonistiigi/xx`](https://github.com/tonistiigi/xx) Dockerfile
cross-compilation helpers for more advanced use-cases.
## High-level build options ## High-level build options
Buildx also aims to provide support for high-level build concepts that go beyond Buildx also aims to provide support for higher level build concepts that go beyond invoking a single build command. We want to support building all the images in your application together and let the users define project specific reusable build flows that can then be easily invoked by anyone.
invoking a single build command. We want to support building all the images in
your application together and let the users define project specific reusable
build flows that can then be easily invoked by anyone.
BuildKit efficiently handles multiple concurrent build requests and BuildKit has great support for efficiently handling multiple concurrent build requests and deduplicating work. While build commands can be combined with general-purpose command runners (eg. make), these tools generally invoke builds in sequence and therefore can't leverage the full potential of BuildKit parallelization or combine BuildKit's output for the user. For this use case we have added a command called `docker buildx bake`.
de-duplicating work. The build commands can be combined with general-purpose
command runners (for example, `make`). However, these tools generally invoke
builds in sequence and therefore cannot leverage the full potential of BuildKit
parallelization, or combine BuildKit's output for the user. For this use case,
we have added a command called [`docker buildx bake`](docs/reference/buildx_bake.md).
The `bake` command supports building images from compose files, similar to Currently, the bake command supports building images from compose files, similar to `compose build` but allowing all the services to be built concurrently as part of a single request.
[`docker-compose build`](https://docs.docker.com/compose/reference/build/),
but allowing all the services to be built concurrently as part of a single There is also support for custom build rules from HCL/JSON files allowing better code reuse and different target groups. The design of bake is in very early stages and we are looking for feedback from users.
request.
[`buildx bake` Reference Docs](docs/reference/buildx_bake.md)
# Setting buildx as default builder in Docker 19.03+
Running `docker buildx install` sets up `docker builder` command as an alias to `docker buildx`. This results in the ability to have `docker build` use the current buildx builder.
To remove this alias, you can run `docker buildx uninstall`.
There is also support for custom build rules from HCL/JSON files allowing
better code reuse and different target groups. The design of bake is in very
early stages and we are looking for feedback from users.
# Contributing # Contributing


@@ -2,7 +2,6 @@ package bake
import ( import (
"context" "context"
"encoding/csv"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"os" "os"
@@ -30,11 +29,6 @@ type File struct {
Data []byte Data []byte
} }
type Override struct {
Value string
ArrValue []string
}
func defaultFilenames() []string { func defaultFilenames() []string {
return []string{ return []string{
"docker-compose.yml", // support app "docker-compose.yml", // support app
@@ -67,29 +61,29 @@ func ReadLocalFiles(names []string) ([]File, error) {
return out, nil return out, nil
} }
func ReadTargets(ctx context.Context, files []File, targets, overrides []string, defaults map[string]string) (map[string]*Target, []*Group, error) { func ReadTargets(ctx context.Context, files []File, targets, overrides []string, defaults map[string]string) (map[string]*Target, error) {
c, err := ParseFiles(files, defaults) c, err := ParseFiles(files, defaults)
if err != nil { if err != nil {
return nil, nil, err return nil, err
} }
o, err := c.newOverrides(overrides) o, err := c.newOverrides(overrides)
if err != nil { if err != nil {
return nil, nil, err return nil, err
} }
m := map[string]*Target{} m := map[string]*Target{}
for _, n := range targets { for _, n := range targets {
for _, n := range c.ResolveGroup(n) { for _, n := range c.ResolveGroup(n) {
t, err := c.ResolveTarget(n, o) t, err := c.ResolveTarget(n, o)
if err != nil { if err != nil {
return nil, nil, err return nil, err
} }
if t != nil { if t != nil {
m[n] = t m[n] = t
} }
} }
} }
return m, c.Groups, nil return m, nil
} }
func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error) { func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error) {
@@ -246,8 +240,8 @@ func (c Config) expandTargets(pattern string) ([]string, error) {
return names, nil return names, nil
} }
func (c Config) newOverrides(v []string) (map[string]map[string]Override, error) { func (c Config) newOverrides(v []string) (map[string]*Target, error) {
m := map[string]map[string]Override{} m := map[string]*Target{}
for _, v := range v { for _, v := range v {
parts := strings.SplitN(v, "=", 2) parts := strings.SplitN(v, "=", 2)
@@ -266,41 +260,73 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
return nil, err return nil, err
} }
kk := strings.SplitN(parts[0], ".", 2)
for _, name := range names { for _, name := range names {
t, ok := m[name] t, ok := m[name]
if !ok { if !ok {
t = map[string]Override{} t = &Target{}
m[name] = t
} }
o := t[kk[1]]
switch keys[1] { switch keys[1] {
case "output", "cache-to", "cache-from", "tags", "platform", "secrets", "ssh": case "context":
if len(parts) == 2 { t.Context = &parts[1]
o.ArrValue = append(o.ArrValue, parts[1]) case "dockerfile":
} t.Dockerfile = &parts[1]
case "args": case "args":
if len(keys) != 3 { if len(keys) != 3 {
return nil, errors.Errorf("invalid key %s, args requires name", parts[0]) return nil, errors.Errorf("invalid key %s, args requires name", parts[0])
} }
if t.Args == nil {
t.Args = map[string]string{}
}
if len(parts) < 2 { if len(parts) < 2 {
v, ok := os.LookupEnv(keys[2]) v, ok := os.LookupEnv(keys[2])
if !ok { if ok {
continue t.Args[keys[2]] = v
} }
o.Value = v } else {
t.Args[keys[2]] = parts[1]
} }
fallthrough case "labels":
if len(keys) != 3 {
return nil, errors.Errorf("invalid key %s, lanels requires name", parts[0])
}
if t.Labels == nil {
t.Labels = map[string]string{}
}
t.Labels[keys[2]] = parts[1]
case "tags":
t.Tags = append(t.Tags, parts[1])
case "cache-from":
t.CacheFrom = append(t.CacheFrom, parts[1])
case "cache-to":
t.CacheTo = append(t.CacheTo, parts[1])
case "target":
s := parts[1]
t.Target = &s
case "secrets":
t.Secrets = append(t.Secrets, parts[1])
case "ssh":
t.SSH = append(t.SSH, parts[1])
case "platform":
t.Platforms = append(t.Platforms, parts[1])
case "output":
t.Outputs = append(t.Outputs, parts[1])
case "no-cache":
noCache, err := strconv.ParseBool(parts[1])
if err != nil {
return nil, errors.Errorf("invalid value %s for boolean key no-cache", parts[1])
}
t.NoCache = &noCache
case "pull":
pull, err := strconv.ParseBool(parts[1])
if err != nil {
return nil, errors.Errorf("invalid value %s for boolean key pull", parts[1])
}
t.Pull = &pull
default: default:
if len(parts) == 2 { return nil, errors.Errorf("unknown key: %s", keys[1])
o.Value = parts[1]
}
} }
m[name] = t
t[kk[1]] = o
} }
} }
return m, nil return m, nil
@@ -332,7 +358,7 @@ func (c Config) group(name string, visited map[string]struct{}) []string {
return targets return targets
} }
func (c Config) ResolveTarget(name string, overrides map[string]map[string]Override) (*Target, error) { func (c Config) ResolveTarget(name string, overrides map[string]*Target) (*Target, error) {
t, err := c.target(name, map[string]struct{}{}, overrides) t, err := c.target(name, map[string]struct{}{}, overrides)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -348,7 +374,7 @@ func (c Config) ResolveTarget(name string, overrides map[string]map[string]Overr
return t, nil return t, nil
} }
func (c Config) target(name string, visited map[string]struct{}, overrides map[string]map[string]Override) (*Target, error) { func (c Config) target(name string, visited map[string]struct{}, overrides map[string]*Target) (*Target, error) {
if _, ok := visited[name]; ok { if _, ok := visited[name]; ok {
return nil, nil return nil, nil
} }
@@ -378,10 +404,9 @@ func (c Config) target(name string, visited map[string]struct{}, overrides map[s
m.Merge(tt) m.Merge(tt)
m.Merge(t) m.Merge(t)
tt = m tt = m
if err := tt.AddOverrides(overrides[name]); err != nil { if override, ok := overrides[name]; ok {
return nil, err tt.Merge(override)
} }
tt.normalize() tt.normalize()
return tt, nil return tt, nil
} }
@@ -482,81 +507,6 @@ func (t *Target) Merge(t2 *Target) {
t.Inherits = append(t.Inherits, t2.Inherits...) t.Inherits = append(t.Inherits, t2.Inherits...)
} }
func (t *Target) AddOverrides(overrides map[string]Override) error {
for key, o := range overrides {
value := o.Value
keys := strings.SplitN(key, ".", 2)
switch keys[0] {
case "context":
t.Context = &value
case "dockerfile":
t.Dockerfile = &value
case "args":
if len(keys) != 2 {
return errors.Errorf("args require name")
}
if t.Args == nil {
t.Args = map[string]string{}
}
t.Args[keys[1]] = value
case "labels":
if len(keys) != 2 {
return errors.Errorf("labels require name")
}
if t.Labels == nil {
t.Labels = map[string]string{}
}
t.Labels[keys[1]] = value
case "tags":
t.Tags = o.ArrValue
case "cache-from":
t.CacheFrom = o.ArrValue
case "cache-to":
t.CacheTo = o.ArrValue
case "target":
t.Target = &value
case "secrets":
t.Secrets = o.ArrValue
case "ssh":
t.SSH = o.ArrValue
case "platform":
t.Platforms = o.ArrValue
case "output":
t.Outputs = o.ArrValue
case "no-cache":
noCache, err := strconv.ParseBool(value)
if err != nil {
return errors.Errorf("invalid value %s for boolean key no-cache", value)
}
t.NoCache = &noCache
case "pull":
pull, err := strconv.ParseBool(value)
if err != nil {
return errors.Errorf("invalid value %s for boolean key pull", value)
}
t.Pull = &pull
case "push":
_, err := strconv.ParseBool(value)
if err != nil {
return errors.Errorf("invalid value %s for boolean key push", value)
}
if len(t.Outputs) == 0 {
t.Outputs = append(t.Outputs, "type=image,push=true")
} else {
for i, output := range t.Outputs {
if typ := parseOutputType(output); typ == "image" || typ == "registry" {
t.Outputs[i] = t.Outputs[i] + ",push=" + value
}
}
}
default:
return errors.Errorf("unknown key: %s", keys[0])
}
}
return nil
}
func TargetsToBuildOpt(m map[string]*Target, inp *Input) (map[string]build.Options, error) { func TargetsToBuildOpt(m map[string]*Target, inp *Input) (map[string]build.Options, error) {
m2 := make(map[string]build.Options, len(m)) m2 := make(map[string]build.Options, len(m))
for k, v := range m { for k, v := range m {
@@ -716,20 +666,3 @@ func removeDupes(s []string) []string {
func isRemoteResource(str string) bool { func isRemoteResource(str string) bool {
return urlutil.IsGitURL(str) || urlutil.IsURL(str) return urlutil.IsGitURL(str) || urlutil.IsURL(str)
} }
func parseOutputType(str string) string {
csvReader := csv.NewReader(strings.NewReader(str))
fields, err := csvReader.Read()
if err != nil {
return ""
}
for _, field := range fields {
parts := strings.SplitN(field, "=", 2)
if len(parts) == 2 {
if parts[0] == "type" {
return parts[1]
}
}
}
return ""
}


@@ -34,7 +34,7 @@ target "webapp" {
ctx := context.TODO() ctx := context.TODO()
t.Run("NoOverrides", func(t *testing.T) { t.Run("NoOverrides", func(t *testing.T) {
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, nil, nil) m, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, nil, nil)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, 1, len(m)) require.Equal(t, 1, len(m))
@@ -46,7 +46,7 @@ target "webapp" {
}) })
t.Run("InvalidTargetOverrides", func(t *testing.T) { t.Run("InvalidTargetOverrides", func(t *testing.T) {
_, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"nosuchtarget.context=foo"}, nil) _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"nosuchtarget.context=foo"}, nil)
require.NotNil(t, err) require.NotNil(t, err)
require.Equal(t, err.Error(), "could not find any target matching 'nosuchtarget'") require.Equal(t, err.Error(), "could not find any target matching 'nosuchtarget'")
}) })
@@ -56,7 +56,7 @@ target "webapp" {
os.Setenv("VAR_FROMENV"+t.Name(), "fromEnv") os.Setenv("VAR_FROMENV"+t.Name(), "fromEnv")
defer os.Unsetenv("VAR_FROM_ENV" + t.Name()) defer os.Unsetenv("VAR_FROM_ENV" + t.Name())
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{ m, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{
"webapp.args.VAR_UNSET", "webapp.args.VAR_UNSET",
"webapp.args.VAR_EMPTY=", "webapp.args.VAR_EMPTY=",
"webapp.args.VAR_SET=bananas", "webapp.args.VAR_SET=bananas",
@@ -85,7 +85,7 @@ target "webapp" {
// building leaf but overriding parent fields // building leaf but overriding parent fields
t.Run("parent", func(t *testing.T) { t.Run("parent", func(t *testing.T) {
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{ m, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{
"webDEP.args.VAR_INHERITED=override", "webDEP.args.VAR_INHERITED=override",
"webDEP.args.VAR_BOTH=override", "webDEP.args.VAR_BOTH=override",
}, nil) }, nil)
@@ -96,23 +96,23 @@ target "webapp" {
}) })
t.Run("ContextOverride", func(t *testing.T) { t.Run("ContextOverride", func(t *testing.T) {
_, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.context"}, nil) _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.context"}, nil)
require.NotNil(t, err) require.NotNil(t, err)
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.context=foo"}, nil) m, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.context=foo"}, nil)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, "foo", *m["webapp"].Context) require.Equal(t, "foo", *m["webapp"].Context)
}) })
t.Run("NoCacheOverride", func(t *testing.T) { t.Run("NoCacheOverride", func(t *testing.T) {
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.no-cache=false"}, nil) m, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.no-cache=false"}, nil)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, false, *m["webapp"].NoCache) require.Equal(t, false, *m["webapp"].NoCache)
}) })
t.Run("PullOverride", func(t *testing.T) { t.Run("PullOverride", func(t *testing.T) {
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.pull=false"}, nil) m, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.pull=false"}, nil)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, false, *m["webapp"].Pull) require.Equal(t, false, *m["webapp"].Pull)
}) })
@@ -172,58 +172,13 @@ target "webapp" {
} }
for _, test := range cases { for _, test := range cases {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
m, _, err := ReadTargets(ctx, []File{fp}, test.targets, test.overrides, nil) m, err := ReadTargets(ctx, []File{fp}, test.targets, test.overrides, nil)
test.check(t, m, err) test.check(t, m, err)
}) })
} }
}) })
} }
func TestPushOverride(t *testing.T) {
t.Parallel()
fp := File{
Name: "docker-bake.hc",
Data: []byte(
`target "app" {
output = ["type=image,compression=zstd"]
}`),
}
ctx := context.TODO()
m, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, []string{"*.push=true"}, nil)
require.NoError(t, err)
require.Equal(t, 1, len(m["app"].Outputs))
require.Equal(t, "type=image,compression=zstd,push=true", m["app"].Outputs[0])
fp = File{
Name: "docker-bake.hc",
Data: []byte(
`target "app" {
output = ["type=image,compression=zstd"]
}`),
}
ctx = context.TODO()
m, _, err = ReadTargets(ctx, []File{fp}, []string{"app"}, []string{"*.push=false"}, nil)
require.NoError(t, err)
require.Equal(t, 1, len(m["app"].Outputs))
require.Equal(t, "type=image,compression=zstd,push=false", m["app"].Outputs[0])
fp = File{
Name: "docker-bake.hc",
Data: []byte(
`target "app" {
}`),
}
ctx = context.TODO()
m, _, err = ReadTargets(ctx, []File{fp}, []string{"app"}, []string{"*.push=true"}, nil)
require.NoError(t, err)
require.Equal(t, 1, len(m["app"].Outputs))
require.Equal(t, "type=image,push=true", m["app"].Outputs[0])
}
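
For context on what TestPushOverride asserts: a `*.push=<bool>` override folds a push attribute into each target's CSV-style output entry, and synthesises a default `type=image` output when the target has none. Below is a minimal, hypothetical Go sketch of that expected input/output shape; it is not the bake implementation, only an illustration of the assertions above.

package main

import "fmt"

// applyPushOverride is a hypothetical helper mirroring what the test above
// asserts: fold push=<bool> into every CSV output entry, or synthesise a
// default image output when the target has none.
func applyPushOverride(outputs []string, push bool) []string {
	if len(outputs) == 0 {
		return []string{fmt.Sprintf("type=image,push=%v", push)}
	}
	out := make([]string, 0, len(outputs))
	for _, o := range outputs {
		out = append(out, fmt.Sprintf("%s,push=%v", o, push))
	}
	return out
}

func main() {
	fmt.Println(applyPushOverride([]string{"type=image,compression=zstd"}, true))  // [type=image,compression=zstd,push=true]
	fmt.Println(applyPushOverride(nil, true))                                      // [type=image,push=true]
	fmt.Println(applyPushOverride([]string{"type=image,compression=zstd"}, false)) // [type=image,compression=zstd,push=false]
}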
func TestReadTargetsCompose(t *testing.T) { func TestReadTargetsCompose(t *testing.T) {
t.Parallel() t.Parallel()
@@ -260,7 +215,7 @@ services:
ctx := context.TODO() ctx := context.TODO()
m, _, err := ReadTargets(ctx, []File{fp, fp2}, []string{"default"}, nil, nil) m, err := ReadTargets(ctx, []File{fp, fp2}, []string{"default"}, nil, nil)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, 3, len(m)) require.Equal(t, 3, len(m))
@@ -273,8 +228,9 @@ services:
} }
func TestHCLCwdPrefix(t *testing.T) { func TestHCLCwdPrefix(t *testing.T) {
fp := File{ fp := File{
Name: "docker-bake.hcl", Name: "docker-bake.hc",
Data: []byte( Data: []byte(
`target "app" { `target "app" {
context = "cwd://foo" context = "cwd://foo"
@@ -282,7 +238,7 @@ func TestHCLCwdPrefix(t *testing.T) {
}`), }`),
} }
ctx := context.TODO() ctx := context.TODO()
m, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil) m, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, 1, len(m)) require.Equal(t, 1, len(m))
@@ -295,32 +251,3 @@ func TestHCLCwdPrefix(t *testing.T) {
require.Equal(t, "test", *m["app"].Dockerfile) require.Equal(t, "test", *m["app"].Dockerfile)
require.Equal(t, "foo", *m["app"].Context) require.Equal(t, "foo", *m["app"].Context)
} }
func TestOverrideMerge(t *testing.T) {
fp := File{
Name: "docker-bake.hcl",
Data: []byte(
`target "app" {
platforms = ["linux/amd64"]
output = ["foo"]
}`),
}
ctx := context.TODO()
m, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, []string{
"app.platform=linux/arm",
"app.platform=linux/ppc64le",
"app.output=type=registry",
}, nil)
require.NoError(t, err)
require.Equal(t, 1, len(m))
_, ok := m["app"]
require.True(t, ok)
_, err = TargetsToBuildOpt(m, &Input{})
require.NoError(t, err)
require.Equal(t, []string{"linux/arm", "linux/ppc64le"}, m["app"].Platforms)
require.Equal(t, 1, len(m["app"].Outputs))
require.Equal(t, "type=registry", m["app"].Outputs[0])
}
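
The overrides exercised in TestOverrideMerge use the same `targetpattern.key=value` syntax as the `--set` flag later in this diff; as the assertions show, repeated platform overrides accumulate while the output override replaces the original value. A hypothetical sketch of splitting such an override key (not the bake parser itself) looks like this:

package main

import (
	"fmt"
	"strings"
)

// splitOverride illustrates the "targetpattern.key=value" shape accepted by
// --set: the part before the first dot selects targets (wildcards like *
// are allowed), the remainder names the field being overridden.
func splitOverride(s string) (pattern, field, value string, err error) {
	kv := strings.SplitN(s, "=", 2)
	if len(kv) != 2 {
		return "", "", "", fmt.Errorf("invalid override %q, expected key=value", s)
	}
	keys := strings.SplitN(kv[0], ".", 2)
	if len(keys) != 2 {
		return "", "", "", fmt.Errorf("invalid override key %q, expected target.field", kv[0])
	}
	return keys[0], keys[1], kv[1], nil
}

func main() {
	for _, o := range []string{"app.platform=linux/arm", "*.output=type=registry"} {
		p, f, v, _ := splitOverride(o)
		fmt.Printf("pattern=%s field=%s value=%s\n", p, f, v)
	}
}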

View File

@@ -80,15 +80,13 @@ func ParseCompose(dt []byte) (*Config, error) {
return val, ok return val, ok
})), })),
CacheFrom: s.Build.CacheFrom, CacheFrom: s.Build.CacheFrom,
} // TODO: add platforms
if err = t.composeExtTarget(s.Build.Extensions); err != nil {
return nil, err
} }
if s.Build.Target != "" { if s.Build.Target != "" {
target := s.Build.Target target := s.Build.Target
t.Target = &target t.Target = &target
} }
if len(t.Tags) == 0 && s.Image != "" { if s.Image != "" {
t.Tags = []string{s.Image} t.Tags = []string{s.Image}
} }
c.Targets = append(c.Targets, t) c.Targets = append(c.Targets, t)
@@ -113,82 +111,3 @@ func flatten(in compose.MappingWithEquals) compose.Mapping {
} }
return out return out
} }
// composeExtTarget converts Compose build extension x-bake to bake Target
// https://github.com/compose-spec/compose-spec/blob/master/spec.md#extension
func (t *Target) composeExtTarget(exts map[string]interface{}) error {
if ext, ok := exts["x-bake"]; ok {
for key, val := range ext.(map[string]interface{}) {
switch key {
case "tags":
if res, k := val.(string); k {
t.Tags = append(t.Tags, res)
} else {
for _, res := range val.([]interface{}) {
t.Tags = append(t.Tags, res.(string))
}
}
case "cache-from":
t.CacheFrom = []string{} // Needed to override the main field
if res, k := val.(string); k {
t.CacheFrom = append(t.CacheFrom, res)
} else {
for _, res := range val.([]interface{}) {
t.CacheFrom = append(t.CacheFrom, res.(string))
}
}
case "cache-to":
if res, k := val.(string); k {
t.CacheTo = append(t.CacheTo, res)
} else {
for _, res := range val.([]interface{}) {
t.CacheTo = append(t.CacheTo, res.(string))
}
}
case "secret":
if res, k := val.(string); k {
t.Secrets = append(t.Secrets, res)
} else {
for _, res := range val.([]interface{}) {
t.Secrets = append(t.Secrets, res.(string))
}
}
case "ssh":
if res, k := val.(string); k {
t.SSH = append(t.SSH, res)
} else {
for _, res := range val.([]interface{}) {
t.SSH = append(t.SSH, res.(string))
}
}
case "platforms":
if res, k := val.(string); k {
t.Platforms = append(t.Platforms, res)
} else {
for _, res := range val.([]interface{}) {
t.Platforms = append(t.Platforms, res.(string))
}
}
case "output":
if res, k := val.(string); k {
t.Outputs = append(t.Outputs, res)
} else {
for _, res := range val.([]interface{}) {
t.Outputs = append(t.Outputs, res.(string))
}
}
case "pull":
if res, ok := val.(bool); ok {
t.Pull = &res
}
case "no-cache":
if res, ok := val.(bool); ok {
t.NoCache = &res
}
default:
return fmt.Errorf("compose file invalid: unknown %s field for x-bake", key)
}
}
}
return nil
}
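
The x-bake handler above repeats the same "string or list of strings" coercion for every key (tags, cache-from, cache-to, secret, ssh, platforms, output). A small helper capturing that pattern, shown here only as a sketch and not part of buildx, would shrink the type switch considerably:

package main

import "fmt"

// toStringSlice accepts either a single YAML scalar or a YAML sequence,
// the two shapes the x-bake extension handler has to cope with.
func toStringSlice(val interface{}) ([]string, error) {
	switch v := val.(type) {
	case string:
		return []string{v}, nil
	case []interface{}:
		out := make([]string, 0, len(v))
		for _, item := range v {
			s, ok := item.(string)
			if !ok {
				return nil, fmt.Errorf("expected string, got %T", item)
			}
			out = append(out, s)
		}
		return out, nil
	default:
		return nil, fmt.Errorf("expected string or list of strings, got %T", val)
	}
}

func main() {
	fmt.Println(toStringSlice("type=docker"))
	fmt.Println(toStringSlice([]interface{}{"linux/amd64", "linux/arm64"}))
}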

View File

@@ -214,70 +214,3 @@ networks:
_, err := ParseCompose(dt) _, err := ParseCompose(dt)
require.NoError(t, err) require.NoError(t, err)
} }
func TestComposeExt(t *testing.T) {
var dt = []byte(`
services:
addon:
image: ct-addon:bar
build:
context: .
dockerfile: ./Dockerfile
cache_from:
- user/app:cache
args:
CT_ECR: foo
CT_TAG: bar
x-bake:
tags:
- ct-addon:foo
- ct-addon:alp
platforms:
- linux/amd64
- linux/arm64
cache-from:
- type=local,src=path/to/cache
cache-to: local,dest=path/to/cache
pull: true
aws:
image: ct-fake-aws:bar
build:
dockerfile: ./aws.Dockerfile
args:
CT_ECR: foo
CT_TAG: bar
x-bake:
secret:
- id=mysecret,src=/local/secret
- id=mysecret2,src=/local/secret2
ssh: default
platforms: linux/arm64
output: type=docker
no-cache: true
`)
c, err := ParseCompose(dt)
require.NoError(t, err)
require.Equal(t, 2, len(c.Targets))
sort.Slice(c.Targets, func(i, j int) bool {
return c.Targets[i].Name < c.Targets[j].Name
})
require.Equal(t, c.Targets[0].Args, map[string]string{"CT_ECR": "foo", "CT_TAG": "bar"})
require.Equal(t, c.Targets[0].Tags, []string{"ct-addon:foo", "ct-addon:alp"})
require.Equal(t, c.Targets[0].Platforms, []string{"linux/amd64", "linux/arm64"})
require.Equal(t, c.Targets[0].CacheFrom, []string{"type=local,src=path/to/cache"})
require.Equal(t, c.Targets[0].CacheTo, []string{"local,dest=path/to/cache"})
require.Equal(t, c.Targets[0].Pull, newBool(true))
require.Equal(t, c.Targets[1].Tags, []string{"ct-fake-aws:bar"})
require.Equal(t, c.Targets[1].Secrets, []string{"id=mysecret,src=/local/secret", "id=mysecret2,src=/local/secret2"})
require.Equal(t, c.Targets[1].SSH, []string{"default"})
require.Equal(t, c.Targets[1].Platforms, []string{"linux/arm64"})
require.Equal(t, c.Targets[1].Outputs, []string{"type=docker"})
require.Equal(t, c.Targets[1].NoCache, newBool(true))
}
func newBool(val bool) *bool {
b := val
return &b
}

View File

@@ -21,10 +21,9 @@ type Input struct {
} }
func ReadRemoteFiles(ctx context.Context, dis []build.DriverInfo, url string, names []string, pw progress.Writer) ([]File, *Input, error) { func ReadRemoteFiles(ctx context.Context, dis []build.DriverInfo, url string, names []string, pw progress.Writer) ([]File, *Input, error) {
var filename string st, filename, ok := detectHTTPContext(url)
st, ok := detectGitContext(url)
if !ok { if !ok {
st, filename, ok = detectHTTPContext(url) st, ok = detectGitContext(url)
if !ok { if !ok {
return nil, nil, errors.Errorf("not url context") return nil, nil, errors.Errorf("not url context")
} }
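
This hunk only swaps the order in which the git and HTTP context detectors are tried. The helper below is a hypothetical illustration of why that order can matter for URLs that both detectors would accept; the names and rules are assumptions, not the buildx detectGitContext/detectHTTPContext implementation.

package main

import (
	"fmt"
	"strings"
)

// classifyRemote sketches the decision ReadRemoteFiles has to make: is the
// URL a git repository context or a plain HTTP(S) context? Because the git
// branch is checked first here, a repository URL served over HTTPS is still
// classified as git.
func classifyRemote(url string) string {
	switch {
	case strings.HasPrefix(url, "git@"), strings.HasSuffix(url, ".git"),
		strings.Contains(url, ".git#"):
		return "git"
	case strings.HasPrefix(url, "http://"), strings.HasPrefix(url, "https://"):
		return "http"
	default:
		return "unknown"
	}
}

func main() {
	for _, u := range []string{
		"git@github.com:docker/buildx.git",
		"https://github.com/docker/buildx.git#v0.6.1",
		"https://example.com/docker-bake.hcl",
	} {
		fmt.Println(u, "->", classifyRemote(u))
	}
}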

View File

@@ -3,8 +3,6 @@ package build
import ( import (
"bufio" "bufio"
"context" "context"
"crypto/rand"
"encoding/hex"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
@@ -21,8 +19,7 @@ import (
"github.com/docker/buildx/driver" "github.com/docker/buildx/driver"
"github.com/docker/buildx/util/imagetools" "github.com/docker/buildx/util/imagetools"
"github.com/docker/buildx/util/progress" "github.com/docker/buildx/util/progress"
"github.com/docker/buildx/util/resolver" clitypes "github.com/docker/cli/cli/config/types"
"github.com/docker/cli/opts"
"github.com/docker/distribution/reference" "github.com/docker/distribution/reference"
"github.com/docker/docker/api/types" "github.com/docker/docker/api/types"
dockerclient "github.com/docker/docker/client" dockerclient "github.com/docker/docker/client"
@@ -51,26 +48,26 @@ var (
) )
type Options struct { type Options struct {
Inputs Inputs Inputs Inputs
Tags []string
Labels map[string]string
BuildArgs map[string]string
Pull bool
ImageIDFile string
ExtraHosts []string
NetworkMode string
Allow []entitlements.Entitlement NoCache bool
BuildArgs map[string]string Target string
CacheFrom []client.CacheOptionsEntry Platforms []specs.Platform
CacheTo []client.CacheOptionsEntry Exports []client.ExportEntry
CgroupParent string Session []session.Attachable
Exports []client.ExportEntry
ExtraHosts []string CacheFrom []client.CacheOptionsEntry
ImageIDFile string CacheTo []client.CacheOptionsEntry
Labels map[string]string
NetworkMode string Allow []entitlements.Entitlement
NoCache bool // DockerTarget
Platforms []specs.Platform
Pull bool
Session []session.Attachable
ShmSize opts.MemBytes
Tags []string
Target string
Ulimits *opts.UlimitOpt
} }
type Inputs struct { type Inputs struct {
@@ -86,7 +83,10 @@ type DriverInfo struct {
Name string Name string
Platform []specs.Platform Platform []specs.Platform
Err error Err error
ImageOpt imagetools.Opt }
type Auth interface {
GetAuthConfig(registryHostname string) (clitypes.AuthConfig, error)
} }
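
The Auth interface shown on the right-hand side is satisfied by the CLI's config file, which is why later hunks simply pass dockerCli.ConfigFile() where an Auth is needed. A minimal sketch of that wiring, assuming the config file is loaded from the default location:

package main

import (
	"fmt"
	"os"

	"github.com/docker/cli/cli/config"
	clitypes "github.com/docker/cli/cli/config/types"
)

// Auth mirrors the interface from the hunk above.
type Auth interface {
	GetAuthConfig(registryHostname string) (clitypes.AuthConfig, error)
}

func main() {
	// *configfile.ConfigFile implements GetAuthConfig, so it can be used
	// directly as the Auth dependency.
	var a Auth = config.LoadDefaultConfigFile(os.Stderr)
	ac, err := a.GetAuthConfig("registry-1.docker.io")
	fmt.Println(ac.ServerAddress, err)
}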
type DockerAPI interface { type DockerAPI interface {
@@ -186,8 +186,8 @@ func splitToDriverPairs(availablePlatforms map[string]int, opt map[string]Option
return m return m
} }
func resolveDrivers(ctx context.Context, drivers []DriverInfo, opt map[string]Options, pw progress.Writer) (map[string][]driverPair, []*client.Client, error) { func resolveDrivers(ctx context.Context, drivers []DriverInfo, auth Auth, opt map[string]Options, pw progress.Writer) (map[string][]driverPair, []*client.Client, error) {
dps, clients, err := resolveDriversBase(ctx, drivers, opt, pw) dps, clients, err := resolveDriversBase(ctx, drivers, auth, opt, pw)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
@@ -214,7 +214,8 @@ func resolveDrivers(ctx context.Context, drivers []DriverInfo, opt map[string]Op
} }
err = eg.Wait() err = eg.Wait()
tracing.FinishWithError(span, err) span.RecordError(err)
span.End()
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
@@ -227,7 +228,7 @@ func resolveDrivers(ctx context.Context, drivers []DriverInfo, opt map[string]Op
return dps, clients, nil return dps, clients, nil
} }
func resolveDriversBase(ctx context.Context, drivers []DriverInfo, opt map[string]Options, pw progress.Writer) (map[string][]driverPair, []*client.Client, error) { func resolveDriversBase(ctx context.Context, drivers []DriverInfo, auth Auth, opt map[string]Options, pw progress.Writer) (map[string][]driverPair, []*client.Client, error) {
availablePlatforms := map[string]int{} availablePlatforms := map[string]int{}
for i, d := range drivers { for i, d := range drivers {
for _, p := range d.Platform { for _, p := range d.Platform {
@@ -333,7 +334,7 @@ func toRepoOnly(in string) (string, error) {
return strings.Join(out, ","), nil return strings.Join(out, ","), nil
} }
func toSolveOpt(ctx context.Context, d driver.Driver, multiDriver bool, opt Options, bopts gateway.BuildOpts, configDir string, pw progress.Writer, dl dockerLoadCallback) (solveOpt *client.SolveOpt, release func(), err error) { func toSolveOpt(ctx context.Context, d driver.Driver, multiDriver bool, opt Options, bopts gateway.BuildOpts, pw progress.Writer, dl dockerLoadCallback) (solveOpt *client.SolveOpt, release func(), err error) {
defers := make([]func(), 0, 2) defers := make([]func(), 0, 2)
releaseF := func() { releaseF := func() {
for _, f := range defers { for _, f := range defers {
@@ -399,10 +400,6 @@ func toSolveOpt(ctx context.Context, d driver.Driver, multiDriver bool, opt Opti
AllowedEntitlements: opt.Allow, AllowedEntitlements: opt.Allow,
} }
if opt.CgroupParent != "" {
so.FrontendAttrs["cgroup-parent"] = opt.CgroupParent
}
if v, ok := opt.BuildArgs["BUILDKIT_MULTI_PLATFORM"]; ok { if v, ok := opt.BuildArgs["BUILDKIT_MULTI_PLATFORM"]; ok {
if v, _ := strconv.ParseBool(v); v { if v, _ := strconv.ParseBool(v); v {
so.FrontendAttrs["multi-platform"] = "true" so.FrontendAttrs["multi-platform"] = "true"
@@ -510,13 +507,6 @@ func toSolveOpt(ctx context.Context, d driver.Driver, multiDriver bool, opt Opti
} }
defers = append(defers, releaseLoad) defers = append(defers, releaseLoad)
if sharedKey := so.LocalDirs["context"]; sharedKey != "" {
if p, err := filepath.Abs(sharedKey); err == nil {
sharedKey = filepath.Base(p)
}
so.SharedKey = sharedKey + ":" + tryNodeIdentifier(configDir)
}
if opt.Pull { if opt.Pull {
so.FrontendAttrs["image-resolve-mode"] = "pull" so.FrontendAttrs["image-resolve-mode"] = "pull"
} }
@@ -554,7 +544,7 @@ func toSolveOpt(ctx context.Context, d driver.Driver, multiDriver bool, opt Opti
so.FrontendAttrs["force-network-mode"] = opt.NetworkMode so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
case "", "default": case "", "default":
default: default:
return nil, nil, errors.Errorf("network mode %q not supported by buildkit. You can define a custom network for your builder using the network driver-opt in buildx create.", opt.NetworkMode) return nil, nil, errors.Errorf("network mode %q not supported by buildkit", opt.NetworkMode)
} }
// setup extrahosts // setup extrahosts
@@ -564,23 +554,10 @@ func toSolveOpt(ctx context.Context, d driver.Driver, multiDriver bool, opt Opti
} }
so.FrontendAttrs["add-hosts"] = extraHosts so.FrontendAttrs["add-hosts"] = extraHosts
// setup shm size
if opt.ShmSize.Value() > 0 {
so.FrontendAttrs["shm-size"] = strconv.FormatInt(opt.ShmSize.Value(), 10)
}
// setup ulimits
ulimits, err := toBuildkitUlimits(opt.Ulimits)
if err != nil {
return nil, nil, err
} else if len(ulimits) > 0 {
so.FrontendAttrs["ulimit"] = ulimits
}
return &so, releaseF, nil return &so, releaseF, nil
} }
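
Most of the options touched in this hunk (add-hosts, shm-size, ulimit, cgroup-parent) travel to BuildKit as plain string frontend attributes on the SolveOpt. The values below are examples, not defaults; the attribute names are the ones used in the code above.

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Frontend attributes are an untyped map[string]string on
	// client.SolveOpt; the frontend parses them back on the builder side.
	frontendAttrs := map[string]string{}
	frontendAttrs["add-hosts"] = "myhost=10.0.0.1,other=10.0.0.2"
	frontendAttrs["shm-size"] = strconv.FormatInt(128*1024*1024, 10) // bytes
	frontendAttrs["ulimit"] = "nofile=1024:1024"
	frontendAttrs["cgroup-parent"] = "/my-cgroup"
	fmt.Println(frontendAttrs)
}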
func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, docker DockerAPI, configDir string, w progress.Writer) (resp map[string]*client.SolveResponse, err error) { func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, docker DockerAPI, auth Auth, w progress.Writer) (resp map[string]*client.SolveResponse, err error) {
if len(drivers) == 0 { if len(drivers) == 0 {
return nil, errors.Errorf("driver required for build") return nil, errors.Errorf("driver required for build")
} }
@@ -607,7 +584,7 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
} }
} }
m, clients, err := resolveDrivers(ctx, drivers, opt, w) m, clients, err := resolveDrivers(ctx, drivers, auth, opt, w)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -632,7 +609,7 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
hasMobyDriver = true hasMobyDriver = true
} }
opt.Platforms = dp.platforms opt.Platforms = dp.platforms
so, release, err := toSolveOpt(ctx, d, multiDriver, opt, dp.bopts, configDir, w, func(name string) (io.WriteCloser, func(), error) { so, release, err := toSolveOpt(ctx, d, multiDriver, opt, dp.bopts, w, func(name string) (io.WriteCloser, func(), error) {
return newDockerLoader(ctx, docker, name, w) return newDockerLoader(ctx, docker, name, w)
}) })
if err != nil { if err != nil {
@@ -687,12 +664,12 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
wg.Add(len(dps)) wg.Add(len(dps))
var pushNames string var pushNames string
var insecurePush bool
eg.Go(func() (err error) { eg.Go(func() (err error) {
defer func() { defer func() {
if span != nil { if span != nil {
tracing.FinishWithError(span, err) span.RecordError(err)
span.End()
} }
}() }()
pw := progress.WithPrefix(w, "default", false) pw := progress.WithPrefix(w, "default", false)
@@ -707,9 +684,8 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
resp[k] = res[0] resp[k] = res[0]
respMu.Unlock() respMu.Unlock()
if len(res) == 1 { if len(res) == 1 {
digest := res[0].ExporterResponse["containerimage.digest"]
if opt.ImageIDFile != "" { if opt.ImageIDFile != "" {
return ioutil.WriteFile(opt.ImageIDFile, []byte(digest), 0644) return ioutil.WriteFile(opt.ImageIDFile, []byte(res[0].ExporterResponse["containerimage.digest"]), 0644)
} }
return nil return nil
} }
@@ -729,41 +705,22 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
} }
} }
if len(descs) > 0 { if len(descs) > 0 {
var imageopt imagetools.Opt itpull := imagetools.New(imagetools.Opt{
for _, dp := range dps { Auth: auth,
imageopt = drivers[dp.driverIndex].ImageOpt })
break
}
names := strings.Split(pushNames, ",") names := strings.Split(pushNames, ",")
if insecurePush {
insecureTrue := true
httpTrue := true
nn, err := reference.ParseNormalizedNamed(names[0])
if err != nil {
return err
}
imageopt.RegistryConfig = map[string]resolver.RegistryConfig{
reference.Domain(nn): {
Insecure: &insecureTrue,
PlainHTTP: &httpTrue,
},
}
}
itpull := imagetools.New(imageopt)
dt, desc, err := itpull.Combine(ctx, names[0], descs) dt, desc, err := itpull.Combine(ctx, names[0], descs)
if err != nil { if err != nil {
return err return err
} }
if opt.ImageIDFile != "" { if opt.ImageIDFile != "" {
if err := ioutil.WriteFile(opt.ImageIDFile, []byte(desc.Digest), 0644); err != nil { return ioutil.WriteFile(opt.ImageIDFile, []byte(desc.Digest), 0644)
return err
}
} }
itpush := imagetools.New(imageopt) itpush := imagetools.New(imagetools.Opt{
Auth: auth,
})
for _, n := range names { for _, n := range names {
nn, err := reference.ParseNormalizedNamed(n) nn, err := reference.ParseNormalizedNamed(n)
@@ -808,9 +765,6 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
if err != nil { if err != nil {
return err return err
} }
if ok, _ := strconv.ParseBool(e.Attrs["registry.insecure"]); ok {
insecurePush = true
}
e.Attrs["name"] = names e.Attrs["name"] = names
e.Attrs["push-by-digest"] = "true" e.Attrs["push-by-digest"] = "true"
so.Exports[i].Attrs = e.Attrs so.Exports[i].Attrs = e.Attrs
@@ -1204,28 +1158,3 @@ func wrapWriteCloser(wc io.WriteCloser) func(map[string]string) (io.WriteCloser,
return wc, nil return wc, nil
} }
} }
var nodeIdentifierMu sync.Mutex
func tryNodeIdentifier(configDir string) (out string) {
nodeIdentifierMu.Lock()
defer nodeIdentifierMu.Unlock()
sessionFile := filepath.Join(configDir, ".buildNodeID")
if _, err := os.Lstat(sessionFile); err != nil {
if os.IsNotExist(err) { // create a new file with stored randomness
b := make([]byte, 8)
if _, err := rand.Read(b); err != nil {
return out
}
if err := ioutil.WriteFile(sessionFile, []byte(hex.EncodeToString(b)), 0600); err != nil {
return out
}
}
}
dt, err := ioutil.ReadFile(sessionFile)
if err == nil {
return string(dt)
}
return
}

View File

@@ -7,7 +7,6 @@ import (
"os" "os"
"strings" "strings"
"github.com/docker/cli/opts"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
@@ -54,15 +53,3 @@ func toBuildkitExtraHosts(inp []string) (string, error) {
} }
return strings.Join(hosts, ","), nil return strings.Join(hosts, ","), nil
} }
// toBuildkitUlimits converts ulimits from docker type=soft:hard format to buildkit's csv format
func toBuildkitUlimits(inp *opts.UlimitOpt) (string, error) {
if inp == nil || len(inp.GetList()) == 0 {
return "", nil
}
ulimits := make([]string, 0, len(inp.GetList()))
for _, ulimit := range inp.GetList() {
ulimits = append(ulimits, ulimit.String())
}
return strings.Join(ulimits, ","), nil
}
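
For reference, the UlimitOpt consumed by toBuildkitUlimits is populated from repeated --ulimit flags. The usage sketch below assumes the docker/cli opts and go-units APIs behave as they are used elsewhere in this diff; it reproduces the same soft:hard rendering joined into a CSV string.

package main

import (
	"fmt"
	"strings"

	dockeropts "github.com/docker/cli/opts"
	units "github.com/docker/go-units"
)

func main() {
	// --ulimit nofile=1024:1024 --ulimit nproc=512 end up in an UlimitOpt.
	ref := make(map[string]*units.Ulimit)
	ulimits := dockeropts.NewUlimitOpt(&ref)
	_ = ulimits.Set("nofile=1024:1024")
	_ = ulimits.Set("nproc=512")

	// Render each limit as "name=soft:hard" and join with commas.
	vals := make([]string, 0, len(ulimits.GetList()))
	for _, u := range ulimits.GetList() {
		vals = append(vals, u.String()) // e.g. "nofile=1024:1024"
	}
	fmt.Println(strings.Join(vals, ","))
}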

View File

@@ -6,10 +6,8 @@ import (
"fmt" "fmt"
"os" "os"
"github.com/containerd/containerd/platforms"
"github.com/docker/buildx/bake" "github.com/docker/buildx/bake"
"github.com/docker/buildx/build" "github.com/docker/buildx/build"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/progress" "github.com/docker/buildx/util/progress"
"github.com/docker/buildx/util/tracing" "github.com/docker/buildx/util/tracing"
"github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command"
@@ -21,8 +19,8 @@ import (
type bakeOptions struct { type bakeOptions struct {
files []string files []string
overrides []string
printOnly bool printOnly bool
overrides []string
commonOptions commonOptions
} }
@@ -38,7 +36,6 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
}() }()
var url string var url string
var noTarget bool
cmdContext := "cwd://" cmdContext := "cwd://"
if len(targets) > 0 { if len(targets) > 0 {
@@ -57,7 +54,6 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
if len(targets) == 0 { if len(targets) == 0 {
targets = []string{"default"} targets = []string{"default"}
noTarget = true
} }
overrides := in.overrides overrides := in.overrides
@@ -65,7 +61,7 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
if in.exportLoad { if in.exportLoad {
return errors.Errorf("push and load may not be set together at the moment") return errors.Errorf("push and load may not be set together at the moment")
} }
overrides = append(overrides, "*.push=true") overrides = append(overrides, "*.output=type=registry")
} else if in.exportLoad { } else if in.exportLoad {
overrides = append(overrides, "*.output=type=docker") overrides = append(overrides, "*.output=type=docker")
} }
@@ -107,43 +103,21 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
return err return err
} }
t, g, err := bake.ReadTargets(ctx, files, targets, overrides, map[string]string{ m, err := bake.ReadTargets(ctx, files, targets, overrides, map[string]string{
// Don't forget to update documentation if you add a new "BAKE_CMD_CONTEXT": cmdContext,
// built-in variable: docs/reference/buildx_bake.md#built-in-variables
"BAKE_CMD_CONTEXT": cmdContext,
"BAKE_LOCAL_PLATFORM": platforms.DefaultString(),
}) })
if err != nil { if err != nil {
return err return err
} }
// this function can update target context string from the input so call before printOnly check // this function can update target context string from the input so call before printOnly check
bo, err := bake.TargetsToBuildOpt(t, inp) bo, err := bake.TargetsToBuildOpt(m, inp)
if err != nil { if err != nil {
return err return err
} }
if in.printOnly { if in.printOnly {
defGroup := map[string][]string{ dt, err := json.MarshalIndent(map[string]map[string]*bake.Target{"target": m}, "", " ")
"default": targets,
}
if noTarget {
for _, group := range g {
if group.Name != "default" {
continue
}
defGroup = map[string][]string{
"default": group.Targets,
}
}
}
dt, err := json.MarshalIndent(struct {
Group map[string][]string `json:"group,omitempty"`
Target map[string]*bake.Target `json:"target"`
}{
defGroup,
t,
}, "", " ")
if err != nil { if err != nil {
return err return err
} }
@@ -156,7 +130,7 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
return nil return nil
} }
resp, err := build.Build(ctx, dis, bo, dockerAPI(dockerCli), confutil.ConfigDir(dockerCli), printer) resp, err := build.Build(ctx, dis, bo, dockerAPI(dockerCli), dockerCli.ConfigFile(), printer)
if err != nil { if err != nil {
return err return err
} }
@@ -201,10 +175,10 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
flags := cmd.Flags() flags := cmd.Flags()
flags.StringArrayVarP(&options.files, "file", "f", []string{}, "Build definition file") flags.StringArrayVarP(&options.files, "file", "f", []string{}, "Build definition file")
flags.BoolVar(&options.exportLoad, "load", false, "Shorthand for `--set=*.output=type=docker`")
flags.BoolVar(&options.printOnly, "print", false, "Print the options without building") flags.BoolVar(&options.printOnly, "print", false, "Print the options without building")
flags.BoolVar(&options.exportPush, "push", false, "Shorthand for `--set=*.output=type=registry`") flags.StringArrayVar(&options.overrides, "set", nil, "Override target value (eg: targetpattern.key=value)")
flags.StringArrayVar(&options.overrides, "set", nil, "Override target value (e.g., `targetpattern.key=value`)") flags.BoolVar(&options.exportPush, "push", false, "Shorthand for --set=*.output=type=registry")
flags.BoolVar(&options.exportLoad, "load", false, "Shorthand for --set=*.output=type=docker")
commonBuildFlags(&options.commonOptions, flags) commonBuildFlags(&options.commonOptions, flags)

View File

@@ -3,22 +3,18 @@ package commands
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"fmt"
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
"github.com/docker/buildx/build" "github.com/docker/buildx/build"
"github.com/docker/buildx/util/buildflags" "github.com/docker/buildx/util/buildflags"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/platformutil" "github.com/docker/buildx/util/platformutil"
"github.com/docker/buildx/util/progress" "github.com/docker/buildx/util/progress"
"github.com/docker/buildx/util/tracing" "github.com/docker/buildx/util/tracing"
"github.com/docker/cli/cli" "github.com/docker/cli/cli"
"github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command"
dockeropts "github.com/docker/cli/opts"
"github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/ioutils"
"github.com/docker/go-units"
"github.com/moby/buildkit/client" "github.com/moby/buildkit/client"
"github.com/moby/buildkit/session/auth/authprovider" "github.com/moby/buildkit/session/auth/authprovider"
"github.com/moby/buildkit/util/appcontext" "github.com/moby/buildkit/util/appcontext"
@@ -31,37 +27,53 @@ import (
const defaultTargetName = "default" const defaultTargetName = "default"
type buildOptions struct { type buildOptions struct {
commonOptions
contextPath string contextPath string
dockerfileName string dockerfileName string
tags []string
labels []string
buildArgs []string
allow []string cacheFrom []string
buildArgs []string cacheTo []string
cacheFrom []string target string
cacheTo []string platforms []string
cgroupParent string secrets []string
extraHosts []string ssh []string
imageIDFile string outputs []string
labels []string imageIDFile string
networkMode string extraHosts []string
outputs []string networkMode string
platforms []string
quiet bool // unimplemented
secrets []string squash bool
shmSize dockeropts.MemBytes quiet bool
ssh []string
tags []string allow []string
target string
ulimits *dockeropts.UlimitOpt // hidden
commonOptions // untrusted bool
// ulimits *opts.UlimitOpt
// memory opts.MemBytes
// memorySwap opts.MemSwapBytes
// shmSize opts.MemBytes
// cpuShares int64
// cpuPeriod int64
// cpuQuota int64
// cpuSetCpus string
// cpuSetMems string
// cgroupParent string
// isolation string
// compress bool
// securityOpt []string
} }
type commonOptions struct { type commonOptions struct {
builder string builder string
metadataFile string
noCache *bool noCache *bool
progress string progress string
pull *bool pull *bool
metadataFile string
// golangci-lint#826 // golangci-lint#826
// nolint:structcheck // nolint:structcheck
exportPush bool exportPush bool
@@ -70,6 +82,13 @@ type commonOptions struct {
} }
func runBuild(dockerCli command.Cli, in buildOptions) (err error) { func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
if in.squash {
return errors.Errorf("squash currently not implemented")
}
if in.quiet {
logrus.Warnf("quiet currently not implemented")
}
ctx := appcontext.Context() ctx := appcontext.Context()
ctx, end, err := tracing.TraceCurrentCommand(ctx, "build") ctx, end, err := tracing.TraceCurrentCommand(ctx, "build")
@@ -89,29 +108,21 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
pull = *in.pull pull = *in.pull
} }
if in.quiet && in.progress != "auto" && in.progress != "quiet" {
return errors.Errorf("progress=%s and quiet cannot be used together", in.progress)
} else if in.quiet {
in.progress = "quiet"
}
opts := build.Options{ opts := build.Options{
Inputs: build.Inputs{ Inputs: build.Inputs{
ContextPath: in.contextPath, ContextPath: in.contextPath,
DockerfilePath: in.dockerfileName, DockerfilePath: in.dockerfileName,
InStream: os.Stdin, InStream: os.Stdin,
}, },
BuildArgs: listToMap(in.buildArgs, true),
ExtraHosts: in.extraHosts,
ImageIDFile: in.imageIDFile,
Labels: listToMap(in.labels, false),
NetworkMode: in.networkMode,
NoCache: noCache,
Pull: pull,
ShmSize: in.shmSize,
Tags: in.tags, Tags: in.tags,
Labels: listToMap(in.labels, false),
BuildArgs: listToMap(in.buildArgs, true),
Pull: pull,
NoCache: noCache,
Target: in.target, Target: in.target,
Ulimits: in.ulimits, ImageIDFile: in.imageIDFile,
ExtraHosts: in.extraHosts,
NetworkMode: in.networkMode,
} }
platforms, err := platformutil.Parse(in.platforms) platforms, err := platformutil.Parse(in.platforms)
@@ -203,59 +214,43 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
contextPathHash = in.contextPath contextPathHash = in.contextPath
} }
imageID, err := buildTargets(ctx, dockerCli, map[string]build.Options{defaultTargetName: opts}, in.progress, contextPathHash, in.builder, in.metadataFile) return buildTargets(ctx, dockerCli, map[string]build.Options{defaultTargetName: opts}, in.progress, contextPathHash, in.builder, in.metadataFile)
}
func buildTargets(ctx context.Context, dockerCli command.Cli, opts map[string]build.Options, progressMode, contextPathHash, instance string, metadataFile string) error {
dis, err := getInstanceOrDefault(ctx, dockerCli, instance, contextPathHash)
if err != nil { if err != nil {
return err return err
} }
if in.quiet {
fmt.Println(imageID)
}
return nil
}
func buildTargets(ctx context.Context, dockerCli command.Cli, opts map[string]build.Options, progressMode, contextPathHash, instance string, metadataFile string) (imageID string, err error) {
dis, err := getInstanceOrDefault(ctx, dockerCli, instance, contextPathHash)
if err != nil {
return "", err
}
ctx2, cancel := context.WithCancel(context.TODO()) ctx2, cancel := context.WithCancel(context.TODO())
defer cancel() defer cancel()
printer := progress.NewPrinter(ctx2, os.Stderr, progressMode) printer := progress.NewPrinter(ctx2, os.Stderr, progressMode)
resp, err := build.Build(ctx, dis, opts, dockerAPI(dockerCli), confutil.ConfigDir(dockerCli), printer) resp, err := build.Build(ctx, dis, opts, dockerAPI(dockerCli), dockerCli.ConfigFile(), printer)
err1 := printer.Wait() err1 := printer.Wait()
if err == nil { if err == nil {
err = err1 err = err1
} }
if err != nil { if err != nil {
return "", err return err
} }
if len(metadataFile) > 0 && resp != nil { if len(metadataFile) > 0 && resp != nil {
mdatab, err := json.MarshalIndent(resp[defaultTargetName].ExporterResponse, "", " ") mdatab, err := json.MarshalIndent(resp[defaultTargetName].ExporterResponse, "", " ")
if err != nil { if err != nil {
return "", err return err
} }
if err := ioutils.AtomicWriteFile(metadataFile, mdatab, 0644); err != nil { if err := ioutils.AtomicWriteFile(metadataFile, mdatab, 0644); err != nil {
return "", err return err
} }
} }
return resp[defaultTargetName].ExporterResponse["containerimage.digest"], err return err
}
func newBuildOptions() buildOptions {
ulimits := make(map[string]*units.Ulimit)
return buildOptions{
ulimits: dockeropts.NewUlimitOpt(&ulimits),
}
} }
func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command { func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
options := newBuildOptions() var options buildOptions
cmd := &cobra.Command{ cmd := &cobra.Command{
Use: "build [OPTIONS] PATH | URL | -", Use: "build [OPTIONS] PATH | URL | -",
@@ -265,138 +260,103 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
RunE: func(cmd *cobra.Command, args []string) error { RunE: func(cmd *cobra.Command, args []string) error {
options.contextPath = args[0] options.contextPath = args[0]
options.builder = rootOpts.builder options.builder = rootOpts.builder
cmd.Flags().VisitAll(checkWarnedFlags)
return runBuild(dockerCli, options) return runBuild(dockerCli, options)
}, },
} }
var platformsDefault []string
if v := os.Getenv("DOCKER_DEFAULT_PLATFORM"); v != "" {
platformsDefault = []string{v}
}
flags := cmd.Flags() flags := cmd.Flags()
flags.StringSliceVar(&options.extraHosts, "add-host", []string{}, "Add a custom host-to-IP mapping (format: `host:ip`)") flags.BoolVar(&options.exportPush, "push", false, "Shorthand for --output=type=registry")
flags.SetAnnotation("add-host", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#add-entries-to-container-hosts-file---add-host"}) flags.BoolVar(&options.exportLoad, "load", false, "Shorthand for --output=type=docker")
flags.StringSliceVar(&options.allow, "allow", []string{}, "Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`)")
flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, "Name and optionally a tag in the 'name:tag' format")
flags.SetAnnotation("tag", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t"})
flags.StringArrayVar(&options.buildArgs, "build-arg", []string{}, "Set build-time variables") flags.StringArrayVar(&options.buildArgs, "build-arg", []string{}, "Set build-time variables")
flags.SetAnnotation("build-arg", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables---build-arg"}) flags.SetAnnotation("build-arg", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables---build-arg"})
flags.StringArrayVar(&options.cacheFrom, "cache-from", []string{}, "External cache sources (e.g., `user/app:cache`, `type=local,src=path/to/dir`)") flags.StringVarP(&options.dockerfileName, "file", "f", "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')")
flags.StringArrayVar(&options.cacheTo, "cache-to", []string{}, "Cache export destinations (e.g., `user/app:cache`, `type=local,dest=path/to/dir`)")
flags.StringVar(&options.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container")
flags.SetAnnotation("cgroup-parent", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#use-a-custom-parent-cgroup---cgroup-parent"})
flags.StringVarP(&options.dockerfileName, "file", "f", "", "Name of the Dockerfile (default: `PATH/Dockerfile`)")
flags.SetAnnotation("file", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f"}) flags.SetAnnotation("file", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f"})
flags.StringVar(&options.imageIDFile, "iidfile", "", "Write the image ID to the file")
flags.StringArrayVar(&options.labels, "label", []string{}, "Set metadata for an image") flags.StringArrayVar(&options.labels, "label", []string{}, "Set metadata for an image")
flags.BoolVar(&options.exportLoad, "load", false, "Shorthand for `--output=type=docker`") flags.StringArrayVar(&options.cacheFrom, "cache-from", []string{}, "External cache sources (eg. user/app:cache, type=local,src=path/to/dir)")
flags.StringArrayVar(&options.cacheTo, "cache-to", []string{}, "Cache export destinations (eg. user/app:cache, type=local,dest=path/to/dir)")
flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build")
flags.StringArrayVarP(&options.outputs, "output", "o", []string{}, "Output destination (format: `type=local,dest=path`)")
flags.StringArrayVar(&options.platforms, "platform", platformsDefault, "Set target platform for build")
flags.BoolVar(&options.exportPush, "push", false, "Shorthand for `--output=type=registry`")
flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success")
flags.StringArrayVar(&options.secrets, "secret", []string{}, "Secret file to expose to the build (format: `id=mysecret,src=/local/secret`)")
flags.Var(&options.shmSize, "shm-size", "Size of `/dev/shm`")
flags.StringArrayVar(&options.ssh, "ssh", []string{}, "SSH agent socket or keys to expose to the build (format: `default|<id>[=<socket>|<key>[,<key>]]`)")
flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, "Name and optionally a tag (format: `name:tag`)")
flags.SetAnnotation("tag", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t"})
flags.StringVar(&options.target, "target", "", "Set the target build stage to build.") flags.StringVar(&options.target, "target", "", "Set the target build stage to build.")
flags.SetAnnotation("target", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#specifying-target-build-stage---target"}) flags.SetAnnotation("target", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#specifying-target-build-stage---target"})
flags.Var(options.ulimits, "ulimit", "Ulimit options") flags.StringSliceVar(&options.allow, "allow", []string{}, "Allow extra privileged entitlement, e.g. network.host, security.insecure")
// not implemented
flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success")
flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build")
flags.StringSliceVar(&options.extraHosts, "add-host", []string{}, "Add a custom host-to-IP mapping (host:ip)")
flags.SetAnnotation("add-host", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#add-entries-to-container-hosts-file---add-host"})
flags.StringVar(&options.imageIDFile, "iidfile", "", "Write the image ID to the file")
flags.BoolVar(&options.squash, "squash", false, "Squash newly built layers into a single new layer")
flags.MarkHidden("quiet")
flags.MarkHidden("squash")
// hidden flags // hidden flags
var ignore string var ignore string
var ignoreSlice []string var ignoreSlice []string
var ignoreBool bool var ignoreBool bool
var ignoreInt int64 var ignoreInt int64
flags.StringVar(&ignore, "ulimit", "", "Ulimit options")
flags.BoolVar(&ignoreBool, "compress", false, "Compress the build context using gzip") flags.MarkHidden("ulimit")
flags.MarkHidden("compress")
flags.StringVar(&ignore, "isolation", "", "Container isolation technology")
flags.MarkHidden("isolation")
flags.SetAnnotation("isolation", "flag-warn", []string{"isolation flag is deprecated with BuildKit."})
flags.StringSliceVar(&ignoreSlice, "security-opt", []string{}, "Security options") flags.StringSliceVar(&ignoreSlice, "security-opt", []string{}, "Security options")
flags.MarkHidden("security-opt") flags.MarkHidden("security-opt")
flags.SetAnnotation("security-opt", "flag-warn", []string{`security-opt flag is deprecated. "RUN --security=insecure" should be used with BuildKit.`}) flags.BoolVar(&ignoreBool, "compress", false, "Compress the build context using gzip")
flags.MarkHidden("compress")
flags.BoolVar(&ignoreBool, "squash", false, "Squash newly built layers into a single new layer")
flags.MarkHidden("squash")
flags.SetAnnotation("squash", "flag-warn", []string{"experimental flag squash is removed with BuildKit. You should squash inside build using a multi-stage Dockerfile for efficiency."})
flags.StringVarP(&ignore, "memory", "m", "", "Memory limit") flags.StringVarP(&ignore, "memory", "m", "", "Memory limit")
flags.MarkHidden("memory") flags.MarkHidden("memory")
flags.StringVar(&ignore, "memory-swap", "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap")
flags.StringVar(&ignore, "memory-swap", "", "Swap limit equal to memory plus swap: `-1` to enable unlimited swap")
flags.MarkHidden("memory-swap") flags.MarkHidden("memory-swap")
flags.StringVar(&ignore, "shm-size", "", "Size of /dev/shm")
flags.MarkHidden("shm-size")
flags.Int64VarP(&ignoreInt, "cpu-shares", "c", 0, "CPU shares (relative weight)") flags.Int64VarP(&ignoreInt, "cpu-shares", "c", 0, "CPU shares (relative weight)")
flags.MarkHidden("cpu-shares") flags.MarkHidden("cpu-shares")
flags.Int64Var(&ignoreInt, "cpu-period", 0, "Limit the CPU CFS (Completely Fair Scheduler) period") flags.Int64Var(&ignoreInt, "cpu-period", 0, "Limit the CPU CFS (Completely Fair Scheduler) period")
flags.MarkHidden("cpu-period") flags.MarkHidden("cpu-period")
flags.Int64Var(&ignoreInt, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota") flags.Int64Var(&ignoreInt, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
flags.MarkHidden("cpu-quota") flags.MarkHidden("cpu-quota")
flags.StringVar(&ignore, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)")
flags.StringVar(&ignore, "cpuset-cpus", "", "CPUs in which to allow execution (`0-3`, `0,1`)")
flags.MarkHidden("cpuset-cpus") flags.MarkHidden("cpuset-cpus")
flags.StringVar(&ignore, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)")
flags.StringVar(&ignore, "cpuset-mems", "", "MEMs in which to allow execution (`0-3`, `0,1`)")
flags.MarkHidden("cpuset-mems") flags.MarkHidden("cpuset-mems")
flags.StringVar(&ignore, "cgroup-parent", "", "Optional parent cgroup for the container")
flags.MarkHidden("cgroup-parent")
flags.StringVar(&ignore, "isolation", "", "Container isolation technology")
flags.MarkHidden("isolation")
flags.BoolVar(&ignoreBool, "rm", true, "Remove intermediate containers after a successful build") flags.BoolVar(&ignoreBool, "rm", true, "Remove intermediate containers after a successful build")
flags.MarkHidden("rm") flags.MarkHidden("rm")
flags.BoolVar(&ignoreBool, "force-rm", false, "Always remove intermediate containers") flags.BoolVar(&ignoreBool, "force-rm", false, "Always remove intermediate containers")
flags.MarkHidden("force-rm") flags.MarkHidden("force-rm")
platformsDefault := []string{}
if v := os.Getenv("DOCKER_DEFAULT_PLATFORM"); v != "" {
platformsDefault = []string{v}
}
flags.StringArrayVar(&options.platforms, "platform", platformsDefault, "Set target platform for build")
flags.StringArrayVar(&options.secrets, "secret", []string{}, "Secret file to expose to the build: id=mysecret,src=/local/secret")
flags.StringArrayVar(&options.ssh, "ssh", []string{}, "SSH agent socket or keys to expose to the build (format: default|<id>[=<socket>|<key>[,<key>]])")
flags.StringArrayVarP(&options.outputs, "output", "o", []string{}, "Output destination (format: type=local,dest=path)")
commonBuildFlags(&options.commonOptions, flags) commonBuildFlags(&options.commonOptions, flags)
return cmd return cmd
} }
func commonBuildFlags(options *commonOptions, flags *pflag.FlagSet) { func commonBuildFlags(options *commonOptions, flags *pflag.FlagSet) {
options.noCache = flags.Bool("no-cache", false, "Do not use cache when building the image") options.noCache = flags.Bool("no-cache", false, "Do not use cache when building the image")
flags.StringVar(&options.progress, "progress", "auto", "Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output") flags.StringVar(&options.progress, "progress", "auto", "Set type of progress output (auto, plain, tty). Use plain to show container output")
options.pull = flags.Bool("pull", false, "Always attempt to pull a newer version of the image") options.pull = flags.Bool("pull", false, "Always attempt to pull a newer version of the image")
flags.StringVar(&options.metadataFile, "metadata-file", "", "Write build result metadata to the file") flags.StringVar(&options.metadataFile, "metadata-file", "", "Write build result metadata to the file")
} }
func checkWarnedFlags(f *pflag.Flag) {
if !f.Changed {
return
}
for t, m := range f.Annotations {
switch t {
case "flag-warn":
logrus.Warn(m[0])
break
}
}
}
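
The flag-warn mechanism introduced here is plain pflag annotations: deprecated flags carry a "flag-warn" annotation, and checkWarnedFlags logs the message only when the flag was actually set by the user. A standalone sketch of the same pattern:

package main

import (
	"github.com/sirupsen/logrus"
	"github.com/spf13/pflag"
)

func main() {
	flags := pflag.NewFlagSet("build", pflag.ContinueOnError)
	var isolation string
	flags.StringVar(&isolation, "isolation", "", "Container isolation technology")
	flags.MarkHidden("isolation")
	flags.SetAnnotation("isolation", "flag-warn",
		[]string{"isolation flag is deprecated with BuildKit."})

	_ = flags.Parse([]string{"--isolation", "hyperv"})

	// Same shape as checkWarnedFlags: only warn for flags the user changed.
	flags.VisitAll(func(f *pflag.Flag) {
		if !f.Changed {
			return
		}
		if msg, ok := f.Annotations["flag-warn"]; ok {
			logrus.Warn(msg[0])
		}
	})
}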
func listToMap(values []string, defaultEnv bool) map[string]string { func listToMap(values []string, defaultEnv bool) map[string]string {
result := make(map[string]string, len(values)) result := make(map[string]string, len(values))
for _, value := range values { for _, value := range values {

View File

@@ -1,19 +1,14 @@
package commands package commands
import ( import (
"bytes"
"context"
"encoding/csv" "encoding/csv"
"fmt" "fmt"
"net/url" "net/url"
"os" "os"
"strings" "strings"
"time"
"github.com/docker/buildx/driver" "github.com/docker/buildx/driver"
"github.com/docker/buildx/store" "github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/cobrautil"
"github.com/docker/cli/cli" "github.com/docker/cli/cli"
"github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command"
"github.com/google/shlex" "github.com/google/shlex"
@@ -34,7 +29,6 @@ type createOptions struct {
flags string flags string
configFile string configFile string
driverOpts []string driverOpts []string
bootstrap bool
// upgrade bool // perform upgrade of the driver // upgrade bool // perform upgrade of the driver
} }
@@ -76,7 +70,7 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
return errors.Errorf("failed to find driver %q", in.driver) return errors.Errorf("failed to find driver %q", in.driver)
} }
txn, release, err := storeutil.GetStore(dockerCli) txn, release, err := getStore(dockerCli)
if err != nil { if err != nil {
return err return err
} }
@@ -144,7 +138,7 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
return errors.Errorf("could not create a builder instance with TLS data loaded from environment. Please use `docker context create <context-name>` to create a context for current environment and then create a builder instance with `docker buildx create <context-name>`") return errors.Errorf("could not create a builder instance with TLS data loaded from environment. Please use `docker context create <context-name>` to create a context for current environment and then create a builder instance with `docker buildx create <context-name>`")
} }
ep, err = storeutil.GetCurrentEndpoint(dockerCli) ep, err = getCurrentEndpoint(dockerCli)
if err != nil { if err != nil {
return err return err
} }
@@ -176,7 +170,7 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
} }
if in.use && ep != "" { if in.use && ep != "" {
current, err := storeutil.GetCurrentEndpoint(dockerCli) current, err := getCurrentEndpoint(dockerCli)
if err != nil { if err != nil {
return err return err
} }
@@ -185,21 +179,6 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
} }
} }
ngi := &nginfo{ng: ng}
timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
defer cancel()
if err = loadNodeGroupData(timeoutCtx, dockerCli, ngi); err != nil {
return err
}
if in.bootstrap {
if _, err = boot(ctx, ngi); err != nil {
return err
}
}
fmt.Printf("%s\n", ng.Name) fmt.Printf("%s\n", ng.Name)
return nil return nil
} }
@@ -207,12 +186,9 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
func createCmd(dockerCli command.Cli) *cobra.Command { func createCmd(dockerCli command.Cli) *cobra.Command {
var options createOptions var options createOptions
var drivers bytes.Buffer var drivers []string
for _, d := range driver.GetFactories() { for s := range driver.GetFactories() {
if len(drivers.String()) > 0 { drivers = append(drivers, s)
drivers.WriteString(", ")
}
drivers.WriteString(fmt.Sprintf("`%s`", d.Name()))
} }
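
One side of this hunk builds the available-driver list with a bytes.Buffer so each name can be backtick-quoted; the other collects plain names into a slice. A neutral sketch of the quoted form using strings.Join; the driver names are examples standing in for driver.GetFactories().

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Stand-in for the factory names; only the formatting matters here.
	names := []string{"docker", "docker-container", "kubernetes"}

	quoted := make([]string, 0, len(names))
	for _, n := range names {
		quoted = append(quoted, fmt.Sprintf("`%s`", n))
	}
	// Same output shape as the bytes.Buffer loop above, used in the flag help:
	fmt.Printf("Driver to use (available: %s)\n", strings.Join(quoted, ", "))
}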
cmd := &cobra.Command{ cmd := &cobra.Command{
@@ -227,20 +203,18 @@ func createCmd(dockerCli command.Cli) *cobra.Command {
flags := cmd.Flags() flags := cmd.Flags()
flags.StringVar(&options.name, "name", "", "Builder instance name") flags.StringVar(&options.name, "name", "", "Builder instance name")
flags.StringVar(&options.driver, "driver", "", fmt.Sprintf("Driver to use (available: %s)", drivers.String())) flags.StringVar(&options.driver, "driver", "", fmt.Sprintf("Driver to use (available: %v)", drivers))
flags.StringVar(&options.nodeName, "node", "", "Create/modify node with given name") flags.StringVar(&options.nodeName, "node", "", "Create/modify node with given name")
flags.StringVar(&options.flags, "buildkitd-flags", "", "Flags for buildkitd daemon") flags.StringVar(&options.flags, "buildkitd-flags", "", "Flags for buildkitd daemon")
flags.StringVar(&options.configFile, "config", "", "BuildKit config file") flags.StringVar(&options.configFile, "config", "", "BuildKit config file")
flags.StringArrayVar(&options.platform, "platform", []string{}, "Fixed platforms for current node") flags.StringArrayVar(&options.platform, "platform", []string{}, "Fixed platforms for current node")
flags.StringArrayVar(&options.driverOpts, "driver-opt", []string{}, "Options for the driver") flags.StringArrayVar(&options.driverOpts, "driver-opt", []string{}, "Options for the driver")
flags.BoolVar(&options.bootstrap, "bootstrap", false, "Boot builder after creation")
flags.BoolVar(&options.actionAppend, "append", false, "Append a node to builder instead of changing it") flags.BoolVar(&options.actionAppend, "append", false, "Append a node to builder instead of changing it")
flags.BoolVar(&options.actionLeave, "leave", false, "Remove a node from builder instead of changing it") flags.BoolVar(&options.actionLeave, "leave", false, "Remove a node from builder instead of changing it")
flags.BoolVar(&options.use, "use", false, "Set the current builder instance") flags.BoolVar(&options.use, "use", false, "Set the current builder instance")
// hide builder persistent flag for this command _ = flags
cobrautil.HideInheritedFlags(cmd, "builder")
return cmd return cmd
} }

View File

@@ -6,8 +6,6 @@ import (
"io/ioutil" "io/ioutil"
"strings" "strings"
"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/imagetools" "github.com/docker/buildx/util/imagetools"
"github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command"
"github.com/docker/distribution/reference" "github.com/docker/distribution/reference"
@@ -20,7 +18,6 @@ import (
) )
type createOptions struct { type createOptions struct {
builder string
files []string files []string
tags []string tags []string
dryrun bool dryrun bool
@@ -104,32 +101,9 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
ctx := appcontext.Context() ctx := appcontext.Context()
txn, release, err := storeutil.GetStore(dockerCli) r := imagetools.New(imagetools.Opt{
if err != nil { Auth: dockerCli.ConfigFile(),
return err })
}
defer release()
var ng *store.NodeGroup
if in.builder != "" {
ng, err = storeutil.GetNodeGroup(txn, dockerCli, in.builder)
if err != nil {
return err
}
} else {
ng, err = storeutil.GetCurrentInstance(txn, dockerCli)
if err != nil {
return err
}
}
imageopt, err := storeutil.GetImageConfig(dockerCli, ng)
if err != nil {
return err
}
r := imagetools.New(imageopt)
if sourceRefs { if sourceRefs {
eg, ctx2 := errgroup.WithContext(ctx) eg, ctx2 := errgroup.WithContext(ctx)
@@ -178,7 +152,9 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
} }
// new resolver cause need new auth // new resolver cause need new auth
r = imagetools.New(imageopt) r = imagetools.New(imagetools.Opt{
Auth: dockerCli.ConfigFile(),
})
for _, t := range tags { for _, t := range tags {
if err := r.Push(ctx, t, desc, dt); err != nil { if err := r.Push(ctx, t, desc, dt); err != nil {
@@ -248,24 +224,26 @@ func parseSource(in string) (*src, error) {
return &s, nil return &s, nil
} }
func createCmd(dockerCli command.Cli, opts RootOptions) *cobra.Command { func createCmd(dockerCli command.Cli) *cobra.Command {
var options createOptions var options createOptions
cmd := &cobra.Command{ cmd := &cobra.Command{
Use: "create [OPTIONS] [SOURCE] [SOURCE...]", Use: "create [OPTIONS] [SOURCE] [SOURCE...]",
Short: "Create a new image based on source images", Short: "Create a new image based on source images",
RunE: func(cmd *cobra.Command, args []string) error { RunE: func(cmd *cobra.Command, args []string) error {
options.builder = opts.Builder
return runCreate(dockerCli, options, args) return runCreate(dockerCli, options, args)
}, },
} }
flags := cmd.Flags() flags := cmd.Flags()
flags.StringArrayVarP(&options.files, "file", "f", []string{}, "Read source descriptor from file") flags.StringArrayVarP(&options.files, "file", "f", []string{}, "Read source descriptor from file")
flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, "Set reference for new image") flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, "Set reference for new image")
flags.BoolVar(&options.dryrun, "dry-run", false, "Show final image instead of pushing") flags.BoolVar(&options.dryrun, "dry-run", false, "Show final image instead of pushing")
flags.BoolVar(&options.actionAppend, "append", false, "Append to existing manifest") flags.BoolVar(&options.actionAppend, "append", false, "Append to existing manifest")
_ = flags
return cmd return cmd
} }

View File

@@ -5,8 +5,6 @@ import (
"os" "os"
"github.com/containerd/containerd/images" "github.com/containerd/containerd/images"
"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/imagetools" "github.com/docker/buildx/util/imagetools"
"github.com/docker/cli/cli" "github.com/docker/cli/cli"
"github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command"
@@ -16,38 +14,15 @@ import (
) )
type inspectOptions struct { type inspectOptions struct {
raw bool raw bool
builder string
} }
func runInspect(dockerCli command.Cli, in inspectOptions, name string) error { func runInspect(dockerCli command.Cli, in inspectOptions, name string) error {
ctx := appcontext.Context() ctx := appcontext.Context()
txn, release, err := storeutil.GetStore(dockerCli) r := imagetools.New(imagetools.Opt{
if err != nil { Auth: dockerCli.ConfigFile(),
return err })
}
defer release()
var ng *store.NodeGroup
if in.builder != "" {
ng, err = storeutil.GetNodeGroup(txn, dockerCli, in.builder)
if err != nil {
return err
}
} else {
ng, err = storeutil.GetCurrentInstance(txn, dockerCli)
if err != nil {
return err
}
}
imageopt, err := storeutil.GetImageConfig(dockerCli, ng)
if err != nil {
return err
}
r := imagetools.New(imageopt)
dt, desc, err := r.Get(ctx, name) dt, desc, err := r.Get(ctx, name)
if err != nil { if err != nil {
@@ -71,7 +46,7 @@ func runInspect(dockerCli command.Cli, in inspectOptions, name string) error {
return nil return nil
} }
func inspectCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command { func inspectCmd(dockerCli command.Cli) *cobra.Command {
var options inspectOptions var options inspectOptions
cmd := &cobra.Command{ cmd := &cobra.Command{
@@ -79,13 +54,15 @@ func inspectCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
Short: "Show details of image in the registry", Short: "Show details of image in the registry",
Args: cli.ExactArgs(1), Args: cli.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error { RunE: func(cmd *cobra.Command, args []string) error {
options.builder = rootOpts.Builder
return runInspect(dockerCli, options, args[0]) return runInspect(dockerCli, options, args[0])
}, },
} }
flags := cmd.Flags() flags := cmd.Flags()
flags.BoolVar(&options.raw, "raw", false, "Show original JSON manifest") flags.BoolVar(&options.raw, "raw", false, "Show original JSON manifest")
_ = flags
return cmd return cmd
} }

View File

@@ -5,19 +5,15 @@ import (
"github.com/spf13/cobra" "github.com/spf13/cobra"
) )
type RootOptions struct { func RootCmd(dockerCli command.Cli) *cobra.Command {
Builder string
}
func RootCmd(dockerCli command.Cli, opts RootOptions) *cobra.Command {
cmd := &cobra.Command{ cmd := &cobra.Command{
Use: "imagetools", Use: "imagetools",
Short: "Commands to work on images in registry", Short: "Commands to work on images in registry",
} }
cmd.AddCommand( cmd.AddCommand(
inspectCmd(dockerCli, opts), inspectCmd(dockerCli),
createCmd(dockerCli, opts), createCmd(dockerCli),
) )
return cmd return cmd

View File

@@ -8,13 +8,17 @@ import (
"text/tabwriter" "text/tabwriter"
"time" "time"
"github.com/docker/buildx/build"
"github.com/docker/buildx/driver"
"github.com/docker/buildx/store" "github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/platformutil" "github.com/docker/buildx/util/platformutil"
"github.com/docker/buildx/util/progress"
"github.com/docker/cli/cli" "github.com/docker/cli/cli"
"github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command"
"github.com/moby/buildkit/util/appcontext" "github.com/moby/buildkit/util/appcontext"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"golang.org/x/sync/errgroup"
) )
type inspectOptions struct { type inspectOptions struct {
@@ -22,10 +26,23 @@ type inspectOptions struct {
builder string builder string
} }
type dinfo struct {
di *build.DriverInfo
info *driver.Info
platforms []specs.Platform
err error
}
type nginfo struct {
ng *store.NodeGroup
drivers []dinfo
err error
}
func runInspect(dockerCli command.Cli, in inspectOptions) error { func runInspect(dockerCli command.Cli, in inspectOptions) error {
ctx := appcontext.Context() ctx := appcontext.Context()
txn, release, err := storeutil.GetStore(dockerCli) txn, release, err := getStore(dockerCli)
if err != nil { if err != nil {
return err return err
} }
@@ -34,12 +51,12 @@ func runInspect(dockerCli command.Cli, in inspectOptions) error {
var ng *store.NodeGroup var ng *store.NodeGroup
if in.builder != "" { if in.builder != "" {
ng, err = storeutil.GetNodeGroup(txn, dockerCli, in.builder) ng, err = getNodeGroup(txn, dockerCli, in.builder)
if err != nil { if err != nil {
return err return err
} }
} else { } else {
ng, err = storeutil.GetCurrentInstance(txn, dockerCli) ng, err = getCurrentInstance(txn, dockerCli)
if err != nil { if err != nil {
return err return err
} }
@@ -65,7 +82,7 @@ func runInspect(dockerCli command.Cli, in inspectOptions) error {
var bootNgi *nginfo var bootNgi *nginfo
if in.bootstrap { if in.bootstrap {
var ok bool var ok bool
ok, err = boot(ctx, ngi) ok, err = boot(ctx, ngi, dockerCli)
if err != nil { if err != nil {
return err return err
} }
@@ -132,7 +149,50 @@ func inspectCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
} }
flags := cmd.Flags() flags := cmd.Flags()
flags.BoolVar(&options.bootstrap, "bootstrap", false, "Ensure builder has booted before inspecting") flags.BoolVar(&options.bootstrap, "bootstrap", false, "Ensure builder has booted before inspecting")
_ = flags
return cmd return cmd
} }
func boot(ctx context.Context, ngi *nginfo, dockerCli command.Cli) (bool, error) {
toBoot := make([]int, 0, len(ngi.drivers))
for i, d := range ngi.drivers {
if d.err != nil || d.di.Err != nil || d.di.Driver == nil || d.info == nil {
continue
}
if d.info.Status != driver.Running {
toBoot = append(toBoot, i)
}
}
if len(toBoot) == 0 {
return false, nil
}
printer := progress.NewPrinter(context.TODO(), os.Stderr, "auto")
baseCtx := ctx
eg, _ := errgroup.WithContext(ctx)
for _, idx := range toBoot {
func(idx int) {
eg.Go(func() error {
pw := progress.WithPrefix(printer, ngi.ng.Nodes[idx].Name, len(toBoot) > 1)
_, err := driver.Boot(ctx, baseCtx, ngi.drivers[idx].di.Driver, pw)
if err != nil {
ngi.drivers[idx].err = err
}
return nil
})
}(idx)
}
err := eg.Wait()
err1 := printer.Wait()
if err == nil {
err = err1
}
return true, err
}
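The boot helper above fans out over the non-running nodes with an errgroup, gives each node a prefixed progress writer, and records per-node failures instead of aborting the whole group. As a rough, self-contained sketch of that fan-out pattern only (hypothetical node names, `startNode` standing in for `driver.Boot`, not buildx API):

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func startNode(ctx context.Context, name string) error {
	return nil // placeholder for the actual boot call
}

func main() {
	nodes := []string{"node0", "node1", "node2"} // hypothetical builder nodes
	errs := make([]error, len(nodes))

	eg, ctx := errgroup.WithContext(context.Background())
	for i, name := range nodes {
		i, name := i, name // capture loop variables for the goroutine
		eg.Go(func() error {
			// Failures are recorded per node rather than cancelling the group,
			// mirroring how ngi.drivers[idx].err is set above.
			if err := startNode(ctx, name); err != nil {
				errs[i] = err
			}
			return nil
		})
	}
	_ = eg.Wait() // always nil here; real errors live in errs
	for i, err := range errs {
		fmt.Printf("%s: err=%v\n", nodes[i], err)
	}
}
```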

View File

@@ -3,7 +3,6 @@ package commands
import ( import (
"os" "os"
"github.com/docker/buildx/util/cobrautil"
"github.com/docker/cli/cli" "github.com/docker/cli/cli"
"github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/config" "github.com/docker/cli/cli/config"
@@ -49,8 +48,5 @@ func installCmd(dockerCli command.Cli) *cobra.Command {
Hidden: true, Hidden: true,
} }
// hide builder persistent flag for this command
cobrautil.HideInheritedFlags(cmd, "builder")
return cmd return cmd
} }

View File

@@ -10,8 +10,6 @@ import (
"time" "time"
"github.com/docker/buildx/store" "github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/cobrautil"
"github.com/docker/buildx/util/platformutil" "github.com/docker/buildx/util/platformutil"
"github.com/docker/cli/cli" "github.com/docker/cli/cli"
"github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command"
@@ -26,7 +24,7 @@ type lsOptions struct {
func runLs(dockerCli command.Cli, in lsOptions) error { func runLs(dockerCli command.Cli, in lsOptions) error {
ctx := appcontext.Context() ctx := appcontext.Context()
txn, release, err := storeutil.GetStore(dockerCli) txn, release, err := getStore(dockerCli)
if err != nil { if err != nil {
return err return err
} }
@@ -81,7 +79,7 @@ func runLs(dockerCli command.Cli, in lsOptions) error {
} }
currentName := "default" currentName := "default"
current, err := storeutil.GetCurrentInstance(txn, dockerCli) current, err := getCurrentInstance(txn, dockerCli)
if err != nil { if err != nil {
return err return err
} }
@@ -149,8 +147,5 @@ func lsCmd(dockerCli command.Cli) *cobra.Command {
}, },
} }
// hide builder persistent flag for this command
cobrautil.HideInheritedFlags(cmd, "builder")
return cmd return cmd
} }

View File

@@ -139,7 +139,7 @@ func pruneCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
flags := cmd.Flags() flags := cmd.Flags()
flags.BoolVarP(&options.all, "all", "a", false, "Remove all unused images, not just dangling ones") flags.BoolVarP(&options.all, "all", "a", false, "Remove all unused images, not just dangling ones")
flags.Var(&options.filter, "filter", "Provide filter values (e.g., `until=24h`)") flags.Var(&options.filter, "filter", "Provide filter values (e.g. 'until=24h')")
flags.Var(&options.keepStorage, "keep-storage", "Amount of disk space to keep for cache") flags.Var(&options.keepStorage, "keep-storage", "Amount of disk space to keep for cache")
flags.BoolVar(&options.verbose, "verbose", false, "Provide a more verbose output") flags.BoolVar(&options.verbose, "verbose", false, "Provide a more verbose output")
flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation") flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation")

View File

@@ -4,7 +4,6 @@ import (
"context" "context"
"github.com/docker/buildx/store" "github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/cli/cli" "github.com/docker/cli/cli"
"github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command"
"github.com/moby/buildkit/util/appcontext" "github.com/moby/buildkit/util/appcontext"
@@ -19,14 +18,14 @@ type rmOptions struct {
func runRm(dockerCli command.Cli, in rmOptions) error { func runRm(dockerCli command.Cli, in rmOptions) error {
ctx := appcontext.Context() ctx := appcontext.Context()
txn, release, err := storeutil.GetStore(dockerCli) txn, release, err := getStore(dockerCli)
if err != nil { if err != nil {
return err return err
} }
defer release() defer release()
if in.builder != "" { if in.builder != "" {
ng, err := storeutil.GetNodeGroup(txn, dockerCli, in.builder) ng, err := getNodeGroup(txn, dockerCli, in.builder)
if err != nil { if err != nil {
return err return err
} }
@@ -37,7 +36,7 @@ func runRm(dockerCli command.Cli, in rmOptions) error {
return err1 return err1
} }
ng, err := storeutil.GetCurrentInstance(txn, dockerCli) ng, err := getCurrentInstance(txn, dockerCli)
if err != nil { if err != nil {
return err return err
} }

View File

@@ -12,8 +12,7 @@ import (
func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Command { func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Command {
cmd := &cobra.Command{ cmd := &cobra.Command{
Short: "Docker Buildx", Short: "Build with BuildKit",
Long: `Extended build capabilities with BuildKit`,
Use: name, Use: name,
} }
if isPlugin { if isPlugin {
@@ -48,7 +47,7 @@ func addCommands(cmd *cobra.Command, dockerCli command.Cli) {
versionCmd(dockerCli), versionCmd(dockerCli),
pruneCmd(dockerCli, opts), pruneCmd(dockerCli, opts),
duCmd(dockerCli, opts), duCmd(dockerCli, opts),
imagetoolscmd.RootCmd(dockerCli, imagetoolscmd.RootOptions{Builder: opts.builder}), imagetoolscmd.RootCmd(dockerCli),
) )
} }

View File

@@ -4,7 +4,6 @@ import (
"context" "context"
"github.com/docker/buildx/store" "github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/cli/cli" "github.com/docker/cli/cli"
"github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command"
"github.com/moby/buildkit/util/appcontext" "github.com/moby/buildkit/util/appcontext"
@@ -18,14 +17,14 @@ type stopOptions struct {
func runStop(dockerCli command.Cli, in stopOptions) error { func runStop(dockerCli command.Cli, in stopOptions) error {
ctx := appcontext.Context() ctx := appcontext.Context()
txn, release, err := storeutil.GetStore(dockerCli) txn, release, err := getStore(dockerCli)
if err != nil { if err != nil {
return err return err
} }
defer release() defer release()
if in.builder != "" { if in.builder != "" {
ng, err := storeutil.GetNodeGroup(txn, dockerCli, in.builder) ng, err := getNodeGroup(txn, dockerCli, in.builder)
if err != nil { if err != nil {
return err return err
} }
@@ -35,7 +34,7 @@ func runStop(dockerCli command.Cli, in stopOptions) error {
return nil return nil
} }
ng, err := storeutil.GetCurrentInstance(txn, dockerCli) ng, err := getCurrentInstance(txn, dockerCli)
if err != nil { if err != nil {
return err return err
} }
@@ -62,6 +61,12 @@ func stopCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
}, },
} }
flags := cmd.Flags()
// flags.StringArrayVarP(&options.outputs, "output", "o", []string{}, "Output destination (format: type=local,dest=path)")
_ = flags
return cmd return cmd
} }

View File

@@ -3,7 +3,6 @@ package commands
import ( import (
"os" "os"
"github.com/docker/buildx/util/cobrautil"
"github.com/docker/cli/cli" "github.com/docker/cli/cli"
"github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/config" "github.com/docker/cli/cli/config"
@@ -55,8 +54,5 @@ func uninstallCmd(dockerCli command.Cli) *cobra.Command {
Hidden: true, Hidden: true,
} }
// hide builder persistent flag for this command
cobrautil.HideInheritedFlags(cmd, "builder")
return cmd return cmd
} }

View File

@@ -3,7 +3,6 @@ package commands
import ( import (
"os" "os"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/cli/cli" "github.com/docker/cli/cli"
"github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command"
"github.com/pkg/errors" "github.com/pkg/errors"
@@ -17,7 +16,7 @@ type useOptions struct {
} }
func runUse(dockerCli command.Cli, in useOptions) error { func runUse(dockerCli command.Cli, in useOptions) error {
txn, release, err := storeutil.GetStore(dockerCli) txn, release, err := getStore(dockerCli)
if err != nil { if err != nil {
return err return err
} }
@@ -29,7 +28,7 @@ func runUse(dockerCli command.Cli, in useOptions) error {
return errors.Errorf("run `docker context use default` to switch to default context") return errors.Errorf("run `docker context use default` to switch to default context")
} }
if in.builder == "default" || in.builder == dockerCli.CurrentContext() { if in.builder == "default" || in.builder == dockerCli.CurrentContext() {
ep, err := storeutil.GetCurrentEndpoint(dockerCli) ep, err := getCurrentEndpoint(dockerCli)
if err != nil { if err != nil {
return err return err
} }
@@ -52,7 +51,7 @@ func runUse(dockerCli command.Cli, in useOptions) error {
return errors.Wrapf(err, "failed to find instance %q", in.builder) return errors.Wrapf(err, "failed to find instance %q", in.builder)
} }
ep, err := storeutil.GetCurrentEndpoint(dockerCli) ep, err := getCurrentEndpoint(dockerCli)
if err != nil { if err != nil {
return err return err
} }
@@ -80,8 +79,11 @@ func useCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
} }
flags := cmd.Flags() flags := cmd.Flags()
flags.BoolVar(&options.isGlobal, "global", false, "Builder persists context changes") flags.BoolVar(&options.isGlobal, "global", false, "Builder persists context changes")
flags.BoolVar(&options.isDefault, "default", false, "Set builder as default for current context") flags.BoolVar(&options.isDefault, "default", false, "Set builder as default for current context")
_ = flags
return cmd return cmd
} }

View File

@@ -4,30 +4,86 @@ import (
"context" "context"
"net/url" "net/url"
"os" "os"
"path/filepath"
"strings" "strings"
"github.com/docker/buildx/build" "github.com/docker/buildx/build"
"github.com/docker/buildx/driver" "github.com/docker/buildx/driver"
"github.com/docker/buildx/store" "github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/platformutil" "github.com/docker/buildx/util/platformutil"
"github.com/docker/buildx/util/progress"
"github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/context/docker" "github.com/docker/cli/cli/context/docker"
"github.com/docker/cli/cli/context/kubernetes" "github.com/docker/cli/cli/context/kubernetes"
ctxstore "github.com/docker/cli/cli/context/store" ctxstore "github.com/docker/cli/cli/context/store"
dopts "github.com/docker/cli/opts" dopts "github.com/docker/cli/opts"
dockerclient "github.com/docker/docker/client" dockerclient "github.com/docker/docker/client"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
"k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/clientcmd"
) )
// getStore returns current builder instance store
func getStore(dockerCli command.Cli) (*store.Txn, func(), error) {
s, err := store.New(getConfigStorePath(dockerCli))
if err != nil {
return nil, nil, err
}
return s.Txn()
}
// getConfigStorePath will look for correct configuration store path;
// if `$BUILDX_CONFIG` is set - use it, otherwise use parent directory
// of Docker config file (i.e. `${DOCKER_CONFIG}/buildx`)
func getConfigStorePath(dockerCli command.Cli) string {
if buildxConfig := os.Getenv("BUILDX_CONFIG"); buildxConfig != "" {
logrus.Debugf("using config store %q based in \"$BUILDX_CONFIG\" environment variable", buildxConfig)
return buildxConfig
}
buildxConfig := filepath.Join(filepath.Dir(dockerCli.ConfigFile().Filename), "buildx")
logrus.Debugf("using default config store %q", buildxConfig)
return buildxConfig
}
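The lookup above is a plain environment-override-with-fallback: prefer `$BUILDX_CONFIG`, otherwise derive the directory from the Docker config file location. A minimal standalone sketch of the same resolution (hypothetical helper name, not part of buildx):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// resolveConfigDir is a hypothetical stand-in for the lookup shown above:
// prefer $BUILDX_CONFIG, otherwise fall back to <docker-config-dir>/buildx.
func resolveConfigDir(dockerConfigFile string) string {
	if dir := os.Getenv("BUILDX_CONFIG"); dir != "" {
		return dir
	}
	return filepath.Join(filepath.Dir(dockerConfigFile), "buildx")
}

func main() {
	// prints /home/user/.docker/buildx unless BUILDX_CONFIG is set
	fmt.Println(resolveConfigDir("/home/user/.docker/config.json"))
}
```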
// getCurrentEndpoint returns the current default endpoint value
func getCurrentEndpoint(dockerCli command.Cli) (string, error) {
name := dockerCli.CurrentContext()
if name != "default" {
return name, nil
}
de, err := getDockerEndpoint(dockerCli, name)
if err != nil {
return "", errors.Errorf("docker endpoint for %q not found", name)
}
return de, nil
}
// getDockerEndpoint returns docker endpoint string for given context
func getDockerEndpoint(dockerCli command.Cli, name string) (string, error) {
list, err := dockerCli.ContextStore().List()
if err != nil {
return "", err
}
for _, l := range list {
if l.Name == name {
ep, ok := l.Endpoints["docker"]
if !ok {
return "", errors.Errorf("context %q does not have a Docker endpoint", name)
}
typed, ok := ep.(docker.EndpointMeta)
if !ok {
return "", errors.Errorf("endpoint %q is not of type EndpointMeta, %T", ep, ep)
}
return typed.Host, nil
}
}
return "", nil
}
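The endpoint lookup walks the CLI context store and pulls the `docker` entry for the matching context. A simplified, self-contained sketch of that selection, using a plain map instead of the real `ctxstore`/`docker.EndpointMeta` types (so this is an illustration of the flow, not the docker/cli API):

```go
package main

import "fmt"

// contextMeta is a simplified stand-in for CLI context store metadata:
// each context carries a map of named endpoints.
type contextMeta struct {
	Name      string
	Endpoints map[string]string // endpoint name -> host
}

// dockerHostFor mirrors the lookup above: find the context by name and
// return its "docker" endpoint host.
func dockerHostFor(list []contextMeta, name string) (string, error) {
	for _, c := range list {
		if c.Name != name {
			continue
		}
		host, ok := c.Endpoints["docker"]
		if !ok {
			return "", fmt.Errorf("context %q does not have a Docker endpoint", name)
		}
		return host, nil
	}
	// like the function above, an unknown context name is not treated as an error here
	return "", nil
}

func main() {
	list := []contextMeta{{Name: "remote", Endpoints: map[string]string{"docker": "ssh://user@host"}}}
	host, err := dockerHostFor(list, "remote")
	fmt.Println(host, err)
}
```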
// validateEndpoint validates that endpoint is either a context or a docker host // validateEndpoint validates that endpoint is either a context or a docker host
func validateEndpoint(dockerCli command.Cli, ep string) (string, error) { func validateEndpoint(dockerCli command.Cli, ep string) (string, error) {
de, err := storeutil.GetDockerEndpoint(dockerCli, ep) de, err := getDockerEndpoint(dockerCli, ep)
if err == nil && de != "" { if err == nil && de != "" {
if ep == "default" { if ep == "default" {
return de, nil return de, nil
@@ -41,6 +97,60 @@ func validateEndpoint(dockerCli command.Cli, ep string) (string, error) {
return h, nil return h, nil
} }
// getCurrentInstance finds the current builder instance
func getCurrentInstance(txn *store.Txn, dockerCli command.Cli) (*store.NodeGroup, error) {
ep, err := getCurrentEndpoint(dockerCli)
if err != nil {
return nil, err
}
ng, err := txn.Current(ep)
if err != nil {
return nil, err
}
if ng == nil {
ng, _ = getNodeGroup(txn, dockerCli, dockerCli.CurrentContext())
}
return ng, nil
}
// getNodeGroup returns nodegroup based on the name
func getNodeGroup(txn *store.Txn, dockerCli command.Cli, name string) (*store.NodeGroup, error) {
ng, err := txn.NodeGroupByName(name)
if err != nil {
if !os.IsNotExist(errors.Cause(err)) {
return nil, err
}
}
if ng != nil {
return ng, nil
}
if name == "default" {
name = dockerCli.CurrentContext()
}
list, err := dockerCli.ContextStore().List()
if err != nil {
return nil, err
}
for _, l := range list {
if l.Name == name {
return &store.NodeGroup{
Name: "default",
Nodes: []store.Node{
{
Name: "default",
Endpoint: name,
},
},
}, nil
}
}
return nil, errors.Errorf("no builder %q found", name)
}
// driversForNodeGroup returns drivers for a nodegroup instance // driversForNodeGroup returns drivers for a nodegroup instance
func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, contextPathHash string) ([]build.DriverInfo, error) { func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, contextPathHash string) ([]build.DriverInfo, error) {
eg, _ := errgroup.WithContext(ctx) eg, _ := errgroup.WithContext(ctx)
@@ -64,10 +174,6 @@ func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.N
} }
ng.Driver = f.Name() ng.Driver = f.Name()
} }
imageopt, err := storeutil.GetImageConfig(dockerCli, ng)
if err != nil {
return nil, err
}
for i, n := range ng.Nodes { for i, n := range ng.Nodes {
func(i int, n store.Node) { func(i int, n store.Node) {
@@ -117,13 +223,12 @@ func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.N
} }
} }
d, err := driver.GetDriver(ctx, "buildx_buildkit_"+n.Name, f, dockerapi, imageopt.Auth, kcc, n.Flags, n.Files, n.DriverOpts, n.Platforms, contextPathHash) d, err := driver.GetDriver(ctx, "buildx_buildkit_"+n.Name, f, dockerapi, dockerCli.ConfigFile(), kcc, n.Flags, n.ConfigFile, n.DriverOpts, n.Platforms, contextPathHash)
if err != nil { if err != nil {
di.Err = err di.Err = err
return nil return nil
} }
di.Driver = d di.Driver = d
di.ImageOpt = imageopt
return nil return nil
}) })
}(i, n) }(i, n)
@@ -139,15 +244,14 @@ func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.N
func configFromContext(endpointName string, s ctxstore.Reader) (clientcmd.ClientConfig, error) { func configFromContext(endpointName string, s ctxstore.Reader) (clientcmd.ClientConfig, error) {
if strings.HasPrefix(endpointName, "kubernetes://") { if strings.HasPrefix(endpointName, "kubernetes://") {
u, _ := url.Parse(endpointName) u, _ := url.Parse(endpointName)
if kubeconfig := u.Query().Get("kubeconfig"); kubeconfig != "" { if kubeconfig := u.Query().Get("kubeconfig"); kubeconfig != "" {
_ = os.Setenv(clientcmd.RecommendedConfigPathEnvVar, kubeconfig) clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
&clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig},
&clientcmd.ConfigOverrides{},
)
return clientConfig, nil
} }
rules := clientcmd.NewDefaultClientConfigLoadingRules()
apiConfig, err := rules.Load()
if err != nil {
return nil, err
}
return clientcmd.NewDefaultClientConfig(*apiConfig, &clientcmd.ConfigOverrides{}), nil
} }
return kubernetes.ConfigFromContext(endpointName, s) return kubernetes.ConfigFromContext(endpointName, s)
} }
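For `kubernetes://` endpoints the kubeconfig path travels as a query parameter and is handed to client-go's config loader. The URL handling itself is just `net/url`; a minimal sketch of extracting that parameter (stdlib only, no client-go, helper name is illustrative):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// kubeconfigFromEndpoint extracts the kubeconfig path from endpoints of the
// form kubernetes://<name>?kubeconfig=/path/to/config, returning "" otherwise.
func kubeconfigFromEndpoint(endpoint string) string {
	if !strings.HasPrefix(endpoint, "kubernetes://") {
		return ""
	}
	u, err := url.Parse(endpoint)
	if err != nil {
		return ""
	}
	return u.Query().Get("kubeconfig")
}

func main() {
	fmt.Println(kubeconfigFromEndpoint("kubernetes://mybuilder?kubeconfig=/home/user/.kube/config"))
}
```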
@@ -221,7 +325,7 @@ func getInstanceOrDefault(ctx context.Context, dockerCli command.Cli, instance,
} }
func getInstanceByName(ctx context.Context, dockerCli command.Cli, instance, contextPathHash string) ([]build.DriverInfo, error) { func getInstanceByName(ctx context.Context, dockerCli command.Cli, instance, contextPathHash string) ([]build.DriverInfo, error) {
txn, release, err := storeutil.GetStore(dockerCli) txn, release, err := getStore(dockerCli)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -236,14 +340,14 @@ func getInstanceByName(ctx context.Context, dockerCli command.Cli, instance, con
// getDefaultDrivers returns drivers based on current cli config // getDefaultDrivers returns drivers based on current cli config
func getDefaultDrivers(ctx context.Context, dockerCli command.Cli, defaultOnly bool, contextPathHash string) ([]build.DriverInfo, error) { func getDefaultDrivers(ctx context.Context, dockerCli command.Cli, defaultOnly bool, contextPathHash string) ([]build.DriverInfo, error) {
txn, release, err := storeutil.GetStore(dockerCli) txn, release, err := getStore(dockerCli)
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer release() defer release()
if !defaultOnly { if !defaultOnly {
ng, err := storeutil.GetCurrentInstance(txn, dockerCli) ng, err := getCurrentInstance(txn, dockerCli)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -253,20 +357,14 @@ func getDefaultDrivers(ctx context.Context, dockerCli command.Cli, defaultOnly b
} }
} }
imageopt, err := storeutil.GetImageConfig(dockerCli, nil) d, err := driver.GetDriver(ctx, "buildx_buildkit_default", nil, dockerCli.Client(), dockerCli.ConfigFile(), nil, nil, "", nil, nil, contextPathHash)
if err != nil {
return nil, err
}
d, err := driver.GetDriver(ctx, "buildx_buildkit_default", nil, dockerCli.Client(), imageopt.Auth, nil, nil, nil, nil, nil, contextPathHash)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return []build.DriverInfo{ return []build.DriverInfo{
{ {
Name: "default", Name: "default",
Driver: d, Driver: d,
ImageOpt: imageopt,
}, },
}, nil }, nil
} }
@@ -377,56 +475,3 @@ func (a *api) DockerAPI(name string) (dockerclient.APIClient, error) {
} }
return clientForEndpoint(a.dockerCli, name) return clientForEndpoint(a.dockerCli, name)
} }
type dinfo struct {
di *build.DriverInfo
info *driver.Info
platforms []specs.Platform
err error
}
type nginfo struct {
ng *store.NodeGroup
drivers []dinfo
err error
}
func boot(ctx context.Context, ngi *nginfo) (bool, error) {
toBoot := make([]int, 0, len(ngi.drivers))
for i, d := range ngi.drivers {
if d.err != nil || d.di.Err != nil || d.di.Driver == nil || d.info == nil {
continue
}
if d.info.Status != driver.Running {
toBoot = append(toBoot, i)
}
}
if len(toBoot) == 0 {
return false, nil
}
printer := progress.NewPrinter(context.TODO(), os.Stderr, "auto")
baseCtx := ctx
eg, _ := errgroup.WithContext(ctx)
for _, idx := range toBoot {
func(idx int) {
eg.Go(func() error {
pw := progress.WithPrefix(printer, ngi.ng.Nodes[idx].Name, len(toBoot) > 1)
_, err := driver.Boot(ctx, baseCtx, ngi.drivers[idx].di.Driver, pw)
if err != nil {
ngi.drivers[idx].err = err
}
return nil
})
}(idx)
}
err := eg.Wait()
err1 := printer.Wait()
if err == nil {
err = err1
}
return true, err
}

View File

@@ -3,7 +3,6 @@ package commands
import ( import (
"fmt" "fmt"
"github.com/docker/buildx/util/cobrautil"
"github.com/docker/buildx/version" "github.com/docker/buildx/version"
"github.com/docker/cli/cli" "github.com/docker/cli/cli"
"github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command"
@@ -24,9 +23,5 @@ func versionCmd(dockerCli command.Cli) *cobra.Command {
return runVersion(dockerCli) return runVersion(dockerCli)
}, },
} }
// hide builder persistent flag for this command
cobrautil.HideInheritedFlags(cmd, "builder")
return cmd return cmd
} }

View File

@@ -1,144 +0,0 @@
variable "GO_VERSION" {
default = "1.17"
}
variable "BIN_OUT" {
default = "./bin"
}
variable "RELEASE_OUT" {
default = "./release-out"
}
variable "DOCS_FORMATS" {
default = "md"
}
// Special target: https://github.com/docker/metadata-action#bake-definition
target "meta-helper" {
tags = ["docker/buildx-bin:local"]
}
target "_common" {
args = {
GO_VERSION = GO_VERSION
BUILDKIT_CONTEXT_KEEP_GIT_DIR = 1
}
}
group "default" {
targets = ["binaries"]
}
group "validate" {
targets = ["lint", "validate-vendor", "validate-docs"]
}
target "lint" {
inherits = ["_common"]
dockerfile = "./hack/dockerfiles/lint.Dockerfile"
output = ["type=cacheonly"]
}
target "validate-vendor" {
inherits = ["_common"]
dockerfile = "./hack/dockerfiles/vendor.Dockerfile"
target = "validate"
output = ["type=cacheonly"]
}
target "validate-docs" {
inherits = ["_common"]
args = {
FORMATS = DOCS_FORMATS
}
dockerfile = "./hack/dockerfiles/docs.Dockerfile"
target = "validate"
output = ["type=cacheonly"]
}
target "validate-authors" {
inherits = ["_common"]
dockerfile = "./hack/dockerfiles/authors.Dockerfile"
target = "validate"
output = ["type=cacheonly"]
}
target "update-vendor" {
inherits = ["_common"]
dockerfile = "./hack/dockerfiles/vendor.Dockerfile"
target = "update"
output = ["."]
}
target "update-docs" {
inherits = ["_common"]
args = {
FORMATS = DOCS_FORMATS
}
dockerfile = "./hack/dockerfiles/docs.Dockerfile"
target = "update"
output = ["./docs/reference"]
}
target "update-authors" {
inherits = ["_common"]
dockerfile = "./hack/dockerfiles/authors.Dockerfile"
target = "update"
output = ["."]
}
target "mod-outdated" {
inherits = ["_common"]
dockerfile = "./hack/dockerfiles/vendor.Dockerfile"
target = "outdated"
output = ["type=cacheonly"]
}
target "test" {
inherits = ["_common"]
target = "test-coverage"
output = ["./coverage"]
}
target "binaries" {
inherits = ["_common"]
target = "binaries"
output = [BIN_OUT]
platforms = ["local"]
}
target "binaries-cross" {
inherits = ["binaries"]
platforms = [
"darwin/amd64",
"darwin/arm64",
"linux/amd64",
"linux/arm/v6",
"linux/arm/v7",
"linux/arm64",
"linux/ppc64le",
"linux/riscv64",
"linux/s390x",
"windows/amd64",
"windows/arm64"
]
}
target "release" {
inherits = ["binaries-cross"]
target = "release"
output = [RELEASE_OUT]
}
target "image" {
inherits = ["meta-helper", "binaries"]
output = ["type=image"]
}
target "image-cross" {
inherits = ["meta-helper", "binaries-cross"]
output = ["type=image"]
}
target "image-local" {
inherits = ["image"]
output = ["type=docker"]
}

docs/docsgen/generate.go (new file, 198 lines)
View File

@@ -0,0 +1,198 @@
package main
import (
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"github.com/docker/buildx/commands"
"github.com/docker/cli/cli/command"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
const descriptionSourcePath = "docs/reference/"
func generateDocs(opts *options) error {
dockerCLI, err := command.NewDockerCli()
if err != nil {
return err
}
cmd := &cobra.Command{
Use: "docker [OPTIONS] COMMAND [ARG...]",
Short: "The base command for the Docker CLI.",
}
cmd.AddCommand(commands.NewRootCmd("buildx", true, dockerCLI))
return genCmd(cmd, opts.target)
}
func getMDFilename(cmd *cobra.Command) string {
name := cmd.CommandPath()
if i := strings.Index(name, " "); i >= 0 {
name = name[i+1:]
}
return strings.ReplaceAll(name, " ", "_") + ".md"
}
func genCmd(cmd *cobra.Command, dir string) error {
for _, c := range cmd.Commands() {
if err := genCmd(c, dir); err != nil {
return err
}
}
if !cmd.HasParent() {
return nil
}
mdFile := getMDFilename(cmd)
fullPath := filepath.Join(dir, mdFile)
content, err := ioutil.ReadFile(fullPath)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return errors.Wrapf(err, "%s does not exist", mdFile)
}
}
cs := string(content)
markerStart := "<!---MARKER_GEN_START-->"
markerEnd := "<!---MARKER_GEN_END-->"
start := strings.Index(cs, markerStart)
end := strings.Index(cs, markerEnd)
if start == -1 {
return errors.Errorf("no start marker in %s", mdFile)
}
if end == -1 {
return errors.Errorf("no end marker in %s", mdFile)
}
out, err := cmdOutput(cmd, cs)
if err != nil {
return err
}
cont := cs[:start] + markerStart + "\n" + out + "\n" + cs[end:]
fi, err := os.Stat(fullPath)
if err != nil {
return err
}
if err := ioutil.WriteFile(fullPath, []byte(cont), fi.Mode()); err != nil {
return errors.Wrapf(err, "failed to write %s", fullPath)
}
log.Printf("updated %s", fullPath)
return nil
}
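genCmd works by splicing generated output between the `<!---MARKER_GEN_START-->` and `<!---MARKER_GEN_END-->` markers of an existing Markdown page, leaving the hand-written text around them untouched. The splice step is a small string operation; a standalone sketch (assuming both markers are present in the input) looks like:

```go
package main

import (
	"fmt"
	"strings"
)

const (
	markerStart = "<!---MARKER_GEN_START-->"
	markerEnd   = "<!---MARKER_GEN_END-->"
)

// splice replaces everything between the two markers with generated content,
// keeping the hand-written text before and after the marker block intact.
func splice(doc, generated string) (string, error) {
	start := strings.Index(doc, markerStart)
	end := strings.Index(doc, markerEnd)
	if start == -1 || end == -1 {
		return "", fmt.Errorf("missing marker")
	}
	return doc[:start] + markerStart + "\n" + generated + "\n" + doc[end:], nil
}

func main() {
	doc := "# buildx\n\n" + markerStart + "\nold table\n" + markerEnd + "\n\nExamples...\n"
	out, err := splice(doc, "| Name | Description |\n| --- | --- |")
	fmt.Println(out, err)
}
```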
func makeLink(txt, link string, f *pflag.Flag, isAnchor bool) string {
link = "#" + link
annotations, ok := f.Annotations["docs.external.url"]
if ok && len(annotations) > 0 {
link = annotations[0]
} else {
if !isAnchor {
return txt
}
}
return "[" + txt + "](" + link + ")"
}
func cmdOutput(cmd *cobra.Command, old string) (string, error) {
b := &strings.Builder{}
desc := cmd.Short
if cmd.Long != "" {
desc = cmd.Long
}
if desc != "" {
fmt.Fprintf(b, "%s\n\n", desc)
}
if len(cmd.Aliases) != 0 {
fmt.Fprintf(b, "### Aliases\n\n`%s`", cmd.Name())
for _, a := range cmd.Aliases {
fmt.Fprintf(b, ", `%s`", a)
}
fmt.Fprint(b, "\n\n")
}
if len(cmd.Commands()) != 0 {
fmt.Fprint(b, "### Subcommands\n\n")
fmt.Fprint(b, "| Name | Description |\n")
fmt.Fprint(b, "| --- | --- |\n")
for _, c := range cmd.Commands() {
fmt.Fprintf(b, "| [`%s`](%s) | %s |\n", c.Name(), getMDFilename(c), c.Short)
}
fmt.Fprint(b, "\n\n")
}
hasFlags := cmd.Flags().HasAvailableFlags()
cmd.Flags().AddFlagSet(cmd.InheritedFlags())
if hasFlags {
fmt.Fprint(b, "### Options\n\n")
fmt.Fprint(b, "| Name | Description |\n")
fmt.Fprint(b, "| --- | --- |\n")
cmd.Flags().VisitAll(func(f *pflag.Flag) {
if f.Hidden {
return
}
isLink := strings.Contains(old, "<a name=\""+f.Name+"\"></a>")
fmt.Fprint(b, "| ")
if f.Shorthand != "" {
name := "`-" + f.Shorthand + "`"
name = makeLink(name, f.Name, f, isLink)
fmt.Fprintf(b, "%s, ", name)
}
name := "`--" + f.Name
if f.Value.Type() != "bool" {
name += " " + f.Value.Type()
}
name += "`"
name = makeLink(name, f.Name, f, isLink)
fmt.Fprintf(b, "%s | %s |\n", name, f.Usage)
})
fmt.Fprintln(b, "")
}
return b.String(), nil
}
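cmdOutput walks the visible flags with `VisitAll` and renders each one as a Markdown table row, linking the flag name only when the page already contains a matching anchor. A cut-down sketch of just the table-rendering part, driving `pflag` directly with a couple of made-up flags for illustration:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("buildx bake", pflag.ContinueOnError)
	fs.Bool("no-cache", false, "Do not use cache when building the image")
	fs.StringArrayP("file", "f", nil, "Build definition file")

	b := &strings.Builder{}
	fmt.Fprint(b, "| Name | Description |\n| --- | --- |\n")
	fs.VisitAll(func(f *pflag.Flag) {
		if f.Hidden {
			return
		}
		name := "`--" + f.Name
		if f.Value.Type() != "bool" { // non-boolean flags show their value type
			name += " " + f.Value.Type()
		}
		name += "`"
		if f.Shorthand != "" {
			name = "`-" + f.Shorthand + "`, " + name
		}
		fmt.Fprintf(b, "| %s | %s |\n", name, f.Usage)
	})
	fmt.Print(b.String())
}
```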
type options struct {
target string
}
func parseArgs() (*options, error) {
opts := &options{}
flags := pflag.NewFlagSet(os.Args[0], pflag.ContinueOnError)
flags.StringVar(&opts.target, "target", descriptionSourcePath, "Docs directory")
err := flags.Parse(os.Args[1:])
return opts, err
}
func main() {
if err := run(); err != nil {
log.Printf("error: %+v", err)
os.Exit(1)
}
}
func run() error {
opts, err := parseArgs()
if err != nil {
return err
}
if err := generateDocs(opts); err != nil {
return err
}
return nil
}

View File

@@ -1,89 +0,0 @@
package main
import (
"log"
"os"
"github.com/docker/buildx/commands"
clidocstool "github.com/docker/cli-docs-tool"
"github.com/docker/cli/cli/command"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
// import drivers otherwise factories are empty
// for --driver output flag usage
_ "github.com/docker/buildx/driver/docker"
_ "github.com/docker/buildx/driver/docker-container"
_ "github.com/docker/buildx/driver/kubernetes"
)
const defaultSourcePath = "docs/reference/"
type options struct {
source string
formats []string
}
func gen(opts *options) error {
log.SetFlags(0)
dockerCLI, err := command.NewDockerCli()
if err != nil {
return err
}
cmd := &cobra.Command{
Use: "docker [OPTIONS] COMMAND [ARG...]",
Short: "The base command for the Docker CLI.",
DisableAutoGenTag: true,
}
cmd.AddCommand(commands.NewRootCmd("buildx", true, dockerCLI))
c, err := clidocstool.New(clidocstool.Options{
Root: cmd,
SourceDir: opts.source,
Plugin: true,
})
if err != nil {
return err
}
for _, format := range opts.formats {
switch format {
case "md":
if err = c.GenMarkdownTree(cmd); err != nil {
return err
}
case "yaml":
if err = c.GenYamlTree(cmd); err != nil {
return err
}
default:
return errors.Errorf("unknown format %q", format)
}
}
return nil
}
func run() error {
opts := &options{}
flags := pflag.NewFlagSet(os.Args[0], pflag.ContinueOnError)
flags.StringVar(&opts.source, "source", defaultSourcePath, "Docs source folder")
flags.StringSliceVar(&opts.formats, "formats", []string{}, "Format (md, yaml)")
if err := flags.Parse(os.Args[1:]); err != nil {
return err
}
if len(opts.formats) == 0 {
return errors.New("Docs format required")
}
return gen(opts)
}
func main() {
if err := run(); err != nil {
log.Printf("ERROR: %+v", err)
os.Exit(1)
}
}

View File

@@ -5,7 +5,7 @@ docker buildx [OPTIONS] COMMAND
``` ```
<!---MARKER_GEN_START--> <!---MARKER_GEN_START-->
Extended build capabilities with BuildKit Build with BuildKit
### Subcommands ### Subcommands
@@ -27,17 +27,5 @@ Extended build capabilities with BuildKit
| [`version`](buildx_version.md) | Show buildx version information | | [`version`](buildx_version.md) | Show buildx version information |
### Options
| Name | Description |
| --- | --- |
| [`--builder string`](#builder) | Override the configured builder instance |
<!---MARKER_GEN_END--> <!---MARKER_GEN_END-->
## Examples
### <a name="builder"></a> Override the configured builder instance (--builder)
You can also use the `BUILDX_BUILDER` environment variable.

View File

@@ -15,16 +15,16 @@ Build from a file
| Name | Description | | Name | Description |
| --- | --- | | --- | --- |
| [`--builder string`](#builder) | Override the configured builder instance | | `--builder string` | Override the configured builder instance |
| [`-f`](#file), [`--file stringArray`](#file) | Build definition file | | [`-f`](#file), [`--file stringArray`](#file) | Build definition file |
| `--load` | Shorthand for `--set=*.output=type=docker` | | `--load` | Shorthand for --set=*.output=type=docker |
| `--metadata-file string` | Write build result metadata to the file | | `--metadata-file string` | Write build result metadata to the file |
| [`--no-cache`](#no-cache) | Do not use cache when building the image | | [`--no-cache`](#no-cache) | Do not use cache when building the image |
| [`--print`](#print) | Print the options without building | | [`--print`](#print) | Print the options without building |
| [`--progress string`](#progress) | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output | | [`--progress string`](#progress) | Set type of progress output (auto, plain, tty). Use plain to show container output |
| [`--pull`](#pull) | Always attempt to pull a newer version of the image | | [`--pull`](#pull) | Always attempt to pull a newer version of the image |
| `--push` | Shorthand for `--set=*.output=type=registry` | | `--push` | Shorthand for --set=*.output=type=registry |
| [`--set stringArray`](#set) | Override target value (e.g., `targetpattern.key=value`) | | [`--set stringArray`](#set) | Override target value (eg: targetpattern.key=value) |
<!---MARKER_GEN_END--> <!---MARKER_GEN_END-->
@@ -34,23 +34,16 @@ Build from a file
Bake is a high-level build command. Each specified target will run in parallel Bake is a high-level build command. Each specified target will run in parallel
as part of the build. as part of the build.
Read [High-level build options](https://github.com/docker/buildx#high-level-build-options) Read [High-level build options](https://github.com/docker/buildx#high-level-build-options) for introduction.
for introduction.
Please note that `buildx bake` command may receive backwards incompatible Please note that `buildx bake` command may receive backwards incompatible features in the future if needed. We are looking for feedback on improving the command and extending the functionality further.
features in the future if needed. We are looking for feedback on improving the
command and extending the functionality further.
## Examples ## Examples
### <a name="builder"></a> Override the configured builder instance (--builder)
Same as [`buildx --builder`](buildx.md#builder).
### <a name="file"></a> Specify a build definition file (-f, --file) ### <a name="file"></a> Specify a build definition file (-f, --file)
By default, `buildx bake` looks for build definition files in the current By default, `buildx bake` looks for build definition files in the current directory,
directory, the following are parsed: the following are parsed:
- `docker-compose.yml` - `docker-compose.yml`
- `docker-compose.yaml` - `docker-compose.yaml`
@@ -96,152 +89,27 @@ $ docker buildx bake -f docker-compose.dev.yaml backend database
... ...
``` ```
You can also use a remote `git` bake definition:
```console
$ docker buildx bake "git://github.com/docker/cli#master" --print
#1 [internal] load git source git://github.com/docker/cli#master
#1 0.686 2776a6d694f988c0c1df61cad4bfac0f54e481c8 refs/heads/master
#1 CACHED
{
"group": {
"default": [
"binary"
]
},
"target": {
"binary": {
"context": "git://github.com/docker/cli#master",
"dockerfile": "Dockerfile",
"args": {
"BASE_VARIANT": "alpine",
"GO_STRIP": "",
"VERSION": ""
},
"target": "binary",
"platforms": [
"local"
],
"output": [
"build"
]
}
}
}
```
As you can see the context is fixed to `git://github.com/docker/cli` even if
[no context is actually defined](https://github.com/docker/cli/blob/2776a6d694f988c0c1df61cad4bfac0f54e481c8/docker-bake.hcl#L17-L26)
in the definition.
If you want to access the main context for bake command from a bake file
that has been imported remotely, you can use the `BAKE_CMD_CONTEXT` builtin var:
```console
$ cat https://raw.githubusercontent.com/tonistiigi/buildx/remote-test/docker-bake.hcl
target "default" {
context = BAKE_CMD_CONTEXT
dockerfile-inline = <<EOT
FROM alpine
WORKDIR /src
COPY . .
RUN ls -l && stop
EOT
}
```
```console
$ docker buildx bake "git://github.com/tonistiigi/buildx#remote-test" --print
{
"group": {
"default": [
"default"
]
},
"target": {
"default": {
"context": ".",
"dockerfile": "Dockerfile",
"dockerfile-inline": "FROM alpine\nWORKDIR /src\nCOPY . .\nRUN ls -l \u0026\u0026 stop\n"
}
}
}
```
```console
$ touch foo bar
$ docker buildx bake "git://github.com/tonistiigi/buildx#remote-test"
...
> [4/4] RUN ls -l && stop:
#8 0.101 total 0
#8 0.102 -rw-r--r-- 1 root root 0 Jul 27 18:47 bar
#8 0.102 -rw-r--r-- 1 root root 0 Jul 27 18:47 foo
#8 0.102 /bin/sh: stop: not found
```
```console
$ docker buildx bake "git://github.com/tonistiigi/buildx#remote-test" "git://github.com/docker/cli#master" --print
#1 [internal] load git source git://github.com/tonistiigi/buildx#remote-test
#1 0.401 577303add004dd7efeb13434d69ea030d35f7888 refs/heads/remote-test
#1 CACHED
{
"group": {
"default": [
"default"
]
},
"target": {
"default": {
"context": "git://github.com/docker/cli#master",
"dockerfile": "Dockerfile",
"dockerfile-inline": "FROM alpine\nWORKDIR /src\nCOPY . .\nRUN ls -l \u0026\u0026 stop\n"
}
}
}
```
```console
$ docker buildx bake "git://github.com/tonistiigi/buildx#remote-test" "git://github.com/docker/cli#master"
...
> [4/4] RUN ls -l && stop:
#8 0.136 drwxrwxrwx 5 root root 4096 Jul 27 18:31 kubernetes
#8 0.136 drwxrwxrwx 3 root root 4096 Jul 27 18:31 man
#8 0.136 drwxrwxrwx 2 root root 4096 Jul 27 18:31 opts
#8 0.136 -rw-rw-rw- 1 root root 1893 Jul 27 18:31 poule.yml
#8 0.136 drwxrwxrwx 7 root root 4096 Jul 27 18:31 scripts
#8 0.136 drwxrwxrwx 3 root root 4096 Jul 27 18:31 service
#8 0.136 drwxrwxrwx 2 root root 4096 Jul 27 18:31 templates
#8 0.136 drwxrwxrwx 10 root root 4096 Jul 27 18:31 vendor
#8 0.136 -rwxrwxrwx 1 root root 9620 Jul 27 18:31 vendor.conf
#8 0.136 /bin/sh: stop: not found
```
### <a name="no-cache"></a> Do not use cache when building the image (--no-cache) ### <a name="no-cache"></a> Do not use cache when building the image (--no-cache)
Same as `build --no-cache`. Do not use cache when building the image. Same as `build --no-cache`. Do not use cache when building the image.
### <a name="print"></a> Print the options without building (--print) ### <a name="print"></a> Print the options without building (--print)
Prints the resulting options of the targets desired to be built, in a JSON Prints the resulting options of the targets desired to be built, in a JSON format,
format, without starting a build. without starting a build.
```console ```console
$ docker buildx bake -f docker-bake.hcl --print db $ docker buildx bake -f docker-bake.hcl --print db
{ {
"group": { "target": {
"default": [ "db": {
"db" "context": "./",
] "dockerfile": "Dockerfile",
}, "tags": [
"target": { "docker.io/tiborvass/db"
"db": { ]
"context": "./", }
"dockerfile": "Dockerfile", }
"tags": [
"docker.io/tiborvass/db"
]
}
}
} }
``` ```
@@ -279,9 +147,8 @@ Same as `build --pull`.
--set targetpattern.key[.subkey]=value --set targetpattern.key[.subkey]=value
``` ```
Override target configurations from command line. The pattern matching syntax Override target configurations from command line. The pattern matching syntax is
is defined in https://golang.org/pkg/path/#Match. defined in https://golang.org/pkg/path/#Match.
**Examples** **Examples**
@@ -294,8 +161,8 @@ $ docker buildx bake --set foo*.no-cache # bypass caching only for
``` ```
Complete list of overridable fields: Complete list of overridable fields:
`args`, `cache-from`, `cache-to`, `context`, `dockerfile`, `labels`, `no-cache`, args, cache-from, cache-to, context, dockerfile, labels, no-cache, output, platform,
`output`, `platform`, `pull`, `secrets`, `ssh`, `tags`, `target` pull, secrets, ssh, tags, target
### File definition ### File definition
@@ -346,97 +213,18 @@ Complete list of valid target fields:
`args`, `cache-from`, `cache-to`, `context`, `dockerfile`, `inherits`, `labels`, `args`, `cache-from`, `cache-to`, `context`, `dockerfile`, `inherits`, `labels`,
`no-cache`, `output`, `platform`, `pull`, `secrets`, `ssh`, `tags`, `target` `no-cache`, `output`, `platform`, `pull`, `secrets`, `ssh`, `tags`, `target`
### Global scope attributes
You can define global scope attributes in HCL/JSON and use them for code reuse
and setting values for variables. This means you can do a "data-only" HCL file
with the values you want to set/override and use it in the list of regular
output files.
```hcl
# docker-bake.hcl
variable "FOO" {
default = "abc"
}
target "app" {
args = {
v1 = "pre-${FOO}"
}
}
```
You can use this file directly:
```console
$ docker buildx bake --print app
{
"group": {
"default": [
"app"
]
},
"target": {
"app": {
"context": ".",
"dockerfile": "Dockerfile",
"args": {
"v1": "pre-abc"
}
}
}
}
```
Or create an override configuration file:
```hcl
# env.hcl
WHOAMI="myuser"
FOO="def-${WHOAMI}"
```
And invoke bake together with both of the files:
```console
$ docker buildx bake -f docker-bake.hcl -f env.hcl --print app
{
"group": {
"default": [
"app"
]
},
"target": {
"app": {
"context": ".",
"dockerfile": "Dockerfile",
"args": {
"v1": "pre-def-myuser"
}
}
}
}
```
### HCL variables and functions ### HCL variables and functions
Similar to how Terraform provides a way to [define variables](https://www.terraform.io/docs/configuration/variables.html#declaring-an-input-variable), Similar to how Terraform provides a way to [define variables](https://www.terraform.io/docs/configuration/variables.html#declaring-an-input-variable),
the HCL file format also supports variable block definitions. These can be used the HCL file format also supports variable block definitions. These can be used
to define variables with values provided by the current environment, or a to define variables with values provided by the current environment, or a default
default value when unset. value when unset.
A [set of generally useful functions](https://github.com/docker/buildx/blob/master/bake/hclparser/stdlib.go)
provided by [go-cty](https://github.com/zclconf/go-cty/tree/main/cty/function/stdlib)
are available for use in HCL files. In addition, [user defined functions](https://github.com/hashicorp/hcl/tree/main/ext/userfunc)
are also supported.
#### Using interpolation to tag an image with the git sha Example of using interpolation to tag an image with the git sha:
Bake supports variable blocks which are assigned to matching environment ```console
variables or default values. $ cat <<'EOF' > docker-bake.hcl
```hcl
# docker-bake.hcl
variable "TAG" { variable "TAG" {
default = "latest" default = "latest"
} }
@@ -448,55 +236,45 @@ group "default" {
target "webapp" { target "webapp" {
tags = ["docker.io/username/webapp:${TAG}"] tags = ["docker.io/username/webapp:${TAG}"]
} }
``` EOF
```console
$ docker buildx bake --print webapp $ docker buildx bake --print webapp
{ {
"group": { "target": {
"default": [ "webapp": {
"webapp" "context": ".",
] "dockerfile": "Dockerfile",
}, "tags": [
"target": { "docker.io/username/webapp:latest"
"webapp": { ]
"context": ".", }
"dockerfile": "Dockerfile", }
"tags": [
"docker.io/username/webapp:latest"
]
}
}
} }
```
```console
$ TAG=$(git rev-parse --short HEAD) docker buildx bake --print webapp $ TAG=$(git rev-parse --short HEAD) docker buildx bake --print webapp
{ {
"group": { "target": {
"default": [ "webapp": {
"webapp" "context": ".",
] "dockerfile": "Dockerfile",
}, "tags": [
"target": { "docker.io/username/webapp:985e9e9"
"webapp": { ]
"context": ".", }
"dockerfile": "Dockerfile", }
"tags": [
"docker.io/username/webapp:985e9e9"
]
}
}
} }
``` ```
#### Using the `add` function
You can use [`go-cty` stdlib functions](https://github.com/zclconf/go-cty/tree/main/cty/function/stdlib). A [set of generally useful functions](https://github.com/docker/buildx/blob/master/bake/hclparser/stdlib.go)
Here we are using the `add` function. provided by [go-cty](https://github.com/zclconf/go-cty/tree/main/cty/function/stdlib)
are available for use in HCL files. In addition, [user defined functions](https://github.com/hashicorp/hcl/tree/main/ext/userfunc)
are also supported.
```hcl Example of using the `add` function:
# docker-bake.hcl
```console
$ cat <<'EOF' > docker-bake.hcl
variable "TAG" { variable "TAG" {
default = "latest" default = "latest"
} }
@@ -510,35 +288,26 @@ target "webapp" {
buildno = "${add(123, 1)}" buildno = "${add(123, 1)}"
} }
} }
``` EOF
```console
$ docker buildx bake --print webapp $ docker buildx bake --print webapp
{ {
"group": { "target": {
"default": [ "webapp": {
"webapp" "context": ".",
] "dockerfile": "Dockerfile",
}, "args": {
"target": { "buildno": "124"
"webapp": { }
"context": ".",
"dockerfile": "Dockerfile",
"args": {
"buildno": "124"
} }
} }
}
} }
``` ```
#### Defining an `increment` function Example of defining an `increment` function:
It also supports [user defined functions](https://github.com/hashicorp/hcl/tree/main/ext/userfunc). ```console
The following example defines a simple `increment` function. $ cat <<'EOF' > docker-bake.hcl
```hcl
# docker-bake.hcl
function "increment" { function "increment" {
params = [number] params = [number]
result = number + 1 result = number + 1
@@ -553,35 +322,27 @@ target "webapp" {
buildno = "${increment(123)}" buildno = "${increment(123)}"
} }
} }
``` EOF
```console
$ docker buildx bake --print webapp $ docker buildx bake --print webapp
{ {
"group": { "target": {
"default": [ "webapp": {
"webapp" "context": ".",
] "dockerfile": "Dockerfile",
}, "args": {
"target": { "buildno": "124"
"webapp": { }
"context": ".",
"dockerfile": "Dockerfile",
"args": {
"buildno": "124"
} }
} }
}
} }
``` ```
#### Only adding tags if a variable is not empty using a `notequal` Example of only adding tags if a variable is not empty using a `notequal`
function:
Here we are using the conditional `notequal` function which is just for ```console
symmetry with the `equal` one. $ cat <<'EOF' > docker-bake.hcl
```hcl
# docker-bake.hcl
variable "TAG" {default="" } variable "TAG" {default="" }
group "default" { group "default" {
@@ -598,277 +359,18 @@ target "webapp" {
notequal("",TAG) ? "my-image:${TAG}": "", notequal("",TAG) ? "my-image:${TAG}": "",
] ]
} }
``` EOF
```console
$ docker buildx bake --print webapp $ docker buildx bake --print webapp
{ {
"group": { "target": {
"default": [ "webapp": {
"webapp" "context": ".",
] "dockerfile": "Dockerfile",
}, "tags": [
"target": { "my-image:latest"
"webapp": { ]
"context": ".",
"dockerfile": "Dockerfile",
"tags": [
"my-image:latest"
]
}
}
}
```
#### Using variables in functions
You can refer variables to other variables like the target blocks can. Stdlib
functions can also be called but user functions can't at the moment.
```hcl
# docker-bake.hcl
variable "REPO" {
default = "user/repo"
}
function "tag" {
params = [tag]
result = ["${REPO}:${tag}"]
}
target "webapp" {
tags = tag("v1")
}
```
```console
$ docker buildx bake --print webapp
{
"group": {
"default": [
"webapp"
]
},
"target": {
"webapp": {
"context": ".",
"dockerfile": "Dockerfile",
"tags": [
"user/repo:v1"
]
}
}
}
```
#### Using variables in variables across files
When multiple files are specified, one file can use variables defined in
another file.
```hcl
# docker-bake1.hcl
variable "FOO" {
default = upper("${BASE}def")
}
variable "BAR" {
default = "-${FOO}-"
}
target "app" {
args = {
v1 = "pre-${BAR}"
}
}
```
```hcl
# docker-bake2.hcl
variable "BASE" {
default = "abc"
}
target "app" {
args = {
v2 = "${FOO}-post"
}
}
```
```console
$ docker buildx bake -f docker-bake1.hcl -f docker-bake2.hcl --print app
{
"group": {
"default": [
"app"
]
},
"target": {
"app": {
"context": ".",
"dockerfile": "Dockerfile",
"args": {
"v1": "pre--ABCDEF-",
"v2": "ABCDEF-post"
} }
} }
}
} }
``` ```
#### Using typed variables
Non-string variables are also accepted. The value passed with env is parsed
into suitable type first.
```hcl
# docker-bake.hcl
variable "FOO" {
default = 3
}
variable "IS_FOO" {
default = true
}
target "app" {
args = {
v1 = FOO > 5 ? "higher" : "lower"
v2 = IS_FOO ? "yes" : "no"
}
}
```
```console
$ docker buildx bake --print app
{
"group": {
"default": [
"app"
]
},
"target": {
"app": {
"context": ".",
"dockerfile": "Dockerfile",
"args": {
"v1": "lower",
"v2": "yes"
}
}
}
}
```
### Extension field with Compose
[Special extension](https://github.com/compose-spec/compose-spec/blob/master/spec.md#extension)
field `x-bake` can be used in your compose file to evaluate fields that are not
(yet) available in the [build definition](https://github.com/compose-spec/compose-spec/blob/master/build.md#build-definition).
```yaml
# docker-compose.yml
services:
addon:
image: ct-addon:bar
build:
context: .
dockerfile: ./Dockerfile
args:
CT_ECR: foo
CT_TAG: bar
x-bake:
tags:
- ct-addon:foo
- ct-addon:alp
platforms:
- linux/amd64
- linux/arm64
cache-from:
- user/app:cache
- type=local,src=path/to/cache
cache-to: type=local,dest=path/to/cache
pull: true
aws:
image: ct-fake-aws:bar
build:
dockerfile: ./aws.Dockerfile
args:
CT_ECR: foo
CT_TAG: bar
x-bake:
secret:
- id=mysecret,src=./secret
- id=mysecret2,src=./secret2
platforms: linux/arm64
output: type=docker
no-cache: true
```
```console
$ docker buildx bake --print
{
"target": {
"addon": {
"context": ".",
"dockerfile": "./Dockerfile",
"args": {
"CT_ECR": "foo",
"CT_TAG": "bar"
},
"tags": [
"ct-addon:foo",
"ct-addon:alp"
],
"cache-from": [
"user/app:cache",
"type=local,src=path/to/cache"
],
"cache-to": [
"type=local,dest=path/to/cache"
],
"platforms": [
"linux/amd64",
"linux/arm64"
],
"pull": true
},
"aws": {
"context": ".",
"dockerfile": "./aws.Dockerfile",
"args": {
"CT_ECR": "foo",
"CT_TAG": "bar"
},
"tags": [
"ct-fake-aws:bar"
],
"secret": [
"id=mysecret,src=./secret",
"id=mysecret2,src=./secret2"
],
"platforms": [
"linux/arm64"
],
"output": [
"type=docker"
],
"no-cache": true
}
}
}
```
Complete list of valid fields for `x-bake`:
`tags`, `cache-from`, `cache-to`, `secret`, `ssh`, `platforms`, `output`,
`pull`, `no-cache`
### Built-in variables
* `BAKE_CMD_CONTEXT` can be used to access the main `context` for bake command
from a bake file that has been [imported remotely](#file).
* `BAKE_LOCAL_PLATFORM` returns the current platform's default platform
specification (e.g. `linux/amd64`).

View File

@@ -15,32 +15,28 @@ Start a build
| Name | Description | | Name | Description |
| --- | --- | | --- | --- |
| [`--add-host stringSlice`](https://docs.docker.com/engine/reference/commandline/build/#add-entries-to-container-hosts-file---add-host) | Add a custom host-to-IP mapping (format: `host:ip`) | | [`--add-host stringSlice`](https://docs.docker.com/engine/reference/commandline/build/#add-entries-to-container-hosts-file---add-host) | Add a custom host-to-IP mapping (host:ip) |
| [`--allow stringSlice`](#allow) | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) | | [`--allow stringSlice`](#allow) | Allow extra privileged entitlement, e.g. network.host, security.insecure |
| [`--build-arg stringArray`](https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables---build-arg) | Set build-time variables | | [`--build-arg stringArray`](https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables---build-arg) | Set build-time variables |
| [`--builder string`](#builder) | Override the configured builder instance | | `--builder string` | Override the configured builder instance |
| [`--cache-from stringArray`](#cache-from) | External cache sources (e.g., `user/app:cache`, `type=local,src=path/to/dir`) | | [`--cache-from stringArray`](#cache-from) | External cache sources (eg. user/app:cache, type=local,src=path/to/dir) |
| [`--cache-to stringArray`](#cache-to) | Cache export destinations (e.g., `user/app:cache`, `type=local,dest=path/to/dir`) | | [`--cache-to stringArray`](#cache-to) | Cache export destinations (eg. user/app:cache, type=local,dest=path/to/dir) |
| [`--cgroup-parent string`](https://docs.docker.com/engine/reference/commandline/build/#use-a-custom-parent-cgroup---cgroup-parent) | Optional parent cgroup for the container | | [`-f`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f), [`--file string`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f) | Name of the Dockerfile (Default is 'PATH/Dockerfile') |
| [`-f`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f), [`--file string`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f) | Name of the Dockerfile (default: `PATH/Dockerfile`) |
| `--iidfile string` | Write the image ID to the file | | `--iidfile string` | Write the image ID to the file |
| `--label stringArray` | Set metadata for an image | | `--label stringArray` | Set metadata for an image |
| [`--load`](#load) | Shorthand for `--output=type=docker` | | [`--load`](#load) | Shorthand for --output=type=docker |
| `--metadata-file string` | Write build result metadata to the file | | `--metadata-file string` | Write build result metadata to the file |
| `--network string` | Set the networking mode for the RUN instructions during build | | `--network string` | Set the networking mode for the RUN instructions during build |
| `--no-cache` | Do not use cache when building the image | | `--no-cache` | Do not use cache when building the image |
| [`-o`](#output), [`--output stringArray`](#output) | Output destination (format: `type=local,dest=path`) | | [`-o`](#output), [`--output stringArray`](#output) | Output destination (format: type=local,dest=path) |
| [`--platform stringArray`](#platform) | Set target platform for build | | [`--platform stringArray`](#platform) | Set target platform for build |
| [`--progress string`](#progress) | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output | | [`--progress string`](#progress) | Set type of progress output (auto, plain, tty). Use plain to show container output |
| `--pull` | Always attempt to pull a newer version of the image | | `--pull` | Always attempt to pull a newer version of the image |
| [`--push`](#push) | Shorthand for `--output=type=registry` | | [`--push`](#push) | Shorthand for --output=type=registry |
| `-q`, `--quiet` | Suppress the build output and print image ID on success | | `--secret stringArray` | Secret file to expose to the build: id=mysecret,src=/local/secret |
| `--secret stringArray` | Secret file to expose to the build (format: `id=mysecret,src=/local/secret`) | | `--ssh stringArray` | SSH agent socket or keys to expose to the build (format: default|<id>[=<socket>|<key>[,<key>]]) |
| [`--shm-size bytes`](#shm-size) | Size of `/dev/shm` | | [`-t`](https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t), [`--tag stringArray`](https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t) | Name and optionally a tag in the 'name:tag' format |
| `--ssh stringArray` | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
| [`-t`](https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t), [`--tag stringArray`](https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t) | Name and optionally a tag (format: `name:tag`) |
| [`--target string`](https://docs.docker.com/engine/reference/commandline/build/#specifying-target-build-stage---target) | Set the target build stage to build. | | [`--target string`](https://docs.docker.com/engine/reference/commandline/build/#specifying-target-build-stage---target) | Set the target build stage to build. |
| [`--ulimit ulimit`](#ulimit) | Ulimit options |
<!---MARKER_GEN_END--> <!---MARKER_GEN_END-->
@@ -56,10 +52,6 @@ here well document a subset of the new flags.
## Examples ## Examples
### <a name="builder"></a> Override the configured builder instance (--builder)
Same as [`buildx --builder`](buildx.md#builder).
### <a name="platform"></a> Set the target platforms for the build (--platform) ### <a name="platform"></a> Set the target platforms for the build (--platform)
``` ```
@@ -236,17 +228,10 @@ single-platform build result to `docker images`.
--cache-from=[NAME|type=TYPE[,KEY=VALUE]] --cache-from=[NAME|type=TYPE[,KEY=VALUE]]
``` ```
Use an external cache source for a build. Supported types are `registry`, Use an external cache source for a build. Supported types are `registry` and `local`.
`local` and `gha`. The `registry` source can import cache from a cache manifest or (special) image
configuration on the registry. The `local` source can import cache from local
- [`registry` source](https://github.com/moby/buildkit#registry-push-image-and-cache-separately) files previously exported with `--cache-to`.
can import cache from a cache manifest or (special) image configuration on the
registry.
- [`local` source](https://github.com/moby/buildkit#local-directory-1) can
import cache from local files previously exported with `--cache-to`.
- [`gha` source](https://github.com/moby/buildkit#github-actions-cache-experimental)
can import cache from a previously exported cache with `--cache-to` in your
GitHub repository
If no type is specified, `registry` exporter is used with a specified reference. If no type is specified, `registry` exporter is used with a specified reference.
@@ -259,27 +244,18 @@ $ docker buildx build --cache-from=user/app:cache .
$ docker buildx build --cache-from=user/app . $ docker buildx build --cache-from=user/app .
$ docker buildx build --cache-from=type=registry,ref=user/app . $ docker buildx build --cache-from=type=registry,ref=user/app .
$ docker buildx build --cache-from=type=local,src=path/to/cache . $ docker buildx build --cache-from=type=local,src=path/to/cache .
$ docker buildx build --cache-from=type=gha .
``` ```
More info about cache exporters and available attributes: https://github.com/moby/buildkit#export-cache
### <a name="cache-to"></a> Export build cache to an external cache destination (--cache-to) ### <a name="cache-to"></a> Export build cache to an external cache destination (--cache-to)
``` ```
--cache-to=[NAME|type=TYPE[,KEY=VALUE]] --cache-to=[NAME|type=TYPE[,KEY=VALUE]]
``` ```
Export build cache to an external cache destination. Supported types are Export build cache to an external cache destination. Supported types are `registry`,
`registry`, `local`, `inline` and `gha`. `local` and `inline`. Registry exports build cache to a cache manifest in the
registry, local exports cache to a local directory on the client and inline writes
- [`registry` type](https://github.com/moby/buildkit#registry-push-image-and-cache-separately) exports build cache to a cache manifest in the registry. the cache metadata into the image configuration.
- [`local` type](https://github.com/moby/buildkit#local-directory-1) type
exports cache to a local directory on the client.
- [`inline` type](https://github.com/moby/buildkit#inline-push-image-and-cache-together)
type writes the cache metadata into the image configuration.
- [`gha` type](https://github.com/moby/buildkit#github-actions-cache-experimental)
type exports cache through the [Github Actions Cache service API](https://github.com/tonistiigi/go-actions-cache/blob/master/api.md#authentication).
`docker` driver currently only supports exporting inline cache metadata to image `docker` driver currently only supports exporting inline cache metadata to image
configuration. Alternatively, `--build-arg BUILDKIT_INLINE_CACHE=1` can be used configuration. Alternatively, `--build-arg BUILDKIT_INLINE_CACHE=1` can be used
@@ -287,8 +263,8 @@ to trigger inline cache exporter.
Attribute key: Attribute key:
- `mode` - Specifies how many layers are exported with the cache. `min` only exports layers already in the final build stage, while `max` exports layers for all stages. Metadata is always exported for the whole build.
**Examples** **Examples**
@@ -298,11 +274,8 @@ $ docker buildx build --cache-to=user/app:cache .
$ docker buildx build --cache-to=type=inline . $ docker buildx build --cache-to=type=inline .
$ docker buildx build --cache-to=type=registry,ref=user/app . $ docker buildx build --cache-to=type=registry,ref=user/app .
$ docker buildx build --cache-to=type=local,dest=path/to/cache . $ docker buildx build --cache-to=type=local,dest=path/to/cache .
$ docker buildx build --cache-to=type=gha .
``` ```
More info about cache exporters and available attributes: https://github.com/moby/buildkit#export-cache
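As an illustrative sketch (the cache reference `user/app:cache` is only a placeholder), `mode=max` can be combined with the `registry` exporter so that intermediate stages are cached as well:

```console
$ docker buildx build --cache-to=type=registry,ref=user/app:cache,mode=max .
```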
### <a name="allow"></a> Allow extra privileged entitlement (--allow) ### <a name="allow"></a> Allow extra privileged entitlement (--allow)
``` ```
@@ -316,7 +289,7 @@ Allow extra privileged entitlement. List of entitlements:
[related Dockerfile extensions](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/experimental.md#run---securityinsecuresandbox). [related Dockerfile extensions](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/experimental.md#run---securityinsecuresandbox).
For entitlements to be enabled, the `buildkitd` daemon also needs to allow them For entitlements to be enabled, the `buildkitd` daemon also needs to allow them
with `--allow-insecure-entitlement` (see [`create --buildkitd-flags`](buildx_create.md#buildkitd-flags)) with `--allow-insecure-entitlement` (see [`create --buildkitd-flags`](buildx_create.md#--buildkitd-flags-flags))
**Examples** **Examples**
@@ -324,24 +297,3 @@ with `--allow-insecure-entitlement` (see [`create --buildkitd-flags`](buildx_cre
$ docker buildx create --use --name insecure-builder --buildkitd-flags '--allow-insecure-entitlement security.insecure' $ docker buildx create --use --name insecure-builder --buildkitd-flags '--allow-insecure-entitlement security.insecure'
$ docker buildx build --allow security.insecure . $ docker buildx build --allow security.insecure .
``` ```
### <a name="shm-size"></a> Size of `/dev/shm` (--shm-size)
The format is `<number><unit>`. `number` must be greater than `0`. Unit is
optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g`
(gigabytes). If you omit the unit, the system uses bytes.
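For example (the value is illustrative, not a recommended default):

```console
$ docker buildx build --shm-size 512m .
```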
### <a name="ulimit"></a> Set ulimits (--ulimit)
`--ulimit` is specified with a soft and hard limit as such:
`<type>=<soft limit>[:<hard limit>]`, for example:
```console
$ docker buildx build --ulimit nofile=1024:1024 .
```
> **Note**
>
> If you do not provide a `hard limit`, the `soft limit` is used
> for both values. If no `ulimits` are set, they are inherited from
> the default `ulimits` set on the daemon.
@@ -12,10 +12,10 @@ Create a new builder instance
| Name | Description | | Name | Description |
| --- | --- | | --- | --- |
| [`--append`](#append) | Append a node to builder instead of changing it | | [`--append`](#append) | Append a node to builder instead of changing it |
| `--bootstrap` | Boot builder after creation | | `--builder string` | Override the configured builder instance |
| [`--buildkitd-flags string`](#buildkitd-flags) | Flags for buildkitd daemon | | [`--buildkitd-flags string`](#buildkitd-flags) | Flags for buildkitd daemon |
| [`--config string`](#config) | BuildKit config file | | [`--config string`](#config) | BuildKit config file |
| [`--driver string`](#driver) | Driver to use (available: `docker`, `docker-container`, `kubernetes`) | | [`--driver string`](#driver) | Driver to use (available: []) |
| [`--driver-opt stringArray`](#driver-opt) | Options for the driver | | [`--driver-opt stringArray`](#driver-opt) | Options for the driver |
| [`--leave`](#leave) | Remove a node from builder instead of changing it | | [`--leave`](#leave) | Remove a node from builder instead of changing it |
| [`--name string`](#name) | Builder instance name | | [`--name string`](#name) | Builder instance name |
@@ -64,7 +64,7 @@ eager_beaver
``` ```
Adds flags when starting the buildkitd daemon. They take precedence over the Adds flags when starting the buildkitd daemon. They take precedence over the
configuration file specified by [`--config`](#config). See `buildkitd --help` configuration file specified by [`--config`](#--config-file). See `buildkitd --help`
for the available flags. for the available flags.
**Example** **Example**
@@ -80,14 +80,9 @@ for the available flags.
``` ```
Specifies the configuration file for the buildkitd daemon to use. The configuration Specifies the configuration file for the buildkitd daemon to use. The configuration
can be overridden by [`--buildkitd-flags`](#buildkitd-flags). can be overridden by [`--buildkitd-flags`](#--buildkitd-flags-flags).
See an [example buildkitd configuration file](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md). See an [example buildkitd configuration file](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md).
Note that if you create a `docker-container` builder and have specified
certificates for registries in the `buildkitd.toml` configuration, the files
will be copied into the container under `/etc/buildkit/certs` and configuration
will be updated to reflect that.
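A minimal sketch, assuming a `buildkitd.toml` in the working directory and an illustrative builder name:

```console
$ docker buildx create --name mybuilder --driver docker-container --config ./buildkitd.toml --use
```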
### <a name="driver"></a> Set the builder driver to use (--driver) ### <a name="driver"></a> Set the builder driver to use (--driver)
``` ```
@@ -97,30 +92,17 @@ will be updated to reflect that.
Sets the builder driver to be used. There are three available drivers, each with its own specificities.
#### `docker` driver - `docker` - Uses the builder that is built into the docker daemon. With this
driver, the [`--load`](buildx_build.md#--load) flag is implied by default on
`buildx build`. However, building multi-platform images or exporting cache is
not currently supported.
- `docker-container` - Uses a buildkit container that will be spawned via docker.
With this driver, both building multi-platform images and exporting cache are
supported. However, images built will not automatically appear in `docker images`
(see [`build --load`](buildx_build.md#--load)).
- `kubernetes` - Uses Kubernetes pods. With this driver, you can spin up pods with a defined BuildKit container image to build your images.
Uses the builder that is built into the docker daemon. With this driver,
the [`--load`](buildx_build.md#load) flag is implied by default on
`buildx build`. However, building multi-platform images or exporting cache is
not currently supported.
#### `docker-container` driver
Uses a BuildKit container that will be spawned via docker. With this driver,
both building multi-platform images and exporting cache are supported.
Unlike `docker` driver, built images will not automatically appear in
`docker images` and [`build --load`](buildx_build.md#load) needs to be used
to achieve that.
#### `kubernetes` driver
Uses Kubernetes pods. With this driver, you can spin up pods with a defined
BuildKit container image to build your images.
Unlike `docker` driver, built images will not automatically appear in
`docker images` and [`build --load`](buildx_build.md#load) needs to be used
to achieve that.
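As a hedged sketch of choosing a driver (the builder name and image tag are placeholders), a `docker-container` builder can be created and then used for a multi-platform build that is pushed to a registry:

```console
$ docker buildx create --name multi --driver docker-container --use
$ docker buildx build --platform linux/amd64,linux/arm64 -t user/app:latest --push .
```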
### <a name="driver-opt"></a> Set additional driver-specific options (--driver-opt) ### <a name="driver-opt"></a> Set additional driver-specific options (--driver-opt)
@@ -134,7 +116,11 @@ Passes additional driver-specific options. Details for each driver:
- `docker-container` - `docker-container`
- `image=IMAGE` - Sets the container image to be used for running buildkit. - `image=IMAGE` - Sets the container image to be used for running buildkit.
- `network=NETMODE` - Sets the network mode for running the buildkit container. - `network=NETMODE` - Sets the network mode for running the buildkit container.
- `cgroup-parent=CGROUP` - Sets the cgroup parent of the buildkit container if docker is using the "cgroupfs" driver. Defaults to `/docker/buildx`. - Example:
```console
--driver docker-container --driver-opt image=moby/buildkit:master,network=host
```
- `kubernetes` - `kubernetes`
- `image=IMAGE` - Sets the container image to be used for running buildkit. - `image=IMAGE` - Sets the container image to be used for running buildkit.
- `namespace=NS` - Sets the Kubernetes namespace. Defaults to the current namespace. - `namespace=NS` - Sets the Kubernetes namespace. Defaults to the current namespace.
@@ -146,32 +132,6 @@ Passes additional driver-specific options. Details for each driver:
- `nodeselector="label1=value1,label2=value2"` - Sets the key-value pairs of the `Pod` nodeSelector. No default. Example: `nodeselector=kubernetes.io/arch=arm64`
- `rootless=(true|false)` - Run the container as a non-root user without `securityContext.privileged`. [Using Ubuntu host kernel is recommended](https://github.com/moby/buildkit/blob/master/docs/rootless.md). Defaults to false. - `rootless=(true|false)` - Run the container as a non-root user without `securityContext.privileged`. [Using Ubuntu host kernel is recommended](https://github.com/moby/buildkit/blob/master/docs/rootless.md). Defaults to false.
- `loadbalance=(sticky|random)` - Load-balancing strategy. If set to "sticky", the pod is chosen using the hash of the context path. Defaults to "sticky" - `loadbalance=(sticky|random)` - Load-balancing strategy. If set to "sticky", the pod is chosen using the hash of the context path. Defaults to "sticky"
- `qemu.install=(true|false)` - Install QEMU emulation for multi platforms support.
- `qemu.image=IMAGE` - Sets the QEMU emulation image. Defaults to `tonistiigi/binfmt:latest`
**Examples**
#### Use a custom network
```console
$ docker network create foonet
$ docker buildx create --name builder --driver docker-container --driver-opt network=foonet --use
$ docker buildx inspect --bootstrap
$ docker inspect buildx_buildkit_builder0 --format={{.NetworkSettings.Networks}}
map[foonet:0xc00018c0c0]
```
#### OpenTelemetry support
To capture the trace to [Jaeger](https://github.com/jaegertracing/jaeger), set
`JAEGER_TRACE` environment variable to the collection address using the `driver-opt`:
```console
$ docker run -d --name jaeger -p 6831:6831/udp -p 16686:16686 jaegertracing/all-in-one
$ docker buildx create --name builder --driver docker-container --driver-opt network=host --driver-opt env.JAEGER_TRACE=localhost:6831 --use
$ docker buildx inspect --bootstrap
# buildx command should be traced at http://127.0.0.1:16686/
```
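#### Kubernetes driver options

A sketch only; the builder name, namespace, and replica count are assumptions for illustration:

```console
$ docker buildx create --name k8s-builder --driver kubernetes \
  --driver-opt namespace=buildkit,replicas=2,rootless=true --use
```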
### <a name="leave"></a> Remove a node from a builder (--leave) ### <a name="leave"></a> Remove a node from a builder (--leave)
@@ -11,15 +11,9 @@ Disk usage
| Name | Description | | Name | Description |
| --- | --- | | --- | --- |
| [`--builder string`](#builder) | Override the configured builder instance | | `--builder string` | Override the configured builder instance |
| `--filter filter` | Provide filter values | | `--filter filter` | Provide filter values |
| `--verbose` | Provide a more verbose output | | `--verbose` | Provide a more verbose output |
<!---MARKER_GEN_END--> <!---MARKER_GEN_END-->
## Examples
### <a name="builder"></a> Override the configured builder instance (--builder)
Same as [`buildx --builder`](buildx.md#builder).
@@ -15,12 +15,6 @@ Commands to work on images in registry
| [`inspect`](buildx_imagetools_inspect.md) | Show details of image in the registry | | [`inspect`](buildx_imagetools_inspect.md) | Show details of image in the registry |
### Options
| Name | Description |
| --- | --- |
| [`--builder string`](#builder) | Override the configured builder instance |
<!---MARKER_GEN_END--> <!---MARKER_GEN_END-->
@@ -28,9 +22,3 @@ Commands to work on images in registry
Imagetools contains commands for working with manifest lists in the registry. Imagetools contains commands for working with manifest lists in the registry.
These commands are useful for inspecting multi-platform build results. These commands are useful for inspecting multi-platform build results.
## Examples
### <a name="builder"></a> Override the configured builder instance (--builder)
Same as [`buildx --builder`](buildx.md#builder).
@@ -12,7 +12,7 @@ Create a new image based on source images
| Name | Description | | Name | Description |
| --- | --- | | --- | --- |
| [`--append`](#append) | Append to existing manifest | | [`--append`](#append) | Append to existing manifest |
| [`--builder string`](#builder) | Override the configured builder instance | | `--builder string` | Override the configured builder instance |
| [`--dry-run`](#dry-run) | Show final image instead of pushing | | [`--dry-run`](#dry-run) | Show final image instead of pushing |
| [`-f`](#file), [`--file stringArray`](#file) | Read source descriptor from file | | [`-f`](#file), [`--file stringArray`](#file) | Read source descriptor from file |
| [`-t`](#tag), [`--tag stringArray`](#tag) | Set reference for new image | | [`-t`](#tag), [`--tag stringArray`](#tag) | Set reference for new image |
@@ -37,10 +37,6 @@ specified, create performs a carbon copy.
Use the `--append` flag to append the new sources to an existing manifest list Use the `--append` flag to append the new sources to an existing manifest list
in the destination. in the destination.
### <a name="builder"></a> Override the configured builder instance (--builder)
Same as [`buildx --builder`](buildx.md#builder).
### <a name="dry-run"></a> Show final image instead of pushing (--dry-run) ### <a name="dry-run"></a> Show final image instead of pushing (--dry-run)
Use the `--dry-run` flag to show the final image without pushing it.
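An illustrative sketch (the tag and source references are placeholders):

```console
$ docker buildx imagetools create --dry-run -t user/app:latest user/app:amd64 user/app:arm64
```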
@@ -11,7 +11,7 @@ Show details of image in the registry
| Name | Description | | Name | Description |
| --- | --- | | --- | --- |
| [`--builder string`](#builder) | Override the configured builder instance | | `--builder string` | Override the configured builder instance |
| [`--raw`](#raw) | Show original JSON manifest | | [`--raw`](#raw) | Show original JSON manifest |
@@ -41,12 +41,6 @@ Manifests:
... ...
``` ```
## Examples
### <a name="builder"></a> Override the configured builder instance (--builder)
Same as [`buildx --builder`](buildx.md#builder).
### <a name="raw"></a> Show original, unformatted JSON manifest (--raw) ### <a name="raw"></a> Show original, unformatted JSON manifest (--raw)
Use the `--raw` option to print the original JSON bytes instead of the formatted Use the `--raw` option to print the original JSON bytes instead of the formatted
@@ -12,7 +12,7 @@ Inspect current builder instance
| Name | Description | | Name | Description |
| --- | --- | | --- | --- |
| [`--bootstrap`](#bootstrap) | Ensure builder has booted before inspecting | | [`--bootstrap`](#bootstrap) | Ensure builder has booted before inspecting |
| [`--builder string`](#builder) | Override the configured builder instance | | `--builder string` | Override the configured builder instance |
<!---MARKER_GEN_END--> <!---MARKER_GEN_END-->
@@ -23,19 +23,6 @@ Shows information about the current or specified builder.
## Examples ## Examples
### <a name="bootstrap"></a> Ensure that the builder is running before inspecting (--bootstrap)
Use the `--bootstrap` option to ensure that the builder is running before
inspecting it. If the driver is `docker-container`, then `--bootstrap` starts
the buildkit container and waits until it is operational. Bootstrapping is
automatically done during build, and therefore not necessary. The same BuildKit
container is used during the lifetime of the associated builder node (as
displayed in `buildx ls`).
### <a name="builder"></a> Override the configured builder instance (--builder)
Same as [`buildx --builder`](buildx.md#builder).
### Get information about a builder instance ### Get information about a builder instance
By default, `inspect` shows information about the current builder. Specify the By default, `inspect` shows information about the current builder. Specify the
@@ -60,3 +47,12 @@ Endpoint: ssh://ubuntu@1.2.3.4
Status: running Status: running
Platforms: linux/arm64, linux/arm/v7, linux/arm/v6 Platforms: linux/arm64, linux/arm/v7, linux/arm/v6
``` ```
### <a name="bootstrap"></a> Ensure that the builder is running before inspecting (--bootstrap)
Use the `--bootstrap` option to ensure that the builder is running before
inspecting it. If the driver is `docker-container`, then `--bootstrap` starts
the buildkit container and waits until it is operational. Bootstrapping is
automatically done during build, and therefore not necessary. The same BuildKit
container is used during the lifetime of the associated builder node (as
displayed in `buildx ls`).
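For example, with an assumed builder named `mybuilder`:

```console
$ docker buildx inspect mybuilder --bootstrap
```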
@@ -12,8 +12,8 @@ Remove build cache
| Name | Description | | Name | Description |
| --- | --- | | --- | --- |
| `-a`, `--all` | Remove all unused images, not just dangling ones | | `-a`, `--all` | Remove all unused images, not just dangling ones |
| [`--builder string`](#builder) | Override the configured builder instance | | `--builder string` | Override the configured builder instance |
| `--filter filter` | Provide filter values (e.g., `until=24h`) | | `--filter filter` | Provide filter values (e.g. 'until=24h') |
| `-f`, `--force` | Do not prompt for confirmation | | `-f`, `--force` | Do not prompt for confirmation |
| `--keep-storage bytes` | Amount of disk space to keep for cache | | `--keep-storage bytes` | Amount of disk space to keep for cache |
| `--verbose` | Provide a more verbose output | | `--verbose` | Provide a more verbose output |
@@ -21,8 +21,3 @@ Remove build cache
<!---MARKER_GEN_END--> <!---MARKER_GEN_END-->
## Examples
### <a name="builder"></a> Override the configured builder instance (--builder)
Same as [`buildx --builder`](buildx.md#builder).
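A hedged example of removing cache entries older than 24 hours without a confirmation prompt:

```console
$ docker buildx prune --filter until=24h --force
```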
@@ -11,7 +11,7 @@ Remove a builder instance
| Name | Description | | Name | Description |
| --- | --- | | --- | --- |
| [`--builder string`](#builder) | Override the configured builder instance | | `--builder string` | Override the configured builder instance |
| [`--keep-state`](#keep-state) | Keep BuildKit state | | [`--keep-state`](#keep-state) | Keep BuildKit state |
@@ -24,10 +24,6 @@ default builder.
## Examples ## Examples
### <a name="builder"></a> Override the configured builder instance (--builder)
Same as [`buildx --builder`](buildx.md#builder).
### <a name="keep-state"></a> Keep BuildKit state (--keep-state) ### <a name="keep-state"></a> Keep BuildKit state (--keep-state)
Keep BuildKit state, so it can be reused by a new builder with the same name. Keep BuildKit state, so it can be reused by a new builder with the same name.
@@ -7,12 +7,6 @@ docker buildx stop [NAME]
<!---MARKER_GEN_START--> <!---MARKER_GEN_START-->
Stop builder instance Stop builder instance
### Options
| Name | Description |
| --- | --- |
| [`--builder string`](#builder) | Override the configured builder instance |
<!---MARKER_GEN_END--> <!---MARKER_GEN_END-->
@@ -20,9 +14,3 @@ Stop builder instance
Stops the specified or current builder. This does not prevent `buildx build` from restarting the builder. The implementation of stop depends on the driver.
## Examples
### <a name="builder"></a> Override the configured builder instance (--builder)
Same as [`buildx --builder`](buildx.md#builder).
@@ -11,7 +11,7 @@ Set the current builder instance
| Name | Description | | Name | Description |
| --- | --- | | --- | --- |
| [`--builder string`](#builder) | Override the configured builder instance | | `--builder string` | Override the configured builder instance |
| `--default` | Set builder as default for current context | | `--default` | Set builder as default for current context |
| `--global` | Builder persists context changes | | `--global` | Builder persists context changes |
@@ -23,9 +23,3 @@ Set the current builder instance
Switches the current builder instance. Build commands invoked after this command Switches the current builder instance. Build commands invoked after this command
will run on a specified builder. Alternatively, a context name can be used to will run on a specified builder. Alternatively, a context name can be used to
switch to the default builder of that context. switch to the default builder of that context.
## Examples
### <a name="builder"></a> Override the configured builder instance (--builder)
Same as [`buildx --builder`](buildx.md#builder).
@@ -2,6 +2,5 @@ package bkimage
const ( const (
DefaultImage = "moby/buildkit:buildx-stable-1" // TODO: make this verified DefaultImage = "moby/buildkit:buildx-stable-1" // TODO: make this verified
QemuImage = "tonistiigi/binfmt:latest" // TODO: make this verified
DefaultRootlessImage = DefaultImage + "-rootless" DefaultRootlessImage = DefaultImage + "-rootless"
) )
@@ -1,19 +1,17 @@
package docker package docker
import ( import (
"archive/tar"
"bytes" "bytes"
"context" "context"
"io" "io"
"io/ioutil" "io/ioutil"
"net" "net"
"os" "os"
"path"
"path/filepath"
"time" "time"
"github.com/docker/buildx/driver" "github.com/docker/buildx/driver"
"github.com/docker/buildx/driver/bkimage" "github.com/docker/buildx/driver/bkimage"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/imagetools" "github.com/docker/buildx/util/imagetools"
"github.com/docker/buildx/util/progress" "github.com/docker/buildx/util/progress"
"github.com/docker/docker/api/types" "github.com/docker/docker/api/types"
@@ -22,8 +20,6 @@ import (
"github.com/docker/docker/api/types/mount" "github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/network"
dockerclient "github.com/docker/docker/client" dockerclient "github.com/docker/docker/client"
dockerarchive "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/stdcopy" "github.com/docker/docker/pkg/stdcopy"
"github.com/moby/buildkit/client" "github.com/moby/buildkit/client"
"github.com/moby/buildkit/util/tracing/detect" "github.com/moby/buildkit/util/tracing/detect"
@@ -32,15 +28,20 @@ import (
const ( const (
volumeStateSuffix = "_state" volumeStateSuffix = "_state"
// containerStateDir is the location where buildkitd inside the container
// stores its state. The container driver creates a Linux container, so
// this should match the location for Linux, as defined in:
// https://github.com/moby/buildkit/blob/v0.9.0/util/appdefaults/appdefaults_unix.go#L11-L15
containerBuildKitRootDir = "/var/lib/buildkit"
) )
type Driver struct { type Driver struct {
driver.InitConfig driver.InitConfig
factory driver.Factory factory driver.Factory
netMode string netMode string
image string image string
cgroupParent string env []string
env []string
} }
func (d *Driver) IsMobyDriver() bool { func (d *Driver) IsMobyDriver() bool {
@@ -117,27 +118,25 @@ func (d *Driver) create(ctx context.Context, l progress.SubLogger) error {
{ {
Type: mount.TypeVolume, Type: mount.TypeVolume,
Source: d.Name + volumeStateSuffix, Source: d.Name + volumeStateSuffix,
Target: confutil.DefaultBuildKitStateDir, Target: containerBuildKitRootDir,
}, },
}, },
} }
if d.netMode != "" { if d.netMode != "" {
hc.NetworkMode = container.NetworkMode(d.netMode) hc.NetworkMode = container.NetworkMode(d.netMode)
} }
if info, err := d.DockerAPI.Info(ctx); err == nil && info.CgroupDriver == "cgroupfs" {
// Place all buildkit containers inside this cgroup by default so limits can be attached
// to all build activity on the host.
hc.CgroupParent = "/docker/buildx"
if d.cgroupParent != "" {
hc.CgroupParent = d.cgroupParent
}
}
_, err := d.DockerAPI.ContainerCreate(ctx, cfg, hc, &network.NetworkingConfig{}, nil, d.Name) _, err := d.DockerAPI.ContainerCreate(ctx, cfg, hc, &network.NetworkingConfig{}, nil, d.Name)
if err != nil { if err != nil {
return err return err
} }
if err := d.copyToContainer(ctx, d.InitConfig.Files); err != nil { if f := d.InitConfig.ConfigFile; f != "" {
return err buf, err := readFileToTar(f)
if err != nil {
return err
}
if err := d.DockerAPI.CopyToContainer(ctx, d.Name, "/", buf, dockertypes.CopyToContainerOptions{}); err != nil {
return err
}
} }
if err := d.start(ctx, l); err != nil { if err := d.start(ctx, l); err != nil {
return err return err
@@ -197,24 +196,6 @@ func (d *Driver) copyLogs(ctx context.Context, l progress.SubLogger) error {
return rc.Close() return rc.Close()
} }
func (d *Driver) copyToContainer(ctx context.Context, files map[string][]byte) error {
srcPath, err := writeConfigFiles(files)
if err != nil {
return err
}
if srcPath != "" {
defer os.RemoveAll(srcPath)
}
srcArchive, err := dockerarchive.TarWithOptions(srcPath, &dockerarchive.TarOptions{
ChownOpts: &idtools.Identity{UID: 0, GID: 0},
})
if err != nil {
return err
}
defer srcArchive.Close()
return d.DockerAPI.CopyToContainer(ctx, d.Name, "/", srcArchive, dockertypes.CopyToContainerOptions{})
}
func (d *Driver) exec(ctx context.Context, cmd []string) (string, net.Conn, error) { func (d *Driver) exec(ctx context.Context, cmd []string) (string, net.Conn, error) {
execConfig := types.ExecConfig{ execConfig := types.ExecConfig{
Cmd: cmd, Cmd: cmd,
@@ -376,6 +357,29 @@ func (d *demux) Read(dt []byte) (int, error) {
return d.Reader.Read(dt) return d.Reader.Read(dt)
} }
func readFileToTar(fn string) (*bytes.Buffer, error) {
buf := bytes.NewBuffer(nil)
tw := tar.NewWriter(buf)
dt, err := ioutil.ReadFile(fn)
if err != nil {
return nil, err
}
if err := tw.WriteHeader(&tar.Header{
Name: "/etc/buildkit/buildkitd.toml",
Size: int64(len(dt)),
Mode: 0644,
}); err != nil {
return nil, err
}
if _, err := tw.Write(dt); err != nil {
return nil, err
}
if err := tw.Close(); err != nil {
return nil, err
}
return buf, nil
}
type logWriter struct { type logWriter struct {
logger progress.SubLogger logger progress.SubLogger
stream int stream int
@@ -385,27 +389,3 @@ func (l *logWriter) Write(dt []byte) (int, error) {
l.logger.Log(l.stream, dt) l.logger.Log(l.stream, dt)
return len(dt), nil return len(dt), nil
} }
func writeConfigFiles(m map[string][]byte) (_ string, err error) {
// Temp dir that will be copied to the container
tmpDir, err := os.MkdirTemp("", "buildkitd-config")
if err != nil {
return "", err
}
defer func() {
if err != nil {
os.RemoveAll(tmpDir)
}
}()
for f, dt := range m {
f = path.Join(confutil.DefaultBuildKitConfigDir, f)
p := filepath.Join(tmpDir, f)
if err := os.MkdirAll(filepath.Dir(p), 0700); err != nil {
return "", err
}
if err := os.WriteFile(p, dt, 0600); err != nil {
return "", err
}
}
return tmpDir, nil
}
@@ -49,8 +49,6 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver
} }
case k == "image": case k == "image":
d.image = v d.image = v
case k == "cgroup-parent":
d.cgroupParent = v
case strings.HasPrefix(k, "env."): case strings.HasPrefix(k, "env."):
envName := strings.TrimPrefix(k, "env.") envName := strings.TrimPrefix(k, "env.")
if envName == "" { if envName == "" {
@@ -40,8 +40,6 @@ func (d *Driver) Rm(ctx context.Context, force bool, rmVolume bool) error {
func (d *Driver) Client(ctx context.Context) (*client.Client, error) { func (d *Driver) Client(ctx context.Context) (*client.Client, error) {
return client.New(ctx, "", client.WithContextDialer(func(context.Context, string) (net.Conn, error) { return client.New(ctx, "", client.WithContextDialer(func(context.Context, string) (net.Conn, error) {
return d.DockerAPI.DialHijack(ctx, "/grpc", "h2c", nil) return d.DockerAPI.DialHijack(ctx, "/grpc", "h2c", nil)
}), client.WithSessionDialer(func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) {
return d.DockerAPI.DialHijack(ctx, "/session", proto, meta)
})) }))
} }
@@ -44,7 +44,7 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver
if cfg.DockerAPI == nil { if cfg.DockerAPI == nil {
return nil, errors.Errorf("docker driver requires docker API access") return nil, errors.Errorf("docker driver requires docker API access")
} }
if len(cfg.Files) > 0 { if cfg.ConfigFile != "" {
return nil, errors.Errorf("setting config file is not supported for docker driver, use dockerd configuration file") return nil, errors.Errorf("setting config file is not supported for docker driver, use dockerd configuration file")
} }
@@ -18,8 +18,6 @@ import (
"github.com/moby/buildkit/util/tracing/detect" "github.com/moby/buildkit/util/tracing/detect"
"github.com/pkg/errors" "github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1" appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes"
clientappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" clientappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
@@ -41,18 +39,15 @@ type Driver struct {
factory driver.Factory factory driver.Factory
minReplicas int minReplicas int
deployment *appsv1.Deployment deployment *appsv1.Deployment
configMaps []*corev1.ConfigMap
clientset *kubernetes.Clientset clientset *kubernetes.Clientset
deploymentClient clientappsv1.DeploymentInterface deploymentClient clientappsv1.DeploymentInterface
podClient clientcorev1.PodInterface podClient clientcorev1.PodInterface
configMapClient clientcorev1.ConfigMapInterface
podChooser podchooser.PodChooser podChooser podchooser.PodChooser
} }
func (d *Driver) IsMobyDriver() bool { func (d *Driver) IsMobyDriver() bool {
return false return false
} }
func (d *Driver) Config() driver.InitConfig { func (d *Driver) Config() driver.InitConfig {
return d.InitConfig return d.InitConfig
} }
@@ -61,24 +56,7 @@ func (d *Driver) Bootstrap(ctx context.Context, l progress.Logger) error {
return progress.Wrap("[internal] booting buildkit", l, func(sub progress.SubLogger) error { return progress.Wrap("[internal] booting buildkit", l, func(sub progress.SubLogger) error {
_, err := d.deploymentClient.Get(ctx, d.deployment.Name, metav1.GetOptions{}) _, err := d.deploymentClient.Get(ctx, d.deployment.Name, metav1.GetOptions{})
if err != nil { if err != nil {
if !apierrors.IsNotFound(err) { // TODO: return err if err != ErrNotFound
return errors.Wrapf(err, "error for bootstrap %q", d.deployment.Name)
}
for _, cfg := range d.configMaps {
// create ConfigMap first if exists
_, err = d.configMapClient.Create(ctx, cfg, metav1.CreateOptions{})
if err != nil {
if !apierrors.IsAlreadyExists(err) {
return errors.Wrapf(err, "error while calling configMapClient.Create for %q", cfg.Name)
}
_, err = d.configMapClient.Update(ctx, cfg, metav1.UpdateOptions{})
if err != nil {
return errors.Wrapf(err, "error while calling configMapClient.Update for %q", cfg.Name)
}
}
}
_, err = d.deploymentClient.Create(ctx, d.deployment, metav1.CreateOptions{}) _, err = d.deploymentClient.Create(ctx, d.deployment, metav1.CreateOptions{})
if err != nil { if err != nil {
return errors.Wrapf(err, "error while calling deploymentClient.Create for %q", d.deployment.Name) return errors.Wrapf(err, "error while calling deploymentClient.Create for %q", d.deployment.Name)
@@ -167,16 +145,7 @@ func (d *Driver) Stop(ctx context.Context, force bool) error {
func (d *Driver) Rm(ctx context.Context, force bool, rmVolume bool) error { func (d *Driver) Rm(ctx context.Context, force bool, rmVolume bool) error {
if err := d.deploymentClient.Delete(ctx, d.deployment.Name, metav1.DeleteOptions{}); err != nil { if err := d.deploymentClient.Delete(ctx, d.deployment.Name, metav1.DeleteOptions{}); err != nil {
if !apierrors.IsNotFound(err) { return errors.Wrapf(err, "error while calling deploymentClient.Delete for %q", d.deployment.Name)
return errors.Wrapf(err, "error while calling deploymentClient.Delete for %q", d.deployment.Name)
}
}
for _, cfg := range d.configMaps {
if err := d.configMapClient.Delete(ctx, cfg.Name, metav1.DeleteOptions{}); err != nil {
if !apierrors.IsNotFound(err) {
return errors.Wrapf(err, "error while calling configMapClient.Delete for %q", cfg.Name)
}
}
} }
return nil return nil
} }
@@ -59,13 +59,11 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver
if err != nil { if err != nil {
return nil, err return nil, err
} }
d := &Driver{ d := &Driver{
factory: f, factory: f,
InitConfig: cfg, InitConfig: cfg,
clientset: clientset, clientset: clientset,
} }
deploymentOpt := &manifest.DeploymentOpt{ deploymentOpt := &manifest.DeploymentOpt{
Name: deploymentName, Name: deploymentName,
Image: bkimage.DefaultImage, Image: bkimage.DefaultImage,
@@ -73,19 +71,13 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver
BuildkitFlags: cfg.BuildkitFlags, BuildkitFlags: cfg.BuildkitFlags,
Rootless: false, Rootless: false,
Platforms: cfg.Platforms, Platforms: cfg.Platforms,
ConfigFiles: cfg.Files,
} }
deploymentOpt.Qemu.Image = bkimage.QemuImage
loadbalance := LoadbalanceSticky loadbalance := LoadbalanceSticky
imageOverride := ""
for k, v := range cfg.DriverOpts { for k, v := range cfg.DriverOpts {
switch k { switch k {
case "image": case "image":
if v != "" { imageOverride = v
deploymentOpt.Image = v
}
case "namespace": case "namespace":
namespace = v namespace = v
case "replicas": case "replicas":
@@ -125,31 +117,20 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver
return nil, errors.Errorf("invalid loadbalance %q", v) return nil, errors.Errorf("invalid loadbalance %q", v)
} }
loadbalance = v loadbalance = v
case "qemu.install":
deploymentOpt.Qemu.Install, err = strconv.ParseBool(v)
if err != nil {
return nil, err
}
case "qemu.image":
if v != "" {
deploymentOpt.Qemu.Image = v
}
default: default:
return nil, errors.Errorf("invalid driver option %s for driver %s", k, DriverName) return nil, errors.Errorf("invalid driver option %s for driver %s", k, DriverName)
} }
} }
if imageOverride != "" {
d.deployment, d.configMaps, err = manifest.NewDeployment(deploymentOpt) deploymentOpt.Image = imageOverride
}
d.deployment, err = manifest.NewDeployment(deploymentOpt)
if err != nil { if err != nil {
return nil, err return nil, err
} }
d.minReplicas = deploymentOpt.Replicas d.minReplicas = deploymentOpt.Replicas
d.deploymentClient = clientset.AppsV1().Deployments(namespace) d.deploymentClient = clientset.AppsV1().Deployments(namespace)
d.podClient = clientset.CoreV1().Pods(namespace) d.podClient = clientset.CoreV1().Pods(namespace)
d.configMapClient = clientset.CoreV1().ConfigMaps(namespace)
switch loadbalance { switch loadbalance {
case LoadbalanceSticky: case LoadbalanceSticky:
d.podChooser = &podchooser.StickyPodChooser{ d.podChooser = &podchooser.StickyPodChooser{
@@ -1,8 +1,6 @@
package manifest package manifest
import ( import (
"fmt"
"path"
"strings" "strings"
"github.com/docker/buildx/util/platformutil" "github.com/docker/buildx/util/platformutil"
@@ -14,22 +12,11 @@ import (
) )
type DeploymentOpt struct { type DeploymentOpt struct {
Namespace string Namespace string
Name string Name string
Image string Image string
Replicas int Replicas int
BuildkitFlags []string
// Qemu
Qemu struct {
// when true, will install binfmt
Install bool
Image string
}
BuildkitFlags []string
// files mounted at /etc/buildkitd
ConfigFiles map[string][]byte
Rootless bool Rootless bool
NodeSelector map[string]string NodeSelector map[string]string
RequestsCPU string RequestsCPU string
@@ -44,7 +31,7 @@ const (
AnnotationPlatform = "buildx.docker.com/platform" AnnotationPlatform = "buildx.docker.com/platform"
) )
func NewDeployment(opt *DeploymentOpt) (d *appsv1.Deployment, c []*corev1.ConfigMap, err error) { func NewDeployment(opt *DeploymentOpt) (*appsv1.Deployment, error) {
labels := map[string]string{ labels := map[string]string{
"app": opt.Name, "app": opt.Name,
} }
@@ -57,7 +44,7 @@ func NewDeployment(opt *DeploymentOpt) (d *appsv1.Deployment, c []*corev1.Config
annotations[AnnotationPlatform] = strings.Join(platformutil.Format(opt.Platforms), ",") annotations[AnnotationPlatform] = strings.Join(platformutil.Format(opt.Platforms), ",")
} }
d = &appsv1.Deployment{ d := &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{ TypeMeta: metav1.TypeMeta{
APIVersion: appsv1.SchemeGroupVersion.String(), APIVersion: appsv1.SchemeGroupVersion.String(),
Kind: "Deployment", Kind: "Deployment",
@@ -104,54 +91,9 @@ func NewDeployment(opt *DeploymentOpt) (d *appsv1.Deployment, c []*corev1.Config
}, },
}, },
} }
for _, cfg := range splitConfigFiles(opt.ConfigFiles) {
cc := &corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
APIVersion: corev1.SchemeGroupVersion.String(),
Kind: "ConfigMap",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: opt.Namespace,
Name: opt.Name + "-" + cfg.name,
Annotations: annotations,
},
Data: cfg.files,
}
d.Spec.Template.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{{
Name: cfg.name,
MountPath: path.Join("/etc/buildkit", cfg.path),
}}
d.Spec.Template.Spec.Volumes = []corev1.Volume{{
Name: "config",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: cc.Name,
},
},
},
}}
c = append(c, cc)
}
if opt.Qemu.Install {
d.Spec.Template.Spec.InitContainers = []corev1.Container{
{
Name: "qemu",
Image: opt.Qemu.Image,
Args: []string{"--install", "all"},
SecurityContext: &corev1.SecurityContext{
Privileged: &privileged,
},
},
}
}
if opt.Rootless { if opt.Rootless {
if err := toRootless(d); err != nil { if err := toRootless(d); err != nil {
return nil, nil, err return nil, err
} }
} }
@@ -162,7 +104,7 @@ func NewDeployment(opt *DeploymentOpt) (d *appsv1.Deployment, c []*corev1.Config
if opt.RequestsCPU != "" { if opt.RequestsCPU != "" {
reqCPU, err := resource.ParseQuantity(opt.RequestsCPU) reqCPU, err := resource.ParseQuantity(opt.RequestsCPU)
if err != nil { if err != nil {
return nil, nil, err return nil, err
} }
d.Spec.Template.Spec.Containers[0].Resources.Requests[corev1.ResourceCPU] = reqCPU d.Spec.Template.Spec.Containers[0].Resources.Requests[corev1.ResourceCPU] = reqCPU
} }
@@ -170,7 +112,7 @@ func NewDeployment(opt *DeploymentOpt) (d *appsv1.Deployment, c []*corev1.Config
if opt.RequestsMemory != "" { if opt.RequestsMemory != "" {
reqMemory, err := resource.ParseQuantity(opt.RequestsMemory) reqMemory, err := resource.ParseQuantity(opt.RequestsMemory)
if err != nil { if err != nil {
return nil, nil, err return nil, err
} }
d.Spec.Template.Spec.Containers[0].Resources.Requests[corev1.ResourceMemory] = reqMemory d.Spec.Template.Spec.Containers[0].Resources.Requests[corev1.ResourceMemory] = reqMemory
} }
@@ -178,7 +120,7 @@ func NewDeployment(opt *DeploymentOpt) (d *appsv1.Deployment, c []*corev1.Config
if opt.LimitsCPU != "" { if opt.LimitsCPU != "" {
limCPU, err := resource.ParseQuantity(opt.LimitsCPU) limCPU, err := resource.ParseQuantity(opt.LimitsCPU)
if err != nil { if err != nil {
return nil, nil, err return nil, err
} }
d.Spec.Template.Spec.Containers[0].Resources.Limits[corev1.ResourceCPU] = limCPU d.Spec.Template.Spec.Containers[0].Resources.Limits[corev1.ResourceCPU] = limCPU
} }
@@ -186,12 +128,12 @@ func NewDeployment(opt *DeploymentOpt) (d *appsv1.Deployment, c []*corev1.Config
if opt.LimitsMemory != "" { if opt.LimitsMemory != "" {
limMemory, err := resource.ParseQuantity(opt.LimitsMemory) limMemory, err := resource.ParseQuantity(opt.LimitsMemory)
if err != nil { if err != nil {
return nil, nil, err return nil, err
} }
d.Spec.Template.Spec.Containers[0].Resources.Limits[corev1.ResourceMemory] = limMemory d.Spec.Template.Spec.Containers[0].Resources.Limits[corev1.ResourceMemory] = limMemory
} }
return return d, nil
} }
func toRootless(d *appsv1.Deployment) error { func toRootless(d *appsv1.Deployment) error {
@@ -207,35 +149,3 @@ func toRootless(d *appsv1.Deployment) error {
d.Spec.Template.ObjectMeta.Annotations["container.seccomp.security.alpha.kubernetes.io/"+containerName] = "unconfined" d.Spec.Template.ObjectMeta.Annotations["container.seccomp.security.alpha.kubernetes.io/"+containerName] = "unconfined"
return nil return nil
} }
type config struct {
name string
path string
files map[string]string
}
func splitConfigFiles(m map[string][]byte) []config {
var c []config
idx := map[string]int{}
nameIdx := 0
for k, v := range m {
dir := path.Dir(k)
i, ok := idx[dir]
if !ok {
idx[dir] = len(c)
i = len(c)
name := "config"
if dir != "." {
nameIdx++
name = fmt.Sprintf("%s-%d", name, nameIdx)
}
c = append(c, config{
path: dir,
name: name,
files: map[string]string{},
})
}
c[i].files[path.Base(k)] = string(v)
}
return c
}
@@ -53,7 +53,7 @@ type InitConfig struct {
DockerAPI dockerclient.APIClient DockerAPI dockerclient.APIClient
KubeClientConfig KubeClientConfig KubeClientConfig KubeClientConfig
BuildkitFlags []string BuildkitFlags []string
Files map[string][]byte ConfigFile string
DriverOpts map[string]string DriverOpts map[string]string
Auth Auth Auth Auth
Platforms []specs.Platform Platforms []specs.Platform
@@ -103,17 +103,17 @@ func GetFactory(name string, instanceRequired bool) Factory {
return nil return nil
} }
func GetDriver(ctx context.Context, name string, f Factory, api dockerclient.APIClient, auth Auth, kcc KubeClientConfig, flags []string, files map[string][]byte, do map[string]string, platforms []specs.Platform, contextPathHash string) (Driver, error) { func GetDriver(ctx context.Context, name string, f Factory, api dockerclient.APIClient, auth Auth, kcc KubeClientConfig, flags []string, config string, do map[string]string, platforms []specs.Platform, contextPathHash string) (Driver, error) {
ic := InitConfig{ ic := InitConfig{
DockerAPI: api, DockerAPI: api,
KubeClientConfig: kcc, KubeClientConfig: kcc,
Name: name, Name: name,
BuildkitFlags: flags, BuildkitFlags: flags,
ConfigFile: config,
DriverOpts: do, DriverOpts: do,
Auth: auth, Auth: auth,
Platforms: platforms, Platforms: platforms,
ContextPathHash: contextPathHash, ContextPathHash: contextPathHash,
Files: files,
} }
if f == nil { if f == nil {
var err error var err error
@@ -129,15 +129,8 @@ func GetDriver(ctx context.Context, name string, f Factory, api dockerclient.API
return &cachedDriver{Driver: d}, nil return &cachedDriver{Driver: d}, nil
} }
func GetFactories() []Factory { func GetFactories() map[string]Factory {
ds := make([]Factory, 0, len(drivers)) return drivers
for _, d := range drivers {
ds = append(ds, d)
}
sort.Slice(ds, func(i, j int) bool {
return ds[i].Name() < ds[j].Name()
})
return ds
} }
type cachedDriver struct { type cachedDriver struct {
go.mod
@@ -1,25 +1,28 @@
module github.com/docker/buildx module github.com/docker/buildx
go 1.16 go 1.13
require ( require (
github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 // indirect github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 // indirect
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 // indirect
github.com/bugsnag/bugsnag-go v1.4.1 // indirect github.com/bugsnag/bugsnag-go v1.4.1 // indirect
github.com/bugsnag/panicwrap v1.2.0 // indirect github.com/bugsnag/panicwrap v1.2.0 // indirect
github.com/cenkalti/backoff v2.1.1+incompatible // indirect github.com/cenkalti/backoff v2.1.1+incompatible // indirect
github.com/cloudflare/cfssl v0.0.0-20181213083726-b94e044bb51e // indirect github.com/cloudflare/cfssl v0.0.0-20181213083726-b94e044bb51e // indirect
github.com/compose-spec/compose-go v0.0.0-20210729195839-de56f4f0cb3c github.com/compose-spec/compose-go v0.0.0-20210729195839-de56f4f0cb3c
github.com/containerd/console v1.0.3 github.com/containerd/console v1.0.2
github.com/containerd/containerd v1.5.5 github.com/containerd/containerd v1.5.4
github.com/docker/cli v20.10.8+incompatible github.com/denisenkom/go-mssqldb v0.0.0-20190315220205-a8ed825ac853 // indirect
github.com/docker/cli-docs-tool v0.2.1 github.com/docker/cli v20.10.7+incompatible
github.com/docker/compose-on-kubernetes v0.4.19-0.20190128150448-356b2919c496 // indirect github.com/docker/compose-on-kubernetes v0.4.19-0.20190128150448-356b2919c496 // indirect
github.com/docker/distribution v2.7.1+incompatible github.com/docker/distribution v2.7.1+incompatible
github.com/docker/docker v20.10.7+incompatible github.com/docker/docker v20.10.7+incompatible
github.com/docker/docker-credential-helpers v0.6.4-0.20210125172408-38bea2ce277a // indirect
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
github.com/docker/go-units v0.4.0
github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4 // indirect github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4 // indirect
github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c // indirect
github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484 // indirect github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484 // indirect
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 // indirect
github.com/fvbommel/sortorder v1.0.1 // indirect github.com/fvbommel/sortorder v1.0.1 // indirect
github.com/gofrs/flock v0.7.3 github.com/gofrs/flock v0.7.3
github.com/gofrs/uuid v3.3.0+incompatible // indirect github.com/gofrs/uuid v3.3.0+incompatible // indirect
@@ -30,15 +33,17 @@ require (
github.com/hashicorp/hcl/v2 v2.8.2 github.com/hashicorp/hcl/v2 v2.8.2
github.com/jinzhu/gorm v1.9.2 // indirect github.com/jinzhu/gorm v1.9.2 // indirect
github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a // indirect github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a // indirect
github.com/jinzhu/now v1.0.0 // indirect
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
github.com/moby/buildkit v0.9.1-0.20211019185819-8778943ac3da github.com/lib/pq v1.10.0 // indirect
github.com/mattn/go-sqlite3 v1.10.0 // indirect
github.com/moby/buildkit v0.8.2-0.20210702160134-1a7543a10527
github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283 github.com/opencontainers/image-spec v1.0.1
github.com/pelletier/go-toml v1.9.4
github.com/pkg/errors v0.9.1 github.com/pkg/errors v0.9.1
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002
github.com/sirupsen/logrus v1.8.1 github.com/sirupsen/logrus v1.8.1
github.com/spf13/cobra v1.2.1 github.com/spf13/cobra v1.1.1
github.com/spf13/pflag v1.0.5 github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.7.0 github.com/stretchr/testify v1.7.0
github.com/theupdateframework/notary v0.6.1 // indirect github.com/theupdateframework/notary v0.6.1 // indirect
@@ -50,15 +55,12 @@ require (
gopkg.in/dancannon/gorethink.v3 v3.0.5 // indirect gopkg.in/dancannon/gorethink.v3 v3.0.5 // indirect
gopkg.in/fatih/pool.v2 v2.0.0 // indirect gopkg.in/fatih/pool.v2 v2.0.0 // indirect
gopkg.in/gorethink/gorethink.v3 v3.0.5 // indirect gopkg.in/gorethink/gorethink.v3 v3.0.5 // indirect
k8s.io/api v0.22.1 k8s.io/api v0.20.6
k8s.io/apimachinery v0.22.1 k8s.io/apimachinery v0.20.6
k8s.io/client-go v0.22.1 k8s.io/client-go v0.20.6
) )
replace ( replace (
github.com/docker/cli => github.com/docker/cli v20.10.3-0.20210702143511-f782d1355eff+incompatible github.com/docker/cli => github.com/docker/cli v20.10.3-0.20210702143511-f782d1355eff+incompatible
github.com/docker/docker => github.com/docker/docker v20.10.3-0.20210817025855-ba2adeebdb8d+incompatible github.com/docker/docker => github.com/docker/docker v20.10.3-0.20210609100121-ef4d47340142+incompatible
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc => github.com/tonistiigi/opentelemetry-go-contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.0.0-20210714055410-d010b05b4939
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace => github.com/tonistiigi/opentelemetry-go-contrib/instrumentation/net/http/httptrace/otelhttptrace v0.0.0-20210714055410-d010b05b4939
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp => github.com/tonistiigi/opentelemetry-go-contrib/instrumentation/net/http/otelhttp v0.0.0-20210714055410-d010b05b4939
) )
go.sum (file diff suppressed because it is too large)

hack/binaries (executable file)
@@ -0,0 +1,16 @@
#!/usr/bin/env bash
. $(dirname $0)/util
set -eu
: ${TARGETPLATFORM=$CLI_PLATFORM}
platformFlag=""
if [ -n "$TARGETPLATFORM" ]; then
platformFlag="--platform $TARGETPLATFORM"
fi
buildxCmd build $platformFlag \
--target "binaries" \
--output "type=local,dest=./bin/" \
.
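A usage sketch, assuming the script is run from the repository root (the target platform is optional and falls back to the client platform):

```console
$ TARGETPLATFORM=linux/arm64 ./hack/binaries
```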
hack/build_ci_first_pass (executable file)
@@ -0,0 +1,38 @@
#!/usr/bin/env bash
TYP=$1
. $(dirname $0)/util
set -e
usage() {
echo "usage: ./hack/build_ci_first_pass <binaries>"
exit 1
}
if [ -z "$TYP" ]; then
usage
fi
importCacheFlags=""
exportCacheFlags=""
if [ "$GITHUB_ACTIONS" = "true" ]; then
if [ -n "$cacheRefFrom" ]; then
importCacheFlags="--cache-from=type=local,src=$cacheRefFrom"
fi
if [ -n "$cacheRefTo" ]; then
exportCacheFlags="--cache-to=type=local,dest=$cacheRefTo"
fi
fi
case $TYP in
"binaries")
buildxCmd build $importCacheFlags $exportCacheFlags \
--target "binaries" \
$currentcontext
;;
*)
echo >&2 "Unknown type $TYP"
exit 1
;;
esac
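A usage sketch from the repository root; the local cache import/export flags are only added when `GITHUB_ACTIONS=true` and `cacheRefFrom`/`cacheRefTo` are set (presumably exported by `hack/util` from the CI cache configuration):

```console
$ ./hack/build_ci_first_pass binaries
```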
hack/cross (executable file)
@@ -0,0 +1,24 @@
#!/usr/bin/env bash
. $(dirname $0)/util
set -e
: ${TARGETPLATFORM=linux/amd64,linux/arm/v7,linux/arm64,darwin/amd64,windows/amd64,linux/ppc64le,linux/s390x,linux/riscv64}
: ${EXPORT_LOCAL=}
importCacheFlags=""
if [ "$GITHUB_ACTIONS" = "true" ]; then
if [ -n "$cacheRefFrom" ]; then
importCacheFlags="--cache-from=type=local,src=$cacheRefFrom"
fi
fi
exportFlag=""
if [ -n "$EXPORT_LOCAL" ]; then
exportFlag="--output=type=local,dest=$EXPORT_LOCAL"
fi
buildxCmd build $importCacheFlags $exportFlag \
--target "binaries" \
--platform "$TARGETPLATFORM" \
$currentcontext
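A usage sketch with an assumed subset of target platforms and a local output directory:

```console
$ TARGETPLATFORM=linux/arm64,darwin/amd64 EXPORT_LOCAL=./bin/cross ./hack/cross
```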
@@ -13,3 +13,7 @@ else
( $dockerdCmd &>/var/log/dockerd.log & ) ( $dockerdCmd &>/var/log/dockerd.log & )
exec ash exec ash
fi fi
@@ -1,33 +0,0 @@
# syntax=docker/dockerfile:1.3-labs
FROM alpine:3.14 AS gen
RUN apk add --no-cache git
WORKDIR /src
RUN --mount=type=bind,target=. <<EOT
#!/usr/bin/env bash
set -e
mkdir /out
# see also ".mailmap" for how email addresses and names are deduplicated
{
echo "# This file lists all individuals having contributed content to the repository."
echo "# For how it is generated, see hack/dockerfiles/authors.Dockerfile."
echo
git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf
} > /out/AUTHORS
cat /out/AUTHORS
EOT
FROM scratch AS update
COPY --from=gen /out /
FROM gen AS validate
RUN --mount=type=bind,target=.,rw <<EOT
set -e
git add -A
cp -rf /out/* .
if [ -n "$(git status --porcelain -- AUTHORS)" ]; then
echo >&2 'ERROR: Authors result differs. Please update with "make authors"'
git status --porcelain -- AUTHORS
exit 1
fi
EOT
@@ -1,42 +1,29 @@
# syntax=docker/dockerfile:1.3-labs # syntax = docker/dockerfile:1.2
ARG GO_VERSION=1.17 FROM golang:1.16-alpine AS docsgen
ARG FORMATS=md,yaml
FROM golang:${GO_VERSION}-alpine AS docsgen
WORKDIR /src WORKDIR /src
RUN --mount=target=. \ RUN --mount=target=. \
--mount=target=/root/.cache,type=cache \ --mount=target=/root/.cache,type=cache \
go build -mod=vendor -o /out/docsgen ./docs/generate.go go build -mod=vendor -o /out/docsgen ./docs/docsgen
FROM alpine AS gen FROM alpine AS gen
RUN apk add --no-cache rsync git RUN apk add --no-cache rsync git
WORKDIR /src WORKDIR /src
COPY --from=docsgen /out/docsgen /usr/bin COPY --from=docsgen /out/docsgen /usr/bin
ARG FORMATS
RUN --mount=target=/context \ RUN --mount=target=/context \
--mount=target=.,type=tmpfs <<EOT --mount=target=.,type=tmpfs,readwrite \
set -e rsync -a /context/. . && \
rsync -a /context/. . docsgen && \
docsgen --formats "$FORMATS" --source "docs/reference" mkdir /out && cp -r docs/reference /out
mkdir /out
cp -r docs/reference /out
EOT
FROM scratch AS update FROM scratch AS update
COPY --from=gen /out /out COPY --from=gen /out /out
FROM gen AS validate FROM gen AS validate
RUN --mount=target=/context \ RUN --mount=target=/context \
--mount=target=.,type=tmpfs <<EOT --mount=target=.,type=tmpfs,readwrite \
set -e rsync -a /context/. . && \
rsync -a /context/. . git add -A && \
git add -A rm -rf docs/reference/* && \
rm -rf docs/reference/* cp -rf /out/* ./docs/ && \
cp -rf /out/* ./docs/ ./hack/validate-docs check
if [ -n "$(git status --porcelain -- docs/reference)" ]; then
echo >&2 'ERROR: Docs result differs. Please update with "make docs"'
git status --porcelain -- docs/reference
exit 1
fi
EOT
@@ -1,8 +1,6 @@
# syntax=docker/dockerfile:1.3 # syntax=docker/dockerfile:1.2
ARG GO_VERSION=1.17 FROM golang:1.16-alpine
FROM golang:${GO_VERSION}-alpine
RUN apk add --no-cache gcc musl-dev yamllint RUN apk add --no-cache gcc musl-dev yamllint
RUN wget -O- -nv https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.36.0 RUN wget -O- -nv https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.36.0
WORKDIR /go/src/github.com/docker/buildx WORKDIR /go/src/github.com/docker/buildx
@@ -1,44 +1,23 @@
-# syntax=docker/dockerfile:1.3-labs
-ARG GO_VERSION=1.17
-FROM golang:${GO_VERSION}-alpine AS base
-RUN apk add --no-cache git rsync
+# syntax = docker/dockerfile:1.2
+FROM golang:1.16-alpine AS vendored
+RUN apk add --no-cache git rsync
WORKDIR /src
-FROM base AS vendored
RUN --mount=target=/context \
---mount=target=.,type=tmpfs \
---mount=target=/go/pkg/mod,type=cache <<EOT
-set -e
-rsync -a /context/. .
-go mod tidy
-go mod vendor
-mkdir /out
-cp -r go.mod go.sum vendor /out
-EOT
+--mount=target=.,type=tmpfs,readwrite \
+--mount=target=/go/pkg/mod,type=cache \
+rsync -a /context/. . && \
+go mod tidy && go mod vendor && \
+mkdir /out && cp -r go.mod go.sum vendor /out
FROM scratch AS update
COPY --from=vendored /out /out
FROM vendored AS validate
RUN --mount=target=/context \
---mount=target=.,type=tmpfs <<EOT
-set -e
-rsync -a /context/. .
-git add -A
-rm -rf vendor
-cp -rf /out/* .
-if [ -n "$(git status --porcelain -- go.mod go.sum vendor)" ]; then
-echo >&2 'ERROR: Vendor result differs. Please vendor your package with "make vendor"'
-git status --porcelain -- go.mod go.sum vendor
-exit 1
-fi
-EOT
-FROM psampaz/go-mod-outdated:v0.8.0 AS go-mod-outdated
-FROM base AS outdated
-RUN --mount=target=.,ro \
---mount=target=/go/pkg/mod,type=cache \
---mount=from=go-mod-outdated,source=/home/go-mod-outdated,target=/usr/bin/go-mod-outdated \
-go list -mod=readonly -u -m -json all | go-mod-outdated -update -direct
+--mount=target=.,type=tmpfs,readwrite \
+rsync -a /context/. . && \
+git add -A && \
+rm -rf vendor && \
+cp -rf /out/* . && \
+./hack/validate-vendor check

hack/generate-authors Executable file (21 lines added)

@@ -0,0 +1,21 @@
#!/usr/bin/env bash
set -eu -o pipefail -x
if [ -x "$(command -v greadlink)" ]; then
# on macOS, GNU readlink is available (greadlink) and can be installed through brew install coreutils
cd "$(dirname "$(greadlink -f "$BASH_SOURCE")")/.."
else
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.."
fi
# see also ".mailmap" for how email addresses and names are deduplicated
{
cat <<-'EOH'
# This file lists all individuals having contributed content to the repository.
# For how it is generated, see `scripts/generate-authors.sh`.
EOH
echo
git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf
} > AUTHORS

hack/lint Executable file (6 lines added)

@@ -0,0 +1,6 @@
#!/usr/bin/env bash
. $(dirname $0)/util
set -eu
buildxCmd build --file ./hack/dockerfiles/lint.Dockerfile .


@@ -1,20 +1,28 @@
#!/usr/bin/env bash
+OUT=${1:-release-out}
+. $(dirname $0)/util
set -eu -o pipefail
-: ${BUILDX_CMD=docker buildx}
-: ${RELEASE_OUT=./release-out}
-# release
-(set -x ; ${BUILDX_CMD} bake --set "*.output=$RELEASE_OUT" release)
+: ${PLATFORMS=linux/amd64}
+importCacheFlags=""
+if [[ -n "$cacheRefFrom" ]] && [[ "$cacheType" = "local" ]]; then
+for ref in $cacheRefFrom; do
+importCacheFlags="$importCacheFlags--cache-from=type=local,src=$ref "
+done
+fi
+buildxCmd build $importCacheFlags \
+--target "release" \
+--platform "$PLATFORMS" \
+--output "type=local,dest=$OUT" \
+$currentcontext
# wrap binaries
-mv -f ./${RELEASE_OUT}/**/* ./${RELEASE_OUT}/
-find ./${RELEASE_OUT} -type d -empty -delete
-# checksums
-if ! type shasum > /dev/null 2>&1; then
-echo >&2 "ERROR: shasum is required"
-exit 1
+{ set +x; } 2>/dev/null
+if [[ $PLATFORMS =~ "," ]]; then
+mv -f ./$OUT/**/* ./$OUT/
+find ./$OUT -type d -empty -delete
fi
-find ./${RELEASE_OUT}/ -type f \( -iname "buildx-*" ! -iname "*darwin*" \) -print0 | sort -z | xargs -r0 shasum -a 256 -b | sed 's# .*/# #' > ./${RELEASE_OUT}/checksums.txt


@@ -2,18 +2,17 @@
set -e
-: ${BUILDX_CMD=docker buildx}
: ${TMUX=}
function clean {
-docker rmi $iid
+docker rmi $(cat $iidfile)
}
-iid=buildx-shell
-(set -x ; ${BUILDX_CMD} build --output "type=docker,name=$iid" --target shell .)
+iidfile=$(mktemp -t docker-iidfile.XXXXXXXXXX)
+DOCKER_BUILDKIT=1 docker build --iidfile $iidfile --target demo-env .
trap clean EXIT
SSH=
if [ -n "$MOUNT_SSH_AUTH_SOCK" ]; then
SSH="-v $SSH_AUTH_SOCK:$SSH_AUTH_SOCK -e SSH_AUTH_SOCK"
fi
-docker run $SSH -it --privileged --rm -e TMUX_ENTRYPOINT=$TMUX $iid
+docker run $SSH -it --privileged --rm -e TMUX_ENTRYPOINT=$TMUX $(cat $iidfile)

hack/test Executable file (47 lines added)

@@ -0,0 +1,47 @@
#!/usr/bin/env bash
. $(dirname $0)/util
set -eu -o pipefail
: ${BUILDX_NOCACHE=}
: ${TEST_COVERAGE=}
importCacheFlags=""
if [ -n "$cacheRefFrom" ]; then
if [ "$cacheType" = "local" ]; then
for ref in $cacheRefFrom; do
importCacheFlags="$importCacheFlags--cache-from=type=local,src=$ref "
done
fi
fi
iid="buildx-tests"
iidfile=$(mktemp -t docker-iidfile.XXXXXXXXXX)
coverageVol=""
coverageFlags=""
if [ "$TEST_COVERAGE" = "1" ]; then
covdir="$(pwd)/coverage"
mkdir -p "$covdir"
coverageVol="-v $covdir:/coverage"
coverageFlags="-coverprofile=/coverage/coverage.txt -covermode=atomic"
fi
buildxCmd build $importCacheFlags \
--target "integration-tests" \
--output "type=docker,name=$iid" \
$currentcontext
cacheVolume="buildx-cache"
if ! docker inspect "$cacheVolume" > /dev/null 2>&1; then
cacheVolume=$(docker create --name=buildx-cache -v /root/.cache -v /go/pkg/mod alpine)
fi
docker run --rm -v /tmp $coverageVol --volumes-from=$cacheVolume --privileged $iid go test $coverageFlags ${TESTFLAGS:--v} ${TESTPKGS:-./...}
if [ -n "$BUILDX_NOCACHE" ]; then
docker rm -v $cacheVolume
fi
rm "$iidfile"
docker rmi $iid


@@ -1,101 +0,0 @@
#!/usr/bin/env bash
set -eu -o pipefail
: ${BUILDX_CMD=docker buildx}
: ${BUILDKIT_IMAGE=moby/buildkit:buildx-stable-1}
: ${BUILDKIT_CFG=}
: ${DRIVER=docker-container}
: ${DRIVER_OPT=}
: ${MULTI_NODE=0}
: ${PLATFORMS=linux/amd64,linux/arm64}
function clean {
rm -rf "$context"
${BUILDX_CMD} rm "$builderName"
}
context=$(mktemp -d -t buildx-output.XXXXXXXXXX)
dockerfile=${context}/Dockerfile
trap clean EXIT
builderName=buildx-test-$(openssl rand -hex 16)
buildPlatformFlag=
if [ "$DRIVER" = "docker" ]; then
builderName=default
else
buildPlatformFlag=--platform="${PLATFORMS}"
fi
driverOpt=image=${BUILDKIT_IMAGE}
if [ -n "$DRIVER_OPT" ]; then
driverOpt=$driverOpt,$DRIVER_OPT
fi
# create builder except for docker driver
if [ "$DRIVER" != "docker" ]; then
if [ "${MULTI_NODE}" = "1" ]; then
firstNode=1
for platform in ${PLATFORMS//,/ }; do
createFlags=""
if [ -f "$BUILDKIT_CFG" ]; then
createFlags="$createFlags --config=${BUILDKIT_CFG}"
fi
if [ "$firstNode" = "0" ]; then
createFlags="$createFlags --append"
fi
(
set -x
${BUILDX_CMD} create ${createFlags} \
--name="${builderName}" \
--node="${builderName}-${platform/\//-}" \
--driver="${DRIVER}" \
--driver-opt="${driverOpt}" \
--platform="${platform}"
)
firstNode=0
done
else
createFlags=""
if [ -f "$BUILDKIT_CFG" ]; then
createFlags="$createFlags --config=${BUILDKIT_CFG}"
fi
(
set -x
${BUILDX_CMD} create ${createFlags} \
--name="${builderName}" \
--driver="${DRIVER}" \
--driver-opt="${driverOpt}" \
--platform="${PLATFORMS}"
)
fi
fi
# multi-platform not supported by docker driver
buildPlatformFlag=
if [ "$DRIVER" != "docker" ]; then
buildPlatformFlag=--platform="${PLATFORMS}"
fi
set -x
# inspect and bootstrap
${BUILDX_CMD} inspect --bootstrap --builder="${builderName}"
# create dockerfile
cat > "${dockerfile}" <<EOL
FROM busybox as build
ARG TARGETPLATFORM
ARG BUILDPLATFORM
RUN echo "I am running on \$BUILDPLATFORM, building for \$TARGETPLATFORM" > /log
FROM busybox
COPY --from=build /log /log
RUN cat /log
RUN uname -a
EOL
# build
${BUILDX_CMD} build ${buildPlatformFlag} \
--output="type=cacheonly" \
--builder="${builderName}" \
"${context}"


@@ -1,12 +1,16 @@
#!/usr/bin/env bash
-set -eu -o pipefail
-: ${BUILDX_CMD=docker buildx}
-: ${FORMATS=md}
+. $(dirname $0)/util
+set -eu
output=$(mktemp -d -t buildx-output.XXXXXXXXXX)
-(set -x ; DOCS_FORMATS=$FORMATS ${BUILDX_CMD} bake --set "*.output=$output" update-docs)
+buildxCmd build \
+--target "update" \
+--output "type=local,dest=$output" \
+--file "./hack/dockerfiles/docs.Dockerfile" \
+.
rm -rf ./docs/reference/*
cp -R "$output"/out/* ./docs/
rm -rf $output


@@ -1,11 +1,16 @@
#!/usr/bin/env bash
-set -eu -o pipefail
-: ${BUILDX_CMD=docker buildx}
+. $(dirname $0)/util
+set -eu
output=$(mktemp -d -t buildx-output.XXXXXXXXXX)
-(set -x ; ${BUILDX_CMD} bake --set "*.output=$output" update-vendor)
+buildxCmd build \
+--target "update" \
+--output "type=local,dest=$output" \
+--file "./hack/dockerfiles/vendor.Dockerfile" \
+.
rm -rf ./vendor
cp -R "$output"/out/* .
rm -rf $output

hack/util Executable file (66 lines added)

@@ -0,0 +1,66 @@
#!/usr/bin/env sh
: ${CI=}
: ${PREFER_BUILDCTL=}
: ${PREFER_LEGACY=}
: ${CLI_PLATFORM=}
: ${GITHUB_ACTIONS=}
: ${CACHEDIR_FROM=}
: ${CACHEDIR_TO=}
if [ "$PREFER_BUILDCTL" = "1" ]; then
echo >&2 "WARNING: PREFER_BUILDCTL is no longer supported. Ignoring."
fi
if [ "$PREFER_LEGACY" = "1" ]; then
echo >&2 "WARNING: PREFER_LEGACY is no longer supported. Ignoring."
fi
progressFlag=""
if [ "$CI" = "true" ]; then
progressFlag="--progress=plain"
fi
buildxCmd() {
if docker buildx version >/dev/null 2>&1; then
set -x
docker buildx "$@" $progressFlag
elif buildx version >/dev/null 2>&1; then
set -x
buildx "$@" $progressFlag
elif docker version >/dev/null 2>&1; then
set -x
DOCKER_BUILDKIT=1 docker "$@" $progressFlag
else
echo >&2 "ERROR: Please enable DOCKER_BUILDKIT or install standalone buildx"
exit 1
fi
}
if [ -z "$CLI_PLATFORM" ]; then
if [ "$(uname -s)" = "Darwin" ]; then
arch="$(uname -m)"
if [ "$arch" = "x86_64" ]; then
arch="amd64"
fi
CLI_PLATFORM="darwin/$arch"
elif uname -s | grep MINGW > /dev/null 2>&1 ; then
CLI_PLATFORM="windows/amd64"
fi
fi
cacheType=""
cacheRefFrom=""
cacheRefTo=""
currentref=""
if [ "$GITHUB_ACTIONS" = "true" ]; then
currentref="git://github.com/$GITHUB_REPOSITORY#$GITHUB_REF"
cacheType="local"
cacheRefFrom="$CACHEDIR_FROM"
cacheRefTo="$CACHEDIR_TO"
fi
currentcontext="."
if [ -n "$currentref" ]; then
currentcontext="--build-arg BUILDKIT_CONTEXT_KEEP_GIT_DIR=1 $currentref"
fi

hack/validate-docs Executable file (29 lines added)

@@ -0,0 +1,29 @@
#!/usr/bin/env sh
set -eu
case ${1:-} in
'')
. $(dirname $0)/util
buildxCmd build \
--target validate \
--file ./hack/dockerfiles/docs.Dockerfile \
.
;;
check)
status="$(git status --porcelain -- docs/reference 2>/dev/null)"
diffs=$(echo "$status" | grep -v '^[RAD] ' || true)
if [ "$diffs" ]; then
{
set +x
echo 'The result of ./hack/update-docs differs'
echo
echo "$diffs"
echo
echo 'Please vendor your package with ./hack/update-docs'
echo
} >&2
exit 1
fi
echo 'Congratulations! All docs changes are done the right way.'
;;
esac

hack/validate-vendor Executable file (29 lines added)

@@ -0,0 +1,29 @@
#!/usr/bin/env sh
set -eu
case ${1:-} in
'')
. $(dirname $0)/util
buildxCmd build \
--target validate \
--file ./hack/dockerfiles/vendor.Dockerfile \
.
;;
check)
status="$(git status --porcelain -- go.mod go.sum vendor 2>/dev/null)"
diffs=$(echo "$status" | grep -v '^[RAD] ' || true)
if [ "$diffs" ]; then
{
set +x
echo 'The result of "make vendor" differs'
echo
echo "$diffs"
echo
echo 'Please vendor your package with "make vendor"'
echo
} >&2
exit 1
fi
echo 'Congratulations! All vendoring changes are done the right way.'
;;
esac


@@ -4,7 +4,6 @@ import (
"fmt" "fmt"
"github.com/containerd/containerd/platforms" "github.com/containerd/containerd/platforms"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/platformutil" "github.com/docker/buildx/util/platformutil"
specs "github.com/opencontainers/image-spec/specs-go/v1" specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors" "github.com/pkg/errors"
@@ -22,9 +21,8 @@ type Node struct {
Endpoint string Endpoint string
Platforms []specs.Platform Platforms []specs.Platform
Flags []string Flags []string
ConfigFile string
DriverOpts map[string]string DriverOpts map[string]string
Files map[string][]byte
} }
func (ng *NodeGroup) Leave(name string) error { func (ng *NodeGroup) Leave(name string) error {
@@ -90,18 +88,10 @@ func (ng *NodeGroup) Update(name, endpoint string, platforms []string, endpoints
Name: name, Name: name,
Endpoint: endpoint, Endpoint: endpoint,
Platforms: pp, Platforms: pp,
ConfigFile: configFile,
Flags: flags, Flags: flags,
DriverOpts: do, DriverOpts: do,
} }
if configFile != "" {
files, err := confutil.LoadConfigFiles(configFile)
if err != nil {
return err
}
n.Files = files
}
ng.Nodes = append(ng.Nodes, n) ng.Nodes = append(ng.Nodes, n)
if err := ng.validateDuplicates(endpoint, len(ng.Nodes)-1); err != nil { if err := ng.validateDuplicates(endpoint, len(ng.Nodes)-1); err != nil {


@@ -1,166 +0,0 @@
package storeutil
import (
"bytes"
"os"
"strings"
"github.com/docker/buildx/store"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/imagetools"
"github.com/docker/buildx/util/resolver"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/context/docker"
buildkitdconfig "github.com/moby/buildkit/cmd/buildkitd/config"
"github.com/pkg/errors"
)
// GetStore returns current builder instance store
func GetStore(dockerCli command.Cli) (*store.Txn, func(), error) {
s, err := store.New(confutil.ConfigDir(dockerCli))
if err != nil {
return nil, nil, err
}
return s.Txn()
}
// GetCurrentEndpoint returns the current default endpoint value
func GetCurrentEndpoint(dockerCli command.Cli) (string, error) {
name := dockerCli.CurrentContext()
if name != "default" {
return name, nil
}
de, err := GetDockerEndpoint(dockerCli, name)
if err != nil {
return "", errors.Errorf("docker endpoint for %q not found", name)
}
return de, nil
}
// GetDockerEndpoint returns docker endpoint string for given context
func GetDockerEndpoint(dockerCli command.Cli, name string) (string, error) {
list, err := dockerCli.ContextStore().List()
if err != nil {
return "", err
}
for _, l := range list {
if l.Name == name {
ep, ok := l.Endpoints["docker"]
if !ok {
return "", errors.Errorf("context %q does not have a Docker endpoint", name)
}
typed, ok := ep.(docker.EndpointMeta)
if !ok {
return "", errors.Errorf("endpoint %q is not of type EndpointMeta, %T", ep, ep)
}
return typed.Host, nil
}
}
return "", nil
}
// GetCurrentInstance finds the current builder instance
func GetCurrentInstance(txn *store.Txn, dockerCli command.Cli) (*store.NodeGroup, error) {
ep, err := GetCurrentEndpoint(dockerCli)
if err != nil {
return nil, err
}
ng, err := txn.Current(ep)
if err != nil {
return nil, err
}
if ng == nil {
ng, _ = GetNodeGroup(txn, dockerCli, dockerCli.CurrentContext())
}
return ng, nil
}
// GetNodeGroup returns nodegroup based on the name
func GetNodeGroup(txn *store.Txn, dockerCli command.Cli, name string) (*store.NodeGroup, error) {
ng, err := txn.NodeGroupByName(name)
if err != nil {
if !os.IsNotExist(errors.Cause(err)) {
return nil, err
}
}
if ng != nil {
return ng, nil
}
if name == "default" {
name = dockerCli.CurrentContext()
}
list, err := dockerCli.ContextStore().List()
if err != nil {
return nil, err
}
for _, l := range list {
if l.Name == name {
return &store.NodeGroup{
Name: "default",
Nodes: []store.Node{
{
Name: "default",
Endpoint: name,
},
},
}, nil
}
}
return nil, errors.Errorf("no builder %q found", name)
}
func GetImageConfig(dockerCli command.Cli, ng *store.NodeGroup) (opt imagetools.Opt, err error) {
opt.Auth = dockerCli.ConfigFile()
if ng == nil || len(ng.Nodes) == 0 {
return opt, nil
}
files := ng.Nodes[0].Files
dt, ok := files["buildkitd.toml"]
if !ok {
return opt, nil
}
config, err := buildkitdconfig.Load(bytes.NewReader(dt))
if err != nil {
return opt, err
}
regconfig := make(map[string]resolver.RegistryConfig)
for k, v := range config.Registries {
rc := resolver.RegistryConfig{
Mirrors: v.Mirrors,
PlainHTTP: v.PlainHTTP,
Insecure: v.Insecure,
}
for _, ca := range v.RootCAs {
dt, ok := files[strings.TrimPrefix(ca, confutil.DefaultBuildKitConfigDir+"/")]
if ok {
rc.RootCAs = append(rc.RootCAs, dt)
}
}
for _, kp := range v.KeyPairs {
key, keyok := files[strings.TrimPrefix(kp.Key, confutil.DefaultBuildKitConfigDir+"/")]
cert, certok := files[strings.TrimPrefix(kp.Certificate, confutil.DefaultBuildKitConfigDir+"/")]
if keyok && certok {
rc.KeyPairs = append(rc.KeyPairs, resolver.TLSKeyPair{
Key: key,
Certificate: cert,
})
}
}
regconfig[k] = rc
}
opt.RegistryConfig = regconfig
return opt, nil
}


@@ -100,9 +100,7 @@ func ParseOutputs(inp []string) ([]client.ExportEntry, error) {
delete(out.Attrs, "dest")
case "registry":
out.Type = client.ExporterImage
-if _, ok := out.Attrs["push"]; !ok {
-out.Attrs["push"] = "true"
-}
+out.Attrs["push"] = "true"
}
outs = append(outs, out)


@@ -1,14 +0,0 @@
package cobrautil
import "github.com/spf13/cobra"
// HideInheritedFlags hides inherited flags
func HideInheritedFlags(cmd *cobra.Command, hidden ...string) {
for _, h := range hidden {
// we could use cmd.SetHelpFunc to override the helper
// but, it's not enough because we also want the generated
// docs to be updated, so we override the flag instead
cmd.Flags().String(h, "", "")
_ = cmd.Flags().MarkHidden(h)
}
}


@@ -1,42 +0,0 @@
package confutil
import (
"os"
"path/filepath"
"github.com/docker/cli/cli/command"
"github.com/pelletier/go-toml"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// ConfigDir will look for correct configuration store path;
// if `$BUILDX_CONFIG` is set - use it, otherwise use parent directory
// of Docker config file (i.e. `${DOCKER_CONFIG}/buildx`)
func ConfigDir(dockerCli command.Cli) string {
if buildxConfig := os.Getenv("BUILDX_CONFIG"); buildxConfig != "" {
logrus.Debugf("using config store %q based in \"$BUILDX_CONFIG\" environment variable", buildxConfig)
return buildxConfig
}
buildxConfig := filepath.Join(filepath.Dir(dockerCli.ConfigFile().Filename), "buildx")
logrus.Debugf("using default config store %q", buildxConfig)
return buildxConfig
}
// loadConfigTree loads BuildKit config toml tree
func loadConfigTree(fp string) (*toml.Tree, error) {
f, err := os.Open(fp)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return nil, nil
}
return nil, errors.Wrapf(err, "failed to load config from %s", fp)
}
defer f.Close()
t, err := toml.LoadReader(f)
if err != nil {
return t, errors.Wrap(err, "failed to parse config")
}
return t, nil
}


@@ -1,132 +0,0 @@
package confutil
import (
"bytes"
"io"
"os"
"path"
"github.com/pelletier/go-toml"
"github.com/pkg/errors"
)
const (
// DefaultBuildKitStateDir and DefaultBuildKitConfigDir are the location
// where buildkitd inside the container stores its state. Some drivers
// create a Linux container, so this should match the location for Linux,
// as defined in: https://github.com/moby/buildkit/blob/v0.9.0/util/appdefaults/appdefaults_unix.go#L11-L15
DefaultBuildKitStateDir = "/var/lib/buildkit"
DefaultBuildKitConfigDir = "/etc/buildkit"
)
// LoadConfigFiles creates a temp directory with BuildKit config and
// registry certificates ready to be copied to a container.
func LoadConfigFiles(bkconfig string) (map[string][]byte, error) {
if _, err := os.Stat(bkconfig); errors.Is(err, os.ErrNotExist) {
return nil, errors.Wrapf(err, "buildkit configuration file not found: %s", bkconfig)
} else if err != nil {
return nil, errors.Wrapf(err, "invalid buildkit configuration file: %s", bkconfig)
}
// Load config tree
btoml, err := loadConfigTree(bkconfig)
if err != nil {
return nil, err
}
m := make(map[string][]byte)
// Iterate through registry config to copy certs and update
// BuildKit config with the underlying certs' path in the container.
//
// The following BuildKit config:
//
// [registry."myregistry.io"]
// ca=["/etc/config/myca.pem"]
// [[registry."myregistry.io".keypair]]
// key="/etc/config/key.pem"
// cert="/etc/config/cert.pem"
//
// will be translated in the container as:
//
// [registry."myregistry.io"]
// ca=["/etc/buildkit/certs/myregistry.io/myca.pem"]
// [[registry."myregistry.io".keypair]]
// key="/etc/buildkit/certs/myregistry.io/key.pem"
// cert="/etc/buildkit/certs/myregistry.io/cert.pem"
if btoml.Has("registry") {
for regName := range btoml.GetArray("registry").(*toml.Tree).Values() {
regConf := btoml.GetPath([]string{"registry", regName}).(*toml.Tree)
if regConf == nil {
continue
}
pfx := path.Join("certs", regName)
if regConf.Has("ca") {
regCAs := regConf.GetArray("ca").([]string)
if len(regCAs) > 0 {
var cas []string
for _, ca := range regCAs {
fp := path.Join(pfx, path.Base(ca))
cas = append(cas, path.Join(DefaultBuildKitConfigDir, fp))
dt, err := readFile(ca)
if err != nil {
return nil, errors.Wrapf(err, "failed to read CA file: %s", ca)
}
m[fp] = dt
}
regConf.Set("ca", cas)
}
}
if regConf.Has("keypair") {
regKeyPairs := regConf.GetArray("keypair").([]*toml.Tree)
if len(regKeyPairs) == 0 {
continue
}
for _, kp := range regKeyPairs {
if kp == nil {
continue
}
key := kp.Get("key").(string)
if len(key) > 0 {
fp := path.Join(pfx, path.Base(key))
kp.Set("key", path.Join(DefaultBuildKitConfigDir, fp))
dt, err := readFile(key)
if err != nil {
return nil, errors.Wrapf(err, "failed to read key file: %s", key)
}
m[fp] = dt
}
cert := kp.Get("cert").(string)
if len(cert) > 0 {
fp := path.Join(pfx, path.Base(cert))
kp.Set("cert", path.Join(DefaultBuildKitConfigDir, fp))
dt, err := readFile(cert)
if err != nil {
return nil, errors.Wrapf(err, "failed to read cert file: %s", cert)
}
m[fp] = dt
}
}
}
}
}
b := bytes.NewBuffer(nil)
_, err = btoml.WriteTo(b)
if err != nil {
return nil, err
}
m["buildkitd.toml"] = b.Bytes()
return m, nil
}
func readFile(fp string) ([]byte, error) {
sf, err := os.Open(fp)
if err != nil {
return nil, err
}
defer sf.Close()
return io.ReadAll(io.LimitReader(sf, 1024*1024))
}


@@ -150,7 +150,7 @@ func (r *Resolver) Combine(ctx context.Context, in string, descs []ocispec.Descr
func (r *Resolver) Push(ctx context.Context, ref reference.Named, desc ocispec.Descriptor, dt []byte) error {
ref = reference.TagNameOnly(ref)
-p, err := r.resolver().Pusher(ctx, ref.String())
+p, err := r.r.Pusher(ctx, ref.String())
if err != nil {
return err
}


@@ -10,10 +10,8 @@ import (
"github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker" "github.com/containerd/containerd/remotes/docker"
"github.com/docker/buildx/util/resolver"
clitypes "github.com/docker/cli/cli/config/types" clitypes "github.com/docker/cli/cli/config/types"
"github.com/docker/distribution/reference" "github.com/docker/distribution/reference"
"github.com/moby/buildkit/util/tracing"
ocispec "github.com/opencontainers/image-spec/specs-go/v1" ocispec "github.com/opencontainers/image-spec/specs-go/v1"
) )
@@ -22,36 +20,21 @@ type Auth interface {
} }
type Opt struct { type Opt struct {
Auth Auth Auth Auth
RegistryConfig map[string]resolver.RegistryConfig
} }
type Resolver struct { type Resolver struct {
auth docker.Authorizer r remotes.Resolver
hosts docker.RegistryHosts
} }
func New(opt Opt) *Resolver { func New(opt Opt) *Resolver {
return &Resolver{ resolver := docker.NewResolver(docker.ResolverOptions{
auth: docker.NewDockerAuthorizer(docker.WithAuthCreds(toCredentialsFunc(opt.Auth)), docker.WithAuthClient(http.DefaultClient)), Client: http.DefaultClient,
hosts: resolver.NewRegistryConfig(opt.RegistryConfig), Credentials: toCredentialsFunc(opt.Auth),
}
}
func (r *Resolver) resolver() remotes.Resolver {
return docker.NewResolver(docker.ResolverOptions{
Hosts: func(domain string) ([]docker.RegistryHost, error) {
res, err := r.hosts(domain)
if err != nil {
return nil, err
}
for i := range res {
res[i].Authorizer = r.auth
}
return res, nil
},
Client: tracing.DefaultClient,
}) })
return &Resolver{
r: resolver,
}
} }
func (r *Resolver) Resolve(ctx context.Context, in string) (string, ocispec.Descriptor, error) { func (r *Resolver) Resolve(ctx context.Context, in string) (string, ocispec.Descriptor, error) {
@@ -60,7 +43,7 @@ func (r *Resolver) Resolve(ctx context.Context, in string) (string, ocispec.Desc
return "", ocispec.Descriptor{}, err return "", ocispec.Descriptor{}, err
} }
in, desc, err := r.resolver().Resolve(ctx, ref.String()) in, desc, err := r.r.Resolve(ctx, ref.String())
if err != nil { if err != nil {
return "", ocispec.Descriptor{}, err return "", ocispec.Descriptor{}, err
} }
@@ -82,7 +65,7 @@ func (r *Resolver) Get(ctx context.Context, in string) ([]byte, ocispec.Descript
} }
func (r *Resolver) GetDescriptor(ctx context.Context, in string, desc ocispec.Descriptor) ([]byte, error) { func (r *Resolver) GetDescriptor(ctx context.Context, in string, desc ocispec.Descriptor) ([]byte, error) {
fetcher, err := r.resolver().Fetcher(ctx, in) fetcher, err := r.r.Fetcher(ctx, in)
if err != nil { if err != nil {
return nil, err return nil, err
} }


@@ -1,188 +0,0 @@
package resolver
import (
"crypto/tls"
"crypto/x509"
"net"
"net/http"
"runtime"
"time"
"github.com/containerd/containerd/remotes/docker"
"github.com/moby/buildkit/util/tracing"
"github.com/pkg/errors"
)
// TODO: copied from buildkit/util/resolver. Update upstream so we can use the same code.
type RegistryConfig struct {
Mirrors []string
PlainHTTP *bool
Insecure *bool
RootCAs [][]byte
KeyPairs []TLSKeyPair
}
type TLSKeyPair struct {
Key []byte
Certificate []byte
}
func fillInsecureOpts(host string, c RegistryConfig, h docker.RegistryHost) ([]docker.RegistryHost, error) {
var hosts []docker.RegistryHost
tc, err := loadTLSConfig(c)
if err != nil {
return nil, err
}
var isHTTP bool
if c.PlainHTTP != nil && *c.PlainHTTP {
isHTTP = true
}
if c.PlainHTTP == nil {
if ok, _ := docker.MatchLocalhost(host); ok {
isHTTP = true
}
}
if isHTTP {
h2 := h
h2.Scheme = "http"
hosts = append(hosts, h2)
}
if c.Insecure != nil && *c.Insecure {
h2 := h
transport := newDefaultTransport()
transport.TLSClientConfig = tc
h2.Client = &http.Client{
Transport: tracing.NewTransport(transport),
}
tc.InsecureSkipVerify = true
hosts = append(hosts, h2)
}
if len(hosts) == 0 {
transport := newDefaultTransport()
transport.TLSClientConfig = tc
h.Client = &http.Client{
Transport: tracing.NewTransport(transport),
}
hosts = append(hosts, h)
}
return hosts, nil
}
func loadTLSConfig(c RegistryConfig) (*tls.Config, error) {
tc := &tls.Config{}
if len(c.RootCAs) > 0 {
systemPool, err := x509.SystemCertPool()
if err != nil {
if runtime.GOOS == "windows" {
systemPool = x509.NewCertPool()
} else {
return nil, errors.Wrapf(err, "unable to get system cert pool")
}
}
tc.RootCAs = systemPool
}
for _, p := range c.RootCAs {
tc.RootCAs.AppendCertsFromPEM(p)
}
for _, kp := range c.KeyPairs {
cert, err := tls.X509KeyPair(kp.Certificate, kp.Key)
if err != nil {
return nil, errors.Wrapf(err, "failed to load keypair for %s", kp.Certificate)
}
tc.Certificates = append(tc.Certificates, cert)
}
return tc, nil
}
// NewRegistryConfig converts registry config to docker.RegistryHosts callback
func NewRegistryConfig(m map[string]RegistryConfig) docker.RegistryHosts {
return docker.Registries(
func(host string) ([]docker.RegistryHost, error) {
c, ok := m[host]
if !ok {
return nil, nil
}
var out []docker.RegistryHost
for _, mirror := range c.Mirrors {
h := docker.RegistryHost{
Scheme: "https",
Client: newDefaultClient(),
Host: mirror,
Path: "/v2",
Capabilities: docker.HostCapabilityPull | docker.HostCapabilityResolve,
}
hosts, err := fillInsecureOpts(mirror, m[mirror], h)
if err != nil {
return nil, err
}
out = append(out, hosts...)
}
if host == "docker.io" {
host = "registry-1.docker.io"
}
h := docker.RegistryHost{
Scheme: "https",
Client: newDefaultClient(),
Host: host,
Path: "/v2",
Capabilities: docker.HostCapabilityPush | docker.HostCapabilityPull | docker.HostCapabilityResolve,
}
hosts, err := fillInsecureOpts(host, c, h)
if err != nil {
return nil, err
}
out = append(out, hosts...)
return out, nil
},
docker.ConfigureDefaultRegistries(
docker.WithClient(newDefaultClient()),
docker.WithPlainHTTP(docker.MatchLocalhost),
),
)
}
func newDefaultClient() *http.Client {
return &http.Client{
Transport: tracing.NewTransport(newDefaultTransport()),
}
}
// newDefaultTransport is for pull or push client
//
// NOTE: For push, there must disable http2 for https because the flow control
// will limit data transfer. The net/http package doesn't provide http2 tunable
// settings which limits push performance.
//
// REF: https://github.com/golang/go/issues/14077
func newDefaultTransport() *http.Transport {
return &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 60 * time.Second,
}).DialContext,
MaxIdleConns: 30,
IdleConnTimeout: 120 * time.Second,
MaxIdleConnsPerHost: 4,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 5 * time.Second,
TLSNextProto: make(map[string]func(authority string, c *tls.Conn) http.RoundTripper),
}
}


@@ -140,7 +140,7 @@ func testOnGCE() bool {
}()
go func() {
-addrs, err := net.DefaultResolver.LookupHost(ctx, "metadata.google.internal")
+addrs, err := net.LookupHost("metadata.google.internal")
if err != nil || len(addrs) == 0 {
resc <- false
return
@@ -296,7 +296,6 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) {
// being stable anyway.
host = metadataIP
}
-suffix = strings.TrimLeft(suffix, "/")
u := "http://" + host + "/computeMetadata/v1/" + suffix
req, err := http.NewRequest("GET", u, nil)
if err != nil {


@@ -1,5 +0,0 @@
module github.com/Azure/go-ansiterm
go 1.16
require golang.org/x/sys v0.0.0-20210616094352-59db8d763f22


@@ -1,2 +0,0 @@
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=


@@ -10,7 +10,6 @@ import (
"syscall" "syscall"
"github.com/Azure/go-ansiterm" "github.com/Azure/go-ansiterm"
windows "golang.org/x/sys/windows"
) )
// Windows keyboard constants // Windows keyboard constants
@@ -163,28 +162,15 @@ func ensureInRange(n int16, min int16, max int16) int16 {
func GetStdFile(nFile int) (*os.File, uintptr) { func GetStdFile(nFile int) (*os.File, uintptr) {
var file *os.File var file *os.File
switch nFile {
// syscall uses negative numbers case syscall.STD_INPUT_HANDLE:
// windows package uses very big uint32
// Keep these switches split so we don't have to convert ints too much.
switch uint32(nFile) {
case windows.STD_INPUT_HANDLE:
file = os.Stdin file = os.Stdin
case windows.STD_OUTPUT_HANDLE: case syscall.STD_OUTPUT_HANDLE:
file = os.Stdout file = os.Stdout
case windows.STD_ERROR_HANDLE: case syscall.STD_ERROR_HANDLE:
file = os.Stderr file = os.Stderr
default: default:
switch nFile { panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile))
case syscall.STD_INPUT_HANDLE:
file = os.Stdin
case syscall.STD_OUTPUT_HANDLE:
file = os.Stdout
case syscall.STD_ERROR_HANDLE:
file = os.Stderr
default:
panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile))
}
} }
fd, err := syscall.GetStdHandle(nFile) fd, err := syscall.GetStdHandle(nFile)


@@ -1,4 +1,4 @@
-# go-winio [![Build Status](https://github.com/microsoft/go-winio/actions/workflows/ci.yml/badge.svg)](https://github.com/microsoft/go-winio/actions/workflows/ci.yml)
+# go-winio
This repository contains utilities for efficiently performing Win32 IO operations in
Go. Currently, this is focused on accessing named pipes and other file handles, and


@@ -28,9 +28,8 @@ const (
ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300
SeBackupPrivilege = "SeBackupPrivilege"
SeRestorePrivilege = "SeRestorePrivilege"
-SeSecurityPrivilege = "SeSecurityPrivilege"
)
const (

vendor/github.com/Microsoft/hcsshim/LICENSE generated vendored Normal file (21 lines added)

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015 Microsoft
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -0,0 +1,50 @@
package osversion
import (
"fmt"
"sync"
"golang.org/x/sys/windows"
)
// OSVersion is a wrapper for Windows version information
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx
type OSVersion struct {
Version uint32
MajorVersion uint8
MinorVersion uint8
Build uint16
}
var (
osv OSVersion
once sync.Once
)
// Get gets the operating system version on Windows.
// The calling application must be manifested to get the correct version information.
func Get() OSVersion {
once.Do(func() {
var err error
osv = OSVersion{}
osv.Version, err = windows.GetVersion()
if err != nil {
// GetVersion never fails.
panic(err)
}
osv.MajorVersion = uint8(osv.Version & 0xFF)
osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF)
osv.Build = uint16(osv.Version >> 16)
})
return osv
}
// Build gets the build-number on Windows
// The calling application must be manifested to get the correct version information.
func Build() uint16 {
return Get().Build
}
func (osv OSVersion) ToString() string {
return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build)
}


@@ -0,0 +1,38 @@
package osversion
const (
// RS1 (version 1607, codename "Redstone 1") corresponds to Windows Server
// 2016 (ltsc2016) and Windows 10 (Anniversary Update).
RS1 = 14393
// RS2 (version 1703, codename "Redstone 2") was a client-only update, and
// corresponds to Windows 10 (Creators Update).
RS2 = 15063
// RS3 (version 1709, codename "Redstone 3") corresponds to Windows Server
// 1709 (Semi-Annual Channel (SAC)), and Windows 10 (Fall Creators Update).
RS3 = 16299
// RS4 (version 1803, codename "Redstone 4") corresponds to Windows Server
// 1803 (Semi-Annual Channel (SAC)), and Windows 10 (April 2018 Update).
RS4 = 17134
// RS5 (version 1809, codename "Redstone 5") corresponds to Windows Server
// 2019 (ltsc2019), and Windows 10 (October 2018 Update).
RS5 = 17763
// V19H1 (version 1903) corresponds to Windows Server 1903 (semi-annual
// channel).
V19H1 = 18362
// V19H2 (version 1909) corresponds to Windows Server 1909 (semi-annual
// channel).
V19H2 = 18363
// V20H1 (version 2004) corresponds to Windows Server 2004 (semi-annual
// channel).
V20H1 = 19041
// V20H2 corresponds to Windows Server 20H2 (semi-annual channel).
V20H2 = 19042
)


@@ -1,53 +1,2 @@
# Ignore docs files
_gh_pages
_site
# Ignore temporary files
README.html
coverage.out
.tmp
# Numerous always-ignore extensions
*.diff
*.err
*.log
*.orig
*.rej
*.swo
*.swp
*.vi
*.zip
*~
# OS or Editor folders
._*
.cache
.DS_Store
.idea
.project
.settings
.tmproj
*.esproj
*.sublime-project
*.sublime-workspace
nbproject
Thumbs.db
# Komodo
.komodotools
*.komodoproject
# SCSS-Lint
scss-lint-report.xml
# grunt-contrib-sass cache
.sass-cache
# Jekyll metadata
docs/.jekyll-metadata
# Folders to ignore
.build
.test
bower_components
node_modules


@@ -1,30 +1,70 @@
language: go
sudo: false
-matrix:
-fast_finish: true
-include:
-- go: 1.14.x
-env: TEST_METHOD=goveralls
-- go: 1.13.x
-- go: 1.12.x
-- go: 1.11.x
-- go: 1.10.x
-- go: tip
-- go: 1.9.x
-- go: 1.8.x
-- go: 1.7.x
-- go: 1.6.x
-- go: 1.5.x
-allow_failures:
-- go: tip
-- go: 1.11.x
-- go: 1.10.x
-- go: 1.9.x
-- go: 1.8.x
-- go: 1.7.x
-- go: 1.6.x
-- go: 1.5.x
-script: ./test.sh $TEST_METHOD
+go:
+- 1.8
+- 1.7.5
+- 1.7.4
+- 1.7.3
+- 1.7.2
+- 1.7.1
+- 1.7
+- tip
+- 1.6.4
+- 1.6.3
+- 1.6.2
+- 1.6.1
+- 1.6
+- 1.5.4
+- 1.5.3
+- 1.5.2
+- 1.5.1
+- 1.5
+- 1.4.3
+- 1.4.2
+- 1.4.1
+- 1.4
+- 1.3.3
+- 1.3.2
+- 1.3.1
+- 1.3
+- 1.2.2
+- 1.2.1
+- 1.2
+- 1.1.2
+- 1.1.1
+- 1.1
+before_install:
+- go get github.com/mattn/goveralls
+script:
+- $HOME/gopath/bin/goveralls -service=travis-ci
notifications:
email:
on_success: never
+matrix:
+fast_finish: true
+allow_failures:
+- go: tip
+- go: 1.6.4
+- go: 1.6.3
+- go: 1.6.2
+- go: 1.6.1
+- go: 1.6
+- go: 1.5.4
+- go: 1.5.3
+- go: 1.5.2
+- go: 1.5.1
+- go: 1.5
+- go: 1.4.3
+- go: 1.4.2
+- go: 1.4.1
+- go: 1.4
+- go: 1.3.3
+- go: 1.3.2
+- go: 1.3.1
+- go: 1.3
+- go: 1.2.2
+- go: 1.2.1
+- go: 1.2
+- go: 1.1.2
+- go: 1.1.1
+- go: 1.1


@@ -11,7 +11,7 @@ This package implements distance and similarity metrics for strings, based on th
## Project Status
-v1.2.3 Stable: Guaranteed no breaking changes to the API in future v1.x releases. Probably safe to use in production, though provided on "AS IS" basis.
+v1.2.1 Stable: Guaranteed no breaking changes to the API in future v1.x releases. Probably safe to use in production, though provided on "AS IS" basis.
This package is being actively maintained. If you encounter any problems or have any suggestions for improvement, please [open an issue](https://github.com/agext/levenshtein/issues). Pull requests are welcome.


@@ -1 +0,0 @@
module github.com/agext/levenshtein

Some files were not shown because too many files have changed in this diff.